aboutsummaryrefslogtreecommitdiffstats
path: root/contrib/libs/llvm16/utils
diff options
context:
space:
mode:
authorvitalyisaev <vitalyisaev@yandex-team.com>2023-06-29 10:00:50 +0300
committervitalyisaev <vitalyisaev@yandex-team.com>2023-06-29 10:00:50 +0300
commit6ffe9e53658409f212834330e13564e4952558f6 (patch)
tree85b1e00183517648b228aafa7c8fb07f5276f419 /contrib/libs/llvm16/utils
parent726057070f9c5a91fc10fde0d5024913d10f1ab9 (diff)
downloadydb-6ffe9e53658409f212834330e13564e4952558f6.tar.gz
YQ Connector: support managed ClickHouse
Со стороны dqrun можно обратиться к инстансу коннектора, который работает на streaming стенде, и извлечь данные из облачного CH.
Diffstat (limited to 'contrib/libs/llvm16/utils')
-rw-r--r--contrib/libs/llvm16/utils/TableGen/AsmMatcherEmitter.cpp4009
-rw-r--r--contrib/libs/llvm16/utils/TableGen/AsmWriterEmitter.cpp1314
-rw-r--r--contrib/libs/llvm16/utils/TableGen/AsmWriterInst.cpp206
-rw-r--r--contrib/libs/llvm16/utils/TableGen/AsmWriterInst.h108
-rw-r--r--contrib/libs/llvm16/utils/TableGen/Attributes.cpp138
-rw-r--r--contrib/libs/llvm16/utils/TableGen/CTagsEmitter.cpp93
-rw-r--r--contrib/libs/llvm16/utils/TableGen/CallingConvEmitter.cpp436
-rw-r--r--contrib/libs/llvm16/utils/TableGen/CodeEmitterGen.cpp574
-rw-r--r--contrib/libs/llvm16/utils/TableGen/CodeGenDAGPatterns.cpp4797
-rw-r--r--contrib/libs/llvm16/utils/TableGen/CodeGenDAGPatterns.h1275
-rw-r--r--contrib/libs/llvm16/utils/TableGen/CodeGenHwModes.cpp113
-rw-r--r--contrib/libs/llvm16/utils/TableGen/CodeGenHwModes.h64
-rw-r--r--contrib/libs/llvm16/utils/TableGen/CodeGenInstruction.cpp857
-rw-r--r--contrib/libs/llvm16/utils/TableGen/CodeGenInstruction.h410
-rw-r--r--contrib/libs/llvm16/utils/TableGen/CodeGenIntrinsics.h189
-rw-r--r--contrib/libs/llvm16/utils/TableGen/CodeGenMapTable.cpp605
-rw-r--r--contrib/libs/llvm16/utils/TableGen/CodeGenRegisters.cpp2503
-rw-r--r--contrib/libs/llvm16/utils/TableGen/CodeGenRegisters.h847
-rw-r--r--contrib/libs/llvm16/utils/TableGen/CodeGenSchedule.cpp2273
-rw-r--r--contrib/libs/llvm16/utils/TableGen/CodeGenSchedule.h646
-rw-r--r--contrib/libs/llvm16/utils/TableGen/CodeGenTarget.cpp952
-rw-r--r--contrib/libs/llvm16/utils/TableGen/CodeGenTarget.h225
-rw-r--r--contrib/libs/llvm16/utils/TableGen/CompressInstEmitter.cpp911
-rw-r--r--contrib/libs/llvm16/utils/TableGen/DAGISelEmitter.cpp195
-rw-r--r--contrib/libs/llvm16/utils/TableGen/DAGISelMatcher.cpp435
-rw-r--r--contrib/libs/llvm16/utils/TableGen/DAGISelMatcher.h1125
-rw-r--r--contrib/libs/llvm16/utils/TableGen/DAGISelMatcherEmitter.cpp1171
-rw-r--r--contrib/libs/llvm16/utils/TableGen/DAGISelMatcherGen.cpp1111
-rw-r--r--contrib/libs/llvm16/utils/TableGen/DAGISelMatcherOpt.cpp471
-rw-r--r--contrib/libs/llvm16/utils/TableGen/DFAEmitter.cpp379
-rw-r--r--contrib/libs/llvm16/utils/TableGen/DFAEmitter.h107
-rw-r--r--contrib/libs/llvm16/utils/TableGen/DFAPacketizerEmitter.cpp362
-rw-r--r--contrib/libs/llvm16/utils/TableGen/DXILEmitter.cpp442
-rw-r--r--contrib/libs/llvm16/utils/TableGen/DecoderEmitter.cpp2773
-rw-r--r--contrib/libs/llvm16/utils/TableGen/DirectiveEmitter.cpp885
-rw-r--r--contrib/libs/llvm16/utils/TableGen/DisassemblerEmitter.cpp138
-rw-r--r--contrib/libs/llvm16/utils/TableGen/ExegesisEmitter.cpp211
-rw-r--r--contrib/libs/llvm16/utils/TableGen/FastISelEmitter.cpp875
-rw-r--r--contrib/libs/llvm16/utils/TableGen/GICombinerEmitter.cpp1081
-rw-r--r--contrib/libs/llvm16/utils/TableGen/GlobalISel/CodeExpander.cpp83
-rw-r--r--contrib/libs/llvm16/utils/TableGen/GlobalISel/CodeExpander.h55
-rw-r--r--contrib/libs/llvm16/utils/TableGen/GlobalISel/CodeExpansions.h43
-rw-r--r--contrib/libs/llvm16/utils/TableGen/GlobalISel/GIMatchDag.cpp138
-rw-r--r--contrib/libs/llvm16/utils/TableGen/GlobalISel/GIMatchDag.h240
-rw-r--r--contrib/libs/llvm16/utils/TableGen/GlobalISel/GIMatchDagEdge.cpp39
-rw-r--r--contrib/libs/llvm16/utils/TableGen/GlobalISel/GIMatchDagEdge.h70
-rw-r--r--contrib/libs/llvm16/utils/TableGen/GlobalISel/GIMatchDagInstr.cpp48
-rw-r--r--contrib/libs/llvm16/utils/TableGen/GlobalISel/GIMatchDagInstr.h118
-rw-r--r--contrib/libs/llvm16/utils/TableGen/GlobalISel/GIMatchDagOperands.cpp153
-rw-r--r--contrib/libs/llvm16/utils/TableGen/GlobalISel/GIMatchDagOperands.h133
-rw-r--r--contrib/libs/llvm16/utils/TableGen/GlobalISel/GIMatchDagPredicate.cpp69
-rw-r--r--contrib/libs/llvm16/utils/TableGen/GlobalISel/GIMatchDagPredicate.h145
-rw-r--r--contrib/libs/llvm16/utils/TableGen/GlobalISel/GIMatchDagPredicateDependencyEdge.cpp38
-rw-r--r--contrib/libs/llvm16/utils/TableGen/GlobalISel/GIMatchDagPredicateDependencyEdge.h61
-rw-r--r--contrib/libs/llvm16/utils/TableGen/GlobalISel/GIMatchTree.cpp786
-rw-r--r--contrib/libs/llvm16/utils/TableGen/GlobalISel/GIMatchTree.h626
-rw-r--r--contrib/libs/llvm16/utils/TableGen/GlobalISel/ya.make34
-rw-r--r--contrib/libs/llvm16/utils/TableGen/GlobalISelEmitter.cpp6314
-rw-r--r--contrib/libs/llvm16/utils/TableGen/InfoByHwMode.cpp214
-rw-r--r--contrib/libs/llvm16/utils/TableGen/InfoByHwMode.h196
-rw-r--r--contrib/libs/llvm16/utils/TableGen/InstrDocsEmitter.cpp219
-rw-r--r--contrib/libs/llvm16/utils/TableGen/InstrInfoEmitter.cpp1257
-rw-r--r--contrib/libs/llvm16/utils/TableGen/IntrinsicEmitter.cpp961
-rw-r--r--contrib/libs/llvm16/utils/TableGen/OptEmitter.cpp84
-rw-r--r--contrib/libs/llvm16/utils/TableGen/OptEmitter.h16
-rw-r--r--contrib/libs/llvm16/utils/TableGen/OptParserEmitter.cpp502
-rw-r--r--contrib/libs/llvm16/utils/TableGen/OptRSTEmitter.cpp105
-rw-r--r--contrib/libs/llvm16/utils/TableGen/PredicateExpander.cpp547
-rw-r--r--contrib/libs/llvm16/utils/TableGen/PredicateExpander.h126
-rw-r--r--contrib/libs/llvm16/utils/TableGen/PseudoLoweringEmitter.cpp322
-rw-r--r--contrib/libs/llvm16/utils/TableGen/RISCVTargetDefEmitter.cpp82
-rw-r--r--contrib/libs/llvm16/utils/TableGen/RegisterBankEmitter.cpp336
-rw-r--r--contrib/libs/llvm16/utils/TableGen/RegisterInfoEmitter.cpp1915
-rw-r--r--contrib/libs/llvm16/utils/TableGen/SDNodeProperties.cpp40
-rw-r--r--contrib/libs/llvm16/utils/TableGen/SDNodeProperties.h39
-rw-r--r--contrib/libs/llvm16/utils/TableGen/SearchableTableEmitter.cpp831
-rw-r--r--contrib/libs/llvm16/utils/TableGen/SequenceToOffsetTable.h175
-rw-r--r--contrib/libs/llvm16/utils/TableGen/SubtargetEmitter.cpp1993
-rw-r--r--contrib/libs/llvm16/utils/TableGen/SubtargetFeatureInfo.cpp166
-rw-r--r--contrib/libs/llvm16/utils/TableGen/SubtargetFeatureInfo.h101
-rw-r--r--contrib/libs/llvm16/utils/TableGen/TableGen.cpp312
-rw-r--r--contrib/libs/llvm16/utils/TableGen/TableGenBackends.h101
-rw-r--r--contrib/libs/llvm16/utils/TableGen/Types.cpp44
-rw-r--r--contrib/libs/llvm16/utils/TableGen/Types.h24
-rw-r--r--contrib/libs/llvm16/utils/TableGen/VarLenCodeEmitterGen.cpp513
-rw-r--r--contrib/libs/llvm16/utils/TableGen/VarLenCodeEmitterGen.h59
-rw-r--r--contrib/libs/llvm16/utils/TableGen/WebAssemblyDisassemblerEmitter.cpp175
-rw-r--r--contrib/libs/llvm16/utils/TableGen/WebAssemblyDisassemblerEmitter.h30
-rw-r--r--contrib/libs/llvm16/utils/TableGen/X86DisassemblerShared.h57
-rw-r--r--contrib/libs/llvm16/utils/TableGen/X86DisassemblerTables.cpp1089
-rw-r--r--contrib/libs/llvm16/utils/TableGen/X86DisassemblerTables.h292
-rw-r--r--contrib/libs/llvm16/utils/TableGen/X86EVEX2VEXTablesEmitter.cpp246
-rw-r--r--contrib/libs/llvm16/utils/TableGen/X86FoldTablesEmitter.cpp618
-rw-r--r--contrib/libs/llvm16/utils/TableGen/X86MnemonicTables.cpp94
-rw-r--r--contrib/libs/llvm16/utils/TableGen/X86ModRMFilters.cpp23
-rw-r--r--contrib/libs/llvm16/utils/TableGen/X86ModRMFilters.h143
-rw-r--r--contrib/libs/llvm16/utils/TableGen/X86RecognizableInstr.cpp1307
-rw-r--r--contrib/libs/llvm16/utils/TableGen/X86RecognizableInstr.h367
-rw-r--r--contrib/libs/llvm16/utils/TableGen/ya.make84
99 files changed, 60304 insertions, 0 deletions
diff --git a/contrib/libs/llvm16/utils/TableGen/AsmMatcherEmitter.cpp b/contrib/libs/llvm16/utils/TableGen/AsmMatcherEmitter.cpp
new file mode 100644
index 0000000000..c13e5b5def
--- /dev/null
+++ b/contrib/libs/llvm16/utils/TableGen/AsmMatcherEmitter.cpp
@@ -0,0 +1,4009 @@
+//===- AsmMatcherEmitter.cpp - Generate an assembly matcher ---------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// This tablegen backend emits a target specifier matcher for converting parsed
+// assembly operands in the MCInst structures. It also emits a matcher for
+// custom operand parsing.
+//
+// Converting assembly operands into MCInst structures
+// ---------------------------------------------------
+//
+// The input to the target specific matcher is a list of literal tokens and
+// operands. The target specific parser should generally eliminate any syntax
+// which is not relevant for matching; for example, comma tokens should have
+// already been consumed and eliminated by the parser. Most instructions will
+// end up with a single literal token (the instruction name) and some number of
+// operands.
+//
+// Some example inputs, for X86:
+// 'addl' (immediate ...) (register ...)
+// 'add' (immediate ...) (memory ...)
+// 'call' '*' %epc
+//
+// The assembly matcher is responsible for converting this input into a precise
+// machine instruction (i.e., an instruction with a well defined encoding). This
+// mapping has several properties which complicate matching:
+//
+// - It may be ambiguous; many architectures can legally encode particular
+// variants of an instruction in different ways (for example, using a smaller
+// encoding for small immediates). Such ambiguities should never be
+// arbitrarily resolved by the assembler, the assembler is always responsible
+// for choosing the "best" available instruction.
+//
+// - It may depend on the subtarget or the assembler context. Instructions
+// which are invalid for the current mode, but otherwise unambiguous (e.g.,
+// an SSE instruction in a file being assembled for i486) should be accepted
+// and rejected by the assembler front end. However, if the proper encoding
+// for an instruction is dependent on the assembler context then the matcher
+// is responsible for selecting the correct machine instruction for the
+// current mode.
+//
+// The core matching algorithm attempts to exploit the regularity in most
+// instruction sets to quickly determine the set of possibly matching
+// instructions, and the simplify the generated code. Additionally, this helps
+// to ensure that the ambiguities are intentionally resolved by the user.
+//
+// The matching is divided into two distinct phases:
+//
+// 1. Classification: Each operand is mapped to the unique set which (a)
+// contains it, and (b) is the largest such subset for which a single
+// instruction could match all members.
+//
+// For register classes, we can generate these subgroups automatically. For
+// arbitrary operands, we expect the user to define the classes and their
+// relations to one another (for example, 8-bit signed immediates as a
+// subset of 32-bit immediates).
+//
+// By partitioning the operands in this way, we guarantee that for any
+// tuple of classes, any single instruction must match either all or none
+// of the sets of operands which could classify to that tuple.
+//
+// In addition, the subset relation amongst classes induces a partial order
+// on such tuples, which we use to resolve ambiguities.
+//
+// 2. The input can now be treated as a tuple of classes (static tokens are
+// simple singleton sets). Each such tuple should generally map to a single
+// instruction (we currently ignore cases where this isn't true, whee!!!),
+// which we can emit a simple matcher for.
+//
+// Custom Operand Parsing
+// ----------------------
+//
+// Some targets need a custom way to parse operands, some specific instructions
+// can contain arguments that can represent processor flags and other kinds of
+// identifiers that need to be mapped to specific values in the final encoded
+// instructions. The target specific custom operand parsing works in the
+// following way:
+//
+// 1. A operand match table is built, each entry contains a mnemonic, an
+// operand class, a mask for all operand positions for that same
+// class/mnemonic and target features to be checked while trying to match.
+//
+// 2. The operand matcher will try every possible entry with the same
+// mnemonic and will check if the target feature for this mnemonic also
+// matches. After that, if the operand to be matched has its index
+// present in the mask, a successful match occurs. Otherwise, fallback
+// to the regular operand parsing.
+//
+// 3. For a match success, each operand class that has a 'ParserMethod'
+// becomes part of a switch from where the custom method is called.
+//
+//===----------------------------------------------------------------------===//
+
+#include "CodeGenInstruction.h"
+#include "CodeGenTarget.h"
+#include "SubtargetFeatureInfo.h"
+#include "Types.h"
+#include "llvm/ADT/CachedHashString.h"
+#include "llvm/ADT/PointerUnion.h"
+#include "llvm/ADT/STLExtras.h"
+#include "llvm/ADT/SmallPtrSet.h"
+#include "llvm/ADT/SmallVector.h"
+#include "llvm/ADT/StringExtras.h"
+#include "llvm/Config/llvm-config.h"
+#include "llvm/Support/CommandLine.h"
+#include "llvm/Support/Debug.h"
+#include "llvm/Support/ErrorHandling.h"
+#include "llvm/TableGen/Error.h"
+#include "llvm/TableGen/Record.h"
+#include "llvm/TableGen/StringMatcher.h"
+#include "llvm/TableGen/StringToOffsetTable.h"
+#include "llvm/TableGen/TableGenBackend.h"
+#include <cassert>
+#include <cctype>
+#include <forward_list>
+#include <map>
+#include <set>
+
+using namespace llvm;
+
+#define DEBUG_TYPE "asm-matcher-emitter"
+
+cl::OptionCategory AsmMatcherEmitterCat("Options for -gen-asm-matcher");
+
+static cl::opt<std::string>
+ MatchPrefix("match-prefix", cl::init(""),
+ cl::desc("Only match instructions with the given prefix"),
+ cl::cat(AsmMatcherEmitterCat));
+
+namespace {
+class AsmMatcherInfo;
+
+// Register sets are used as keys in some second-order sets TableGen creates
+// when generating its data structures. This means that the order of two
+// RegisterSets can be seen in the outputted AsmMatcher tables occasionally, and
+// can even affect compiler output (at least seen in diagnostics produced when
+// all matches fail). So we use a type that sorts them consistently.
+typedef std::set<Record*, LessRecordByID> RegisterSet;
+
+class AsmMatcherEmitter {
+ RecordKeeper &Records;
+public:
+ AsmMatcherEmitter(RecordKeeper &R) : Records(R) {}
+
+ void run(raw_ostream &o);
+};
+
+/// ClassInfo - Helper class for storing the information about a particular
+/// class of operands which can be matched.
+struct ClassInfo {
+ enum ClassInfoKind {
+ /// Invalid kind, for use as a sentinel value.
+ Invalid = 0,
+
+ /// The class for a particular token.
+ Token,
+
+ /// The (first) register class, subsequent register classes are
+ /// RegisterClass0+1, and so on.
+ RegisterClass0,
+
+ /// The (first) user defined class, subsequent user defined classes are
+ /// UserClass0+1, and so on.
+ UserClass0 = 1<<16
+ };
+
+ /// Kind - The class kind, which is either a predefined kind, or (UserClass0 +
+ /// N) for the Nth user defined class.
+ unsigned Kind;
+
+ /// SuperClasses - The super classes of this class. Note that for simplicities
+ /// sake user operands only record their immediate super class, while register
+ /// operands include all superclasses.
+ std::vector<ClassInfo*> SuperClasses;
+
+ /// Name - The full class name, suitable for use in an enum.
+ std::string Name;
+
+ /// ClassName - The unadorned generic name for this class (e.g., Token).
+ std::string ClassName;
+
+ /// ValueName - The name of the value this class represents; for a token this
+ /// is the literal token string, for an operand it is the TableGen class (or
+ /// empty if this is a derived class).
+ std::string ValueName;
+
+ /// PredicateMethod - The name of the operand method to test whether the
+ /// operand matches this class; this is not valid for Token or register kinds.
+ std::string PredicateMethod;
+
+ /// RenderMethod - The name of the operand method to add this operand to an
+ /// MCInst; this is not valid for Token or register kinds.
+ std::string RenderMethod;
+
+ /// ParserMethod - The name of the operand method to do a target specific
+ /// parsing on the operand.
+ std::string ParserMethod;
+
+ /// For register classes: the records for all the registers in this class.
+ RegisterSet Registers;
+
+ /// For custom match classes: the diagnostic kind for when the predicate fails.
+ std::string DiagnosticType;
+
+ /// For custom match classes: the diagnostic string for when the predicate fails.
+ std::string DiagnosticString;
+
+ /// Is this operand optional and not always required.
+ bool IsOptional;
+
+ /// DefaultMethod - The name of the method that returns the default operand
+ /// for optional operand
+ std::string DefaultMethod;
+
+public:
+ /// isRegisterClass() - Check if this is a register class.
+ bool isRegisterClass() const {
+ return Kind >= RegisterClass0 && Kind < UserClass0;
+ }
+
+ /// isUserClass() - Check if this is a user defined class.
+ bool isUserClass() const {
+ return Kind >= UserClass0;
+ }
+
+ /// isRelatedTo - Check whether this class is "related" to \p RHS. Classes
+ /// are related if they are in the same class hierarchy.
+ bool isRelatedTo(const ClassInfo &RHS) const {
+ // Tokens are only related to tokens.
+ if (Kind == Token || RHS.Kind == Token)
+ return Kind == Token && RHS.Kind == Token;
+
+ // Registers classes are only related to registers classes, and only if
+ // their intersection is non-empty.
+ if (isRegisterClass() || RHS.isRegisterClass()) {
+ if (!isRegisterClass() || !RHS.isRegisterClass())
+ return false;
+
+ RegisterSet Tmp;
+ std::insert_iterator<RegisterSet> II(Tmp, Tmp.begin());
+ std::set_intersection(Registers.begin(), Registers.end(),
+ RHS.Registers.begin(), RHS.Registers.end(),
+ II, LessRecordByID());
+
+ return !Tmp.empty();
+ }
+
+ // Otherwise we have two users operands; they are related if they are in the
+ // same class hierarchy.
+ //
+ // FIXME: This is an oversimplification, they should only be related if they
+ // intersect, however we don't have that information.
+ assert(isUserClass() && RHS.isUserClass() && "Unexpected class!");
+ const ClassInfo *Root = this;
+ while (!Root->SuperClasses.empty())
+ Root = Root->SuperClasses.front();
+
+ const ClassInfo *RHSRoot = &RHS;
+ while (!RHSRoot->SuperClasses.empty())
+ RHSRoot = RHSRoot->SuperClasses.front();
+
+ return Root == RHSRoot;
+ }
+
+ /// isSubsetOf - Test whether this class is a subset of \p RHS.
+ bool isSubsetOf(const ClassInfo &RHS) const {
+ // This is a subset of RHS if it is the same class...
+ if (this == &RHS)
+ return true;
+
+ // ... or if any of its super classes are a subset of RHS.
+ SmallVector<const ClassInfo *, 16> Worklist(SuperClasses.begin(),
+ SuperClasses.end());
+ SmallPtrSet<const ClassInfo *, 16> Visited;
+ while (!Worklist.empty()) {
+ auto *CI = Worklist.pop_back_val();
+ if (CI == &RHS)
+ return true;
+ for (auto *Super : CI->SuperClasses)
+ if (Visited.insert(Super).second)
+ Worklist.push_back(Super);
+ }
+
+ return false;
+ }
+
+ int getTreeDepth() const {
+ int Depth = 0;
+ const ClassInfo *Root = this;
+ while (!Root->SuperClasses.empty()) {
+ Depth++;
+ Root = Root->SuperClasses.front();
+ }
+ return Depth;
+ }
+
+ const ClassInfo *findRoot() const {
+ const ClassInfo *Root = this;
+ while (!Root->SuperClasses.empty())
+ Root = Root->SuperClasses.front();
+ return Root;
+ }
+
+ /// Compare two classes. This does not produce a total ordering, but does
+ /// guarantee that subclasses are sorted before their parents, and that the
+ /// ordering is transitive.
+ bool operator<(const ClassInfo &RHS) const {
+ if (this == &RHS)
+ return false;
+
+ // First, enforce the ordering between the three different types of class.
+ // Tokens sort before registers, which sort before user classes.
+ if (Kind == Token) {
+ if (RHS.Kind != Token)
+ return true;
+ assert(RHS.Kind == Token);
+ } else if (isRegisterClass()) {
+ if (RHS.Kind == Token)
+ return false;
+ else if (RHS.isUserClass())
+ return true;
+ assert(RHS.isRegisterClass());
+ } else if (isUserClass()) {
+ if (!RHS.isUserClass())
+ return false;
+ assert(RHS.isUserClass());
+ } else {
+ llvm_unreachable("Unknown ClassInfoKind");
+ }
+
+ if (Kind == Token || isUserClass()) {
+ // Related tokens and user classes get sorted by depth in the inheritence
+ // tree (so that subclasses are before their parents).
+ if (isRelatedTo(RHS)) {
+ if (getTreeDepth() > RHS.getTreeDepth())
+ return true;
+ if (getTreeDepth() < RHS.getTreeDepth())
+ return false;
+ } else {
+ // Unrelated tokens and user classes are ordered by the name of their
+ // root nodes, so that there is a consistent ordering between
+ // unconnected trees.
+ return findRoot()->ValueName < RHS.findRoot()->ValueName;
+ }
+ } else if (isRegisterClass()) {
+ // For register sets, sort by number of registers. This guarantees that
+ // a set will always sort before all of it's strict supersets.
+ if (Registers.size() != RHS.Registers.size())
+ return Registers.size() < RHS.Registers.size();
+ } else {
+ llvm_unreachable("Unknown ClassInfoKind");
+ }
+
+ // FIXME: We should be able to just return false here, as we only need a
+ // partial order (we use stable sorts, so this is deterministic) and the
+ // name of a class shouldn't be significant. However, some of the backends
+ // accidentally rely on this behaviour, so it will have to stay like this
+ // until they are fixed.
+ return ValueName < RHS.ValueName;
+ }
+};
+
+class AsmVariantInfo {
+public:
+ StringRef RegisterPrefix;
+ StringRef TokenizingCharacters;
+ StringRef SeparatorCharacters;
+ StringRef BreakCharacters;
+ StringRef Name;
+ int AsmVariantNo;
+};
+
+/// MatchableInfo - Helper class for storing the necessary information for an
+/// instruction or alias which is capable of being matched.
+struct MatchableInfo {
+ struct AsmOperand {
+ /// Token - This is the token that the operand came from.
+ StringRef Token;
+
+ /// The unique class instance this operand should match.
+ ClassInfo *Class;
+
+ /// The operand name this is, if anything.
+ StringRef SrcOpName;
+
+ /// The operand name this is, before renaming for tied operands.
+ StringRef OrigSrcOpName;
+
+ /// The suboperand index within SrcOpName, or -1 for the entire operand.
+ int SubOpIdx;
+
+ /// Whether the token is "isolated", i.e., it is preceded and followed
+ /// by separators.
+ bool IsIsolatedToken;
+
+ /// Register record if this token is singleton register.
+ Record *SingletonReg;
+
+ explicit AsmOperand(bool IsIsolatedToken, StringRef T)
+ : Token(T), Class(nullptr), SubOpIdx(-1),
+ IsIsolatedToken(IsIsolatedToken), SingletonReg(nullptr) {}
+ };
+
+ /// ResOperand - This represents a single operand in the result instruction
+ /// generated by the match. In cases (like addressing modes) where a single
+ /// assembler operand expands to multiple MCOperands, this represents the
+ /// single assembler operand, not the MCOperand.
+ struct ResOperand {
+ enum {
+ /// RenderAsmOperand - This represents an operand result that is
+ /// generated by calling the render method on the assembly operand. The
+ /// corresponding AsmOperand is specified by AsmOperandNum.
+ RenderAsmOperand,
+
+ /// TiedOperand - This represents a result operand that is a duplicate of
+ /// a previous result operand.
+ TiedOperand,
+
+ /// ImmOperand - This represents an immediate value that is dumped into
+ /// the operand.
+ ImmOperand,
+
+ /// RegOperand - This represents a fixed register that is dumped in.
+ RegOperand
+ } Kind;
+
+ /// Tuple containing the index of the (earlier) result operand that should
+ /// be copied from, as well as the indices of the corresponding (parsed)
+ /// operands in the asm string.
+ struct TiedOperandsTuple {
+ unsigned ResOpnd;
+ unsigned SrcOpnd1Idx;
+ unsigned SrcOpnd2Idx;
+ };
+
+ union {
+ /// This is the operand # in the AsmOperands list that this should be
+ /// copied from.
+ unsigned AsmOperandNum;
+
+ /// Description of tied operands.
+ TiedOperandsTuple TiedOperands;
+
+ /// ImmVal - This is the immediate value added to the instruction.
+ int64_t ImmVal;
+
+ /// Register - This is the register record.
+ Record *Register;
+ };
+
+ /// MINumOperands - The number of MCInst operands populated by this
+ /// operand.
+ unsigned MINumOperands;
+
+ static ResOperand getRenderedOp(unsigned AsmOpNum, unsigned NumOperands) {
+ ResOperand X;
+ X.Kind = RenderAsmOperand;
+ X.AsmOperandNum = AsmOpNum;
+ X.MINumOperands = NumOperands;
+ return X;
+ }
+
+ static ResOperand getTiedOp(unsigned TiedOperandNum, unsigned SrcOperand1,
+ unsigned SrcOperand2) {
+ ResOperand X;
+ X.Kind = TiedOperand;
+ X.TiedOperands = { TiedOperandNum, SrcOperand1, SrcOperand2 };
+ X.MINumOperands = 1;
+ return X;
+ }
+
+ static ResOperand getImmOp(int64_t Val) {
+ ResOperand X;
+ X.Kind = ImmOperand;
+ X.ImmVal = Val;
+ X.MINumOperands = 1;
+ return X;
+ }
+
+ static ResOperand getRegOp(Record *Reg) {
+ ResOperand X;
+ X.Kind = RegOperand;
+ X.Register = Reg;
+ X.MINumOperands = 1;
+ return X;
+ }
+ };
+
+ /// AsmVariantID - Target's assembly syntax variant no.
+ int AsmVariantID;
+
+ /// AsmString - The assembly string for this instruction (with variants
+ /// removed), e.g. "movsx $src, $dst".
+ std::string AsmString;
+
+ /// TheDef - This is the definition of the instruction or InstAlias that this
+ /// matchable came from.
+ Record *const TheDef;
+
+ /// DefRec - This is the definition that it came from.
+ PointerUnion<const CodeGenInstruction*, const CodeGenInstAlias*> DefRec;
+
+ const CodeGenInstruction *getResultInst() const {
+ if (DefRec.is<const CodeGenInstruction*>())
+ return DefRec.get<const CodeGenInstruction*>();
+ return DefRec.get<const CodeGenInstAlias*>()->ResultInst;
+ }
+
+ /// ResOperands - This is the operand list that should be built for the result
+ /// MCInst.
+ SmallVector<ResOperand, 8> ResOperands;
+
+ /// Mnemonic - This is the first token of the matched instruction, its
+ /// mnemonic.
+ StringRef Mnemonic;
+
+ /// AsmOperands - The textual operands that this instruction matches,
+ /// annotated with a class and where in the OperandList they were defined.
+ /// This directly corresponds to the tokenized AsmString after the mnemonic is
+ /// removed.
+ SmallVector<AsmOperand, 8> AsmOperands;
+
+ /// Predicates - The required subtarget features to match this instruction.
+ SmallVector<const SubtargetFeatureInfo *, 4> RequiredFeatures;
+
+ /// ConversionFnKind - The enum value which is passed to the generated
+ /// convertToMCInst to convert parsed operands into an MCInst for this
+ /// function.
+ std::string ConversionFnKind;
+
+ /// If this instruction is deprecated in some form.
+ bool HasDeprecation;
+
+ /// If this is an alias, this is use to determine whether or not to using
+ /// the conversion function defined by the instruction's AsmMatchConverter
+ /// or to use the function generated by the alias.
+ bool UseInstAsmMatchConverter;
+
+ MatchableInfo(const CodeGenInstruction &CGI)
+ : AsmVariantID(0), AsmString(CGI.AsmString), TheDef(CGI.TheDef), DefRec(&CGI),
+ UseInstAsmMatchConverter(true) {
+ }
+
+ MatchableInfo(std::unique_ptr<const CodeGenInstAlias> Alias)
+ : AsmVariantID(0), AsmString(Alias->AsmString), TheDef(Alias->TheDef),
+ DefRec(Alias.release()),
+ UseInstAsmMatchConverter(
+ TheDef->getValueAsBit("UseInstAsmMatchConverter")) {
+ }
+
+ // Could remove this and the dtor if PointerUnion supported unique_ptr
+ // elements with a dynamic failure/assertion (like the one below) in the case
+ // where it was copied while being in an owning state.
+ MatchableInfo(const MatchableInfo &RHS)
+ : AsmVariantID(RHS.AsmVariantID), AsmString(RHS.AsmString),
+ TheDef(RHS.TheDef), DefRec(RHS.DefRec), ResOperands(RHS.ResOperands),
+ Mnemonic(RHS.Mnemonic), AsmOperands(RHS.AsmOperands),
+ RequiredFeatures(RHS.RequiredFeatures),
+ ConversionFnKind(RHS.ConversionFnKind),
+ HasDeprecation(RHS.HasDeprecation),
+ UseInstAsmMatchConverter(RHS.UseInstAsmMatchConverter) {
+ assert(!DefRec.is<const CodeGenInstAlias *>());
+ }
+
+ ~MatchableInfo() {
+ delete DefRec.dyn_cast<const CodeGenInstAlias*>();
+ }
+
+ // Two-operand aliases clone from the main matchable, but mark the second
+ // operand as a tied operand of the first for purposes of the assembler.
+ void formTwoOperandAlias(StringRef Constraint);
+
+ void initialize(const AsmMatcherInfo &Info,
+ SmallPtrSetImpl<Record*> &SingletonRegisters,
+ AsmVariantInfo const &Variant,
+ bool HasMnemonicFirst);
+
+ /// validate - Return true if this matchable is a valid thing to match against
+ /// and perform a bunch of validity checking.
+ bool validate(StringRef CommentDelimiter, bool IsAlias) const;
+
+ /// findAsmOperand - Find the AsmOperand with the specified name and
+ /// suboperand index.
+ int findAsmOperand(StringRef N, int SubOpIdx) const {
+ auto I = find_if(AsmOperands, [&](const AsmOperand &Op) {
+ return Op.SrcOpName == N && Op.SubOpIdx == SubOpIdx;
+ });
+ return (I != AsmOperands.end()) ? I - AsmOperands.begin() : -1;
+ }
+
+ /// findAsmOperandNamed - Find the first AsmOperand with the specified name.
+ /// This does not check the suboperand index.
+ int findAsmOperandNamed(StringRef N, int LastIdx = -1) const {
+ auto I =
+ llvm::find_if(llvm::drop_begin(AsmOperands, LastIdx + 1),
+ [&](const AsmOperand &Op) { return Op.SrcOpName == N; });
+ return (I != AsmOperands.end()) ? I - AsmOperands.begin() : -1;
+ }
+
+ int findAsmOperandOriginallyNamed(StringRef N) const {
+ auto I =
+ find_if(AsmOperands,
+ [&](const AsmOperand &Op) { return Op.OrigSrcOpName == N; });
+ return (I != AsmOperands.end()) ? I - AsmOperands.begin() : -1;
+ }
+
+ void buildInstructionResultOperands();
+ void buildAliasResultOperands(bool AliasConstraintsAreChecked);
+
+ /// operator< - Compare two matchables.
+ bool operator<(const MatchableInfo &RHS) const {
+ // The primary comparator is the instruction mnemonic.
+ if (int Cmp = Mnemonic.compare_insensitive(RHS.Mnemonic))
+ return Cmp == -1;
+
+ if (AsmOperands.size() != RHS.AsmOperands.size())
+ return AsmOperands.size() < RHS.AsmOperands.size();
+
+ // Compare lexicographically by operand. The matcher validates that other
+ // orderings wouldn't be ambiguous using \see couldMatchAmbiguouslyWith().
+ for (unsigned i = 0, e = AsmOperands.size(); i != e; ++i) {
+ if (*AsmOperands[i].Class < *RHS.AsmOperands[i].Class)
+ return true;
+ if (*RHS.AsmOperands[i].Class < *AsmOperands[i].Class)
+ return false;
+ }
+
+ // Give matches that require more features higher precedence. This is useful
+ // because we cannot define AssemblerPredicates with the negation of
+ // processor features. For example, ARM v6 "nop" may be either a HINT or
+ // MOV. With v6, we want to match HINT. The assembler has no way to
+ // predicate MOV under "NoV6", but HINT will always match first because it
+ // requires V6 while MOV does not.
+ if (RequiredFeatures.size() != RHS.RequiredFeatures.size())
+ return RequiredFeatures.size() > RHS.RequiredFeatures.size();
+
+ // For X86 AVX/AVX512 instructions, we prefer vex encoding because the
+ // vex encoding size is smaller. Since X86InstrSSE.td is included ahead
+ // of X86InstrAVX512.td, the AVX instruction ID is less than AVX512 ID.
+ // We use the ID to sort AVX instruction before AVX512 instruction in
+ // matching table.
+ if (TheDef->isSubClassOf("Instruction") &&
+ TheDef->getValueAsBit("HasPositionOrder"))
+ return TheDef->getID() < RHS.TheDef->getID();
+
+ return false;
+ }
+
  /// couldMatchAmbiguouslyWith - Check whether this matchable could
  /// ambiguously match the same set of operands as \p RHS (without being a
  /// strictly superior match).
  ///
  /// Returns true only when neither mnemonic, variant, operand count, nor
  /// operand-class comparison establishes a consistent ordering between the
  /// two matchables.
  bool couldMatchAmbiguouslyWith(const MatchableInfo &RHS) const {
    // The primary comparator is the instruction mnemonic.
    if (Mnemonic != RHS.Mnemonic)
      return false;

    // Different variants can't conflict.
    if (AsmVariantID != RHS.AsmVariantID)
      return false;

    // The number of operands is unambiguous.
    if (AsmOperands.size() != RHS.AsmOperands.size())
      return false;

    // Otherwise, make sure the ordering of the two instructions is unambiguous
    // by checking that either (a) a token or operand kind discriminates them,
    // or (b) the ordering among equivalent kinds is consistent.

    // Tokens and operand kinds are unambiguous (assuming a correct target
    // specific parser).
    for (unsigned i = 0, e = AsmOperands.size(); i != e; ++i)
      if (AsmOperands[i].Class->Kind != RHS.AsmOperands[i].Class->Kind ||
          AsmOperands[i].Class->Kind == ClassInfo::Token)
        if (*AsmOperands[i].Class < *RHS.AsmOperands[i].Class ||
            *RHS.AsmOperands[i].Class < *AsmOperands[i].Class)
          return false;

    // Otherwise, this operand could commute if all operands are equivalent, or
    // there is a pair of operands that compare less than and a pair that
    // compare greater than.
    bool HasLT = false, HasGT = false;
    for (unsigned i = 0, e = AsmOperands.size(); i != e; ++i) {
      if (*AsmOperands[i].Class < *RHS.AsmOperands[i].Class)
        HasLT = true;
      if (*RHS.AsmOperands[i].Class < *AsmOperands[i].Class)
        HasGT = true;
    }

    // All-equal (neither flag set) or inconsistent ordering (both set) means
    // the pair is ambiguous.
    return HasLT == HasGT;
  }
+
  /// Print a human-readable summary of this matchable to errs() (available
  /// in debug / LLVM_ENABLE_DUMP builds).
  void dump() const;

private:
  /// Split AsmString into AsmOperands according to the variant's tokenizing,
  /// separator and break character sets.
  void tokenizeAsmString(AsmMatcherInfo const &Info,
                         AsmVariantInfo const &Variant);
  /// Append an AsmOperand for the given substring of AsmString.
  void addAsmOperand(StringRef Token, bool IsIsolatedToken = false);
};
+
+struct OperandMatchEntry {
+ unsigned OperandMask;
+ const MatchableInfo* MI;
+ ClassInfo *CI;
+
+ static OperandMatchEntry create(const MatchableInfo *mi, ClassInfo *ci,
+ unsigned opMask) {
+ OperandMatchEntry X;
+ X.OperandMask = opMask;
+ X.CI = ci;
+ X.MI = mi;
+ return X;
+ }
+};
+
/// AsmMatcherInfo - Holds all of the information computed from the target's
/// records that the matcher emitter needs: matchables, operand classes,
/// register classes and subtarget features.
class AsmMatcherInfo {
public:
  /// Tracked Records
  RecordKeeper &Records;

  /// The tablegen AsmParser record.
  Record *AsmParser;

  /// Target - The target information.
  CodeGenTarget &Target;

  /// The classes which are needed for matching.
  // forward_list: ClassInfo addresses must stay stable, since they are
  // cached in maps and in MatchableInfo operands below.
  std::forward_list<ClassInfo> Classes;

  /// The information on the matchables to match.
  std::vector<std::unique_ptr<MatchableInfo>> Matchables;

  /// Info for custom matching operands by user defined methods.
  std::vector<OperandMatchEntry> OperandMatchInfo;

  /// Map of Register records to their class information.
  // Keyed by record ID for deterministic iteration order.
  typedef std::map<Record*, ClassInfo*, LessRecordByID> RegisterClassesTy;
  RegisterClassesTy RegisterClasses;

  /// Map of Predicate records to their subtarget information.
  std::map<Record *, SubtargetFeatureInfo, LessRecordByID> SubtargetFeatures;

  /// Map of AsmOperandClass records to their class information.
  std::map<Record*, ClassInfo*> AsmOperandClasses;

  /// Map of RegisterClass records to their class information.
  std::map<Record*, ClassInfo*> RegisterClassClasses;

private:
  /// Map of token to class information which has already been constructed.
  std::map<std::string, ClassInfo*> TokenClasses;

private:
  /// getTokenClass - Lookup or create the class for the given token.
  ClassInfo *getTokenClass(StringRef Token);

  /// getOperandClass - Lookup or create the class for the given operand.
  ClassInfo *getOperandClass(const CGIOperandList::OperandInfo &OI,
                             int SubOpIdx);
  ClassInfo *getOperandClass(Record *Rec, int SubOpIdx);

  /// buildRegisterClasses - Build the ClassInfo* instances for register
  /// classes.
  void buildRegisterClasses(SmallPtrSetImpl<Record*> &SingletonRegisters);

  /// buildOperandClasses - Build the ClassInfo* instances for user defined
  /// operand classes.
  void buildOperandClasses();

  /// Resolve asm-operand \p AsmOpIdx of instruction matchable \p II by name.
  void buildInstructionOperandReference(MatchableInfo *II, StringRef OpName,
                                        unsigned AsmOpIdx);
  /// Resolve asm-operand \p Op of alias matchable \p II by name.
  void buildAliasOperandReference(MatchableInfo *II, StringRef OpName,
                                  MatchableInfo::AsmOperand &Op);

public:
  AsmMatcherInfo(Record *AsmParser,
                 CodeGenTarget &Target,
                 RecordKeeper &Records);

  /// Construct the various tables used during matching.
  void buildInfo();

  /// buildOperandMatchInfo - Build the necessary information to handle user
  /// defined operand parsing methods.
  void buildOperandMatchInfo();

  /// getSubtargetFeature - Lookup or create the subtarget feature info for the
  /// given operand.
  const SubtargetFeatureInfo *getSubtargetFeature(Record *Def) const {
    assert(Def->isSubClassOf("Predicate") && "Invalid predicate type!");
    const auto &I = SubtargetFeatures.find(Def);
    return I == SubtargetFeatures.end() ? nullptr : &I->second;
  }

  /// Accessor for the RecordKeeper this info was built from.
  RecordKeeper &getRecords() const {
    return Records;
  }

  /// Returns true if any known operand class is marked IsOptional.
  bool hasOptionalOperands() const {
    return any_of(Classes,
                  [](const ClassInfo &Class) { return Class.IsOptional; });
  }
};
+
+} // end anonymous namespace
+
+#if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
+LLVM_DUMP_METHOD void MatchableInfo::dump() const {
+ errs() << TheDef->getName() << " -- " << "flattened:\"" << AsmString <<"\"\n";
+
+ errs() << " variant: " << AsmVariantID << "\n";
+
+ for (unsigned i = 0, e = AsmOperands.size(); i != e; ++i) {
+ const AsmOperand &Op = AsmOperands[i];
+ errs() << " op[" << i << "] = " << Op.Class->ClassName << " - ";
+ errs() << '\"' << Op.Token << "\"\n";
+ }
+}
+#endif
+
+static std::pair<StringRef, StringRef>
+parseTwoOperandConstraint(StringRef S, ArrayRef<SMLoc> Loc) {
+ // Split via the '='.
+ std::pair<StringRef, StringRef> Ops = S.split('=');
+ if (Ops.second == "")
+ PrintFatalError(Loc, "missing '=' in two-operand alias constraint");
+ // Trim whitespace and the leading '$' on the operand names.
+ size_t start = Ops.first.find_first_of('$');
+ if (start == std::string::npos)
+ PrintFatalError(Loc, "expected '$' prefix on asm operand name");
+ Ops.first = Ops.first.slice(start + 1, std::string::npos);
+ size_t end = Ops.first.find_last_of(" \t");
+ Ops.first = Ops.first.slice(0, end);
+ // Now the second operand.
+ start = Ops.second.find_first_of('$');
+ if (start == std::string::npos)
+ PrintFatalError(Loc, "expected '$' prefix on asm operand name");
+ Ops.second = Ops.second.slice(start + 1, std::string::npos);
+ end = Ops.second.find_last_of(" \t");
+ Ops.first = Ops.first.slice(0, end);
+ return Ops;
+}
+
/// formTwoOperandAlias - Rewrite this matchable so the two operands named in
/// \p Constraint ("$a = $b") are tied: the source asm operand is removed and
/// any result operand that rendered from it is redirected to the destination
/// asm operand; later asm-operand indices are shifted down accordingly.
void MatchableInfo::formTwoOperandAlias(StringRef Constraint) {
  // Figure out which operands are aliased and mark them as tied.
  std::pair<StringRef, StringRef> Ops =
    parseTwoOperandConstraint(Constraint, TheDef->getLoc());

  // Find the AsmOperands that refer to the operands we're aliasing.
  int SrcAsmOperand = findAsmOperandNamed(Ops.first);
  int DstAsmOperand = findAsmOperandNamed(Ops.second);
  if (SrcAsmOperand == -1)
    PrintFatalError(TheDef->getLoc(),
                    "unknown source two-operand alias operand '" + Ops.first +
                    "'.");
  if (DstAsmOperand == -1)
    PrintFatalError(TheDef->getLoc(),
                    "unknown destination two-operand alias operand '" +
                    Ops.second + "'.");

  // Find the ResOperand that refers to the operand we're aliasing away
  // and update it to refer to the combined operand instead.
  for (ResOperand &Op : ResOperands) {
    if (Op.Kind == ResOperand::RenderAsmOperand &&
        Op.AsmOperandNum == (unsigned)SrcAsmOperand) {
      Op.AsmOperandNum = DstAsmOperand;
      break;
    }
  }
  // Remove the AsmOperand for the alias operand.
  AsmOperands.erase(AsmOperands.begin() + SrcAsmOperand);
  // Adjust the ResOperand references to any AsmOperands that followed
  // the one we just deleted.
  for (ResOperand &Op : ResOperands) {
    switch(Op.Kind) {
    default:
      // Nothing to do for operands that don't reference AsmOperands.
      break;
    case ResOperand::RenderAsmOperand:
      if (Op.AsmOperandNum > (unsigned)SrcAsmOperand)
        --Op.AsmOperandNum;
      break;
    }
  }
}
+
/// extractSingletonRegisterForAsmOperand - Extract singleton register,
/// if present, from specified token.
///
/// If the token (with \p RegisterPrefix stripped, or lowercased when there is
/// no prefix) names a register of the target, record that register on \p Op.
static void
extractSingletonRegisterForAsmOperand(MatchableInfo::AsmOperand &Op,
                                      const AsmMatcherInfo &Info,
                                      StringRef RegisterPrefix) {
  StringRef Tok = Op.Token;

  // If this token is not an isolated token, i.e., it isn't separated from
  // other tokens (e.g. with whitespace), don't interpret it as a register name.
  if (!Op.IsIsolatedToken)
    return;

  if (RegisterPrefix.empty()) {
    // No prefix convention: match the register name case-insensitively.
    std::string LoweredTok = Tok.lower();
    if (const CodeGenRegister *Reg = Info.Target.getRegisterByName(LoweredTok))
      Op.SingletonReg = Reg->TheDef;
    return;
  }

  if (!Tok.startswith(RegisterPrefix))
    return;

  StringRef RegName = Tok.substr(RegisterPrefix.size());
  if (const CodeGenRegister *Reg = Info.Target.getRegisterByName(RegName))
    Op.SingletonReg = Reg->TheDef;

  // If there is no register prefix (i.e. "%" in "%eax"), then this may
  // be some random non-register token, just ignore it.
}
+
/// Initialize this matchable for the given asm variant: flatten and tokenize
/// the asm string, extract the mnemonic, collect required subtarget features
/// and singleton registers, and record any deprecation predicate.
void MatchableInfo::initialize(const AsmMatcherInfo &Info,
                              SmallPtrSetImpl<Record*> &SingletonRegisters,
                              AsmVariantInfo const &Variant,
                              bool HasMnemonicFirst) {
  AsmVariantID = Variant.AsmVariantNo;
  AsmString =
    CodeGenInstruction::FlattenAsmStringVariants(AsmString,
                                                 Variant.AsmVariantNo);

  tokenizeAsmString(Info, Variant);

  // The first token of the instruction is the mnemonic, which must be a
  // simple string, not a $foo variable or a singleton register.
  if (AsmOperands.empty())
    PrintFatalError(TheDef->getLoc(),
                  "Instruction '" + TheDef->getName() + "' has no tokens");

  assert(!AsmOperands[0].Token.empty());
  if (HasMnemonicFirst) {
    Mnemonic = AsmOperands[0].Token;
    if (Mnemonic[0] == '$')
      PrintFatalError(TheDef->getLoc(),
                      "Invalid instruction mnemonic '" + Mnemonic + "'!");

    // Remove the first operand, it is tracked in the mnemonic field.
    AsmOperands.erase(AsmOperands.begin());
  } else if (AsmOperands[0].Token[0] != '$')
    Mnemonic = AsmOperands[0].Token;

  // Compute the require features.
  for (Record *Predicate : TheDef->getValueAsListOfDefs("Predicates"))
    if (const SubtargetFeatureInfo *Feature =
            Info.getSubtargetFeature(Predicate))
      RequiredFeatures.push_back(Feature);

  // Collect singleton registers, if used.
  for (MatchableInfo::AsmOperand &Op : AsmOperands) {
    extractSingletonRegisterForAsmOperand(Op, Info, Variant.RegisterPrefix);
    if (Record *Reg = Op.SingletonReg)
      SingletonRegisters.insert(Reg);
  }

  // A non-empty DeprecatedFeatureMask or ComplexDeprecationPredicate marks
  // this matchable as deprecated.
  const RecordVal *DepMask = TheDef->getValue("DeprecatedFeatureMask");
  if (!DepMask)
    DepMask = TheDef->getValue("ComplexDeprecationPredicate");

  HasDeprecation =
      DepMask ? !DepMask->getValue()->getAsUnquotedString().empty() : false;
}
+
+/// Append an AsmOperand for the given substring of AsmString.
+void MatchableInfo::addAsmOperand(StringRef Token, bool IsIsolatedToken) {
+ AsmOperands.push_back(AsmOperand(IsIsolatedToken, Token));
+}
+
/// tokenizeAsmString - Tokenize a simplified assembly string.
///
/// Splits AsmString into AsmOperands using the variant's character sets:
/// break characters split tokens but keep the break char attached to the next
/// token; tokenizing characters become single-character tokens of their own;
/// separator characters are discarded. '\' escapes the next character and
/// "$name" / "${name}" spans become operand-reference tokens. A token is
/// "isolated" when it is delimited on both sides (affects singleton-register
/// detection).
void MatchableInfo::tokenizeAsmString(const AsmMatcherInfo &Info,
                                      AsmVariantInfo const &Variant) {
  StringRef String = AsmString;
  size_t Prev = 0;              // Start of the token currently being scanned.
  bool InTok = false;           // True while inside an unfinished token.
  bool IsIsolatedToken = true;  // Whether the next emitted token is isolated.
  for (size_t i = 0, e = String.size(); i != e; ++i) {
    char Char = String[i];
    if (Variant.BreakCharacters.find(Char) != std::string::npos) {
      if (InTok) {
        // Flush the pending token; the break char starts the next one.
        addAsmOperand(String.slice(Prev, i), false);
        Prev = i;
        IsIsolatedToken = false;
      }
      InTok = true;
      continue;
    }
    if (Variant.TokenizingCharacters.find(Char) != std::string::npos) {
      if (InTok) {
        addAsmOperand(String.slice(Prev, i), IsIsolatedToken);
        InTok = false;
        IsIsolatedToken = false;
      }
      // The tokenizing character is itself a one-character token.
      addAsmOperand(String.slice(i, i + 1), IsIsolatedToken);
      Prev = i + 1;
      IsIsolatedToken = true;
      continue;
    }
    if (Variant.SeparatorCharacters.find(Char) != std::string::npos) {
      if (InTok) {
        addAsmOperand(String.slice(Prev, i), IsIsolatedToken);
        InTok = false;
      }
      // Separators are dropped; whatever follows is isolated.
      Prev = i + 1;
      IsIsolatedToken = true;
      continue;
    }

    switch (Char) {
    case '\\':
      // Escape: emit the escaped character as its own token.
      if (InTok) {
        addAsmOperand(String.slice(Prev, i), false);
        InTok = false;
        IsIsolatedToken = false;
      }
      ++i;
      assert(i != String.size() && "Invalid quoted character");
      addAsmOperand(String.slice(i, i + 1), IsIsolatedToken);
      Prev = i + 1;
      IsIsolatedToken = false;
      break;

    case '$': {
      if (InTok) {
        addAsmOperand(String.slice(Prev, i), IsIsolatedToken);
        InTok = false;
        IsIsolatedToken = false;
      }

      // If this isn't "${", start new identifier looking like "$xxx"
      if (i + 1 == String.size() || String[i + 1] != '{') {
        Prev = i;
        break;
      }

      // "${name}": consume up to and including the closing brace.
      size_t EndPos = String.find('}', i);
      assert(EndPos != StringRef::npos &&
             "Missing brace in operand reference!");
      addAsmOperand(String.slice(i, EndPos+1), IsIsolatedToken);
      Prev = EndPos + 1;
      i = EndPos;
      IsIsolatedToken = false;
      break;
    }

    default:
      InTok = true;
      break;
    }
  }
  // Flush any trailing token.
  if (InTok && Prev != String.size())
    addAsmOperand(String.substr(Prev), IsIsolatedToken);
}
+
/// Check that this matchable is well-formed for the asm matcher: non-empty,
/// single-line, comment-free asm string, no operand modifiers, and (for
/// non-aliases without a custom converter) no operand referenced twice.
/// Returns false for instructions that should be silently skipped; emits a
/// fatal error for invalid definitions.
bool MatchableInfo::validate(StringRef CommentDelimiter, bool IsAlias) const {
  // Reject matchables with no .s string.
  if (AsmString.empty())
    PrintFatalError(TheDef->getLoc(), "instruction with empty asm string");

  // Reject any matchables with a newline in them, they should be marked
  // isCodeGenOnly if they are pseudo instructions.
  if (AsmString.find('\n') != std::string::npos)
    PrintFatalError(TheDef->getLoc(),
                  "multiline instruction is not valid for the asmparser, "
                  "mark it isCodeGenOnly");

  // Remove comments from the asm string. We know that the asmstring only
  // has one line.
  if (!CommentDelimiter.empty() &&
      StringRef(AsmString).contains(CommentDelimiter))
    PrintFatalError(TheDef->getLoc(),
                  "asmstring for instruction has comment character in it, "
                  "mark it isCodeGenOnly");

  // Reject matchables with operand modifiers, these aren't something we can
  // handle, the target should be refactored to use operands instead of
  // modifiers.
  //
  // Also, check for instructions which reference the operand multiple times,
  // if they don't define a custom AsmMatcher: this implies a constraint that
  // the built-in matching code would not honor.
  std::set<std::string> OperandNames;
  for (const AsmOperand &Op : AsmOperands) {
    StringRef Tok = Op.Token;
    if (Tok[0] == '$' && Tok.contains(':'))
      PrintFatalError(TheDef->getLoc(),
                      "matchable with operand modifier '" + Tok +
                      "' not supported by asm matcher.  Mark isCodeGenOnly!");
    // Verify that any operand is only mentioned once.
    // We reject aliases and ignore instructions for now.
    if (!IsAlias && TheDef->getValueAsString("AsmMatchConverter").empty() &&
        Tok[0] == '$' && !OperandNames.insert(std::string(Tok)).second) {
      LLVM_DEBUG({
        errs() << "warning: '" << TheDef->getName() << "': "
               << "ignoring instruction with tied operand '"
               << Tok << "'\n";
      });
      return false;
    }
  }

  return true;
}
+
+static std::string getEnumNameForToken(StringRef Str) {
+ std::string Res;
+
+ for (char C : Str) {
+ switch (C) {
+ case '*': Res += "_STAR_"; break;
+ case '%': Res += "_PCT_"; break;
+ case ':': Res += "_COLON_"; break;
+ case '!': Res += "_EXCLAIM_"; break;
+ case '.': Res += "_DOT_"; break;
+ case '<': Res += "_LT_"; break;
+ case '>': Res += "_GT_"; break;
+ case '-': Res += "_MINUS_"; break;
+ case '#': Res += "_HASH_"; break;
+ default:
+ if (isAlnum(C))
+ Res += C;
+ else
+ Res += "_" + utostr((unsigned)C) + "_";
+ }
+ }
+
+ return Res;
+}
+
+ClassInfo *AsmMatcherInfo::getTokenClass(StringRef Token) {
+ ClassInfo *&Entry = TokenClasses[std::string(Token)];
+
+ if (!Entry) {
+ Classes.emplace_front();
+ Entry = &Classes.front();
+ Entry->Kind = ClassInfo::Token;
+ Entry->ClassName = "Token";
+ Entry->Name = "MCK_" + getEnumNameForToken(Token);
+ Entry->ValueName = std::string(Token);
+ Entry->PredicateMethod = "<invalid>";
+ Entry->RenderMethod = "<invalid>";
+ Entry->ParserMethod = "";
+ Entry->DiagnosticType = "";
+ Entry->IsOptional = false;
+ Entry->DefaultMethod = "<invalid>";
+ }
+
+ return Entry;
+}
+
+ClassInfo *
+AsmMatcherInfo::getOperandClass(const CGIOperandList::OperandInfo &OI,
+ int SubOpIdx) {
+ Record *Rec = OI.Rec;
+ if (SubOpIdx != -1)
+ Rec = cast<DefInit>(OI.MIOperandInfo->getArg(SubOpIdx))->getDef();
+ return getOperandClass(Rec, SubOpIdx);
+}
+
/// Resolve the ClassInfo for an operand record. Handles the three operand
/// flavors: RegisterOperand (custom ParserMatchClass or its RegClass),
/// RegisterClass, and plain Operand (via its ParserMatchClass). Emits a
/// fatal error when no class info can be found.
ClassInfo *
AsmMatcherInfo::getOperandClass(Record *Rec, int SubOpIdx) {
  if (Rec->isSubClassOf("RegisterOperand")) {
    // RegisterOperand may have an associated ParserMatchClass. If it does,
    // use it, else just fall back to the underlying register class.
    const RecordVal *R = Rec->getValue("ParserMatchClass");
    if (!R || !R->getValue())
      PrintFatalError(Rec->getLoc(),
                      "Record `" + Rec->getName() +
                      "' does not have a ParserMatchClass!\n");

    if (DefInit *DI= dyn_cast<DefInit>(R->getValue())) {
      Record *MatchClass = DI->getDef();
      if (ClassInfo *CI = AsmOperandClasses[MatchClass])
        return CI;
    }

    // No custom match class. Just use the register class.
    Record *ClassRec = Rec->getValueAsDef("RegClass");
    if (!ClassRec)
      PrintFatalError(Rec->getLoc(), "RegisterOperand `" + Rec->getName() +
                    "' has no associated register class!\n");
    if (ClassInfo *CI = RegisterClassClasses[ClassRec])
      return CI;
    PrintFatalError(Rec->getLoc(), "register class has no class info!");
  }

  if (Rec->isSubClassOf("RegisterClass")) {
    if (ClassInfo *CI = RegisterClassClasses[Rec])
      return CI;
    PrintFatalError(Rec->getLoc(), "register class has no class info!");
  }

  if (!Rec->isSubClassOf("Operand"))
    PrintFatalError(Rec->getLoc(), "Operand `" + Rec->getName() +
                  "' does not derive from class Operand!\n");
  Record *MatchClass = Rec->getValueAsDef("ParserMatchClass");
  if (ClassInfo *CI = AsmOperandClasses[MatchClass])
    return CI;

  PrintFatalError(Rec->getLoc(), "operand has no match class!");
}
+
/// Strict weak ordering for RegisterSets keyed by stable record IDs.
struct LessRegisterSet {
  bool operator() (const RegisterSet &LHS, const RegisterSet & RHS) const {
    // std::set<T> defines its own comparison "operator<", but it
    // performs a lexicographical comparison by T's innate comparison
    // for some reason. We don't want non-deterministic pointer
    // comparisons so use this instead.
    return std::lexicographical_compare(LHS.begin(), LHS.end(),
                                        RHS.begin(), RHS.end(),
                                        LessRecordByID());
  }
};
+
/// buildRegisterClasses - Build the ClassInfo* instances for register classes.
///
/// Creates one register set per user-defined RegisterClass and per required
/// singleton register, adds derived intersection sets so every register maps
/// to a unique most-specific class, wires up the superclass lattice, and
/// fills RegisterClasses / RegisterClassClasses.
void AsmMatcherInfo::
buildRegisterClasses(SmallPtrSetImpl<Record*> &SingletonRegisters) {
  const auto &Registers = Target.getRegBank().getRegisters();
  auto &RegClassList = Target.getRegBank().getRegClasses();

  typedef std::set<RegisterSet, LessRegisterSet> RegisterSetSet;

  // The register sets used for matching.
  RegisterSetSet RegisterSets;

  // Gather the defined sets.
  for (const CodeGenRegisterClass &RC : RegClassList)
    RegisterSets.insert(
        RegisterSet(RC.getOrder().begin(), RC.getOrder().end()));

  // Add any required singleton sets.
  for (Record *Rec : SingletonRegisters) {
    RegisterSets.insert(RegisterSet(&Rec, &Rec + 1));
  }

  // Introduce derived sets where necessary (when a register does not determine
  // a unique register set class), and build the mapping of registers to the set
  // they should classify to.
  std::map<Record*, RegisterSet> RegisterMap;
  for (const CodeGenRegister &CGR : Registers) {
    // Compute the intersection of all sets containing this register.
    RegisterSet ContainingSet;

    for (const RegisterSet &RS : RegisterSets) {
      if (!RS.count(CGR.TheDef))
        continue;

      if (ContainingSet.empty()) {
        ContainingSet = RS;
        continue;
      }

      // Intersect the running set with RS via a temporary.
      RegisterSet Tmp;
      std::swap(Tmp, ContainingSet);
      std::insert_iterator<RegisterSet> II(ContainingSet,
                                           ContainingSet.begin());
      std::set_intersection(Tmp.begin(), Tmp.end(), RS.begin(), RS.end(), II,
                            LessRecordByID());
    }

    if (!ContainingSet.empty()) {
      RegisterSets.insert(ContainingSet);
      RegisterMap.insert(std::make_pair(CGR.TheDef, ContainingSet));
    }
  }

  // Construct the register classes.
  std::map<RegisterSet, ClassInfo*, LessRegisterSet> RegisterSetClasses;
  unsigned Index = 0;
  for (const RegisterSet &RS : RegisterSets) {
    Classes.emplace_front();
    ClassInfo *CI = &Classes.front();
    CI->Kind = ClassInfo::RegisterClass0 + Index;
    CI->ClassName = "Reg" + utostr(Index);
    CI->Name = "MCK_Reg" + utostr(Index);
    CI->ValueName = "";
    CI->PredicateMethod = ""; // unused
    CI->RenderMethod = "addRegOperands";
    CI->Registers = RS;
    // FIXME: diagnostic type.
    CI->DiagnosticType = "";
    CI->IsOptional = false;
    CI->DefaultMethod = ""; // unused
    RegisterSetClasses.insert(std::make_pair(RS, CI));
    ++Index;
  }

  // Find the superclasses; we could compute only the subgroup lattice edges,
  // but there isn't really a point.
  for (const RegisterSet &RS : RegisterSets) {
    ClassInfo *CI = RegisterSetClasses[RS];
    for (const RegisterSet &RS2 : RegisterSets)
      if (RS != RS2 &&
          std::includes(RS2.begin(), RS2.end(), RS.begin(), RS.end(),
                        LessRecordByID()))
        CI->SuperClasses.push_back(RegisterSetClasses[RS2]);
  }

  // Name the register classes which correspond to a user defined RegisterClass.
  for (const CodeGenRegisterClass &RC : RegClassList) {
    // Def will be NULL for non-user defined register classes.
    Record *Def = RC.getDef();
    if (!Def)
      continue;
    ClassInfo *CI = RegisterSetClasses[RegisterSet(RC.getOrder().begin(),
                                                   RC.getOrder().end())];
    if (CI->ValueName.empty()) {
      CI->ClassName = RC.getName();
      CI->Name = "MCK_" + RC.getName();
      CI->ValueName = RC.getName();
    } else
      // Several RegisterClasses can share one set; record all their names.
      CI->ValueName = CI->ValueName + "," + RC.getName();

    Init *DiagnosticType = Def->getValueInit("DiagnosticType");
    if (StringInit *SI = dyn_cast<StringInit>(DiagnosticType))
      CI->DiagnosticType = std::string(SI->getValue());

    Init *DiagnosticString = Def->getValueInit("DiagnosticString");
    if (StringInit *SI = dyn_cast<StringInit>(DiagnosticString))
      CI->DiagnosticString = std::string(SI->getValue());

    // If we have a diagnostic string but the diagnostic type is not specified
    // explicitly, create an anonymous diagnostic type.
    if (!CI->DiagnosticString.empty() && CI->DiagnosticType.empty())
      CI->DiagnosticType = RC.getName();

    RegisterClassClasses.insert(std::make_pair(Def, CI));
  }

  // Populate the map for individual registers.
  for (auto &It : RegisterMap)
    RegisterClasses[It.first] = RegisterSetClasses[It.second];

  // Name the register classes which correspond to singleton registers.
  for (Record *Rec : SingletonRegisters) {
    ClassInfo *CI = RegisterClasses[Rec];
    assert(CI && "Missing singleton register class info!");

    if (CI->ValueName.empty()) {
      CI->ClassName = std::string(Rec->getName());
      CI->Name = "MCK_" + Rec->getName().str();
      CI->ValueName = std::string(Rec->getName());
    } else
      CI->ValueName = CI->ValueName + "," + Rec->getName().str();
  }
}
+
/// buildOperandClasses - Build the ClassInfo* instances for user defined
/// operand classes (AsmOperandClass records), including their superclass
/// links, predicate/render/parser/default method names and diagnostics.
void AsmMatcherInfo::buildOperandClasses() {
  std::vector<Record*> AsmOperands =
    Records.getAllDerivedDefinitions("AsmOperandClass");

  // Pre-populate AsmOperandClasses map.
  // Done up front so SuperClasses references below can resolve regardless of
  // the order the records are visited in.
  for (Record *Rec : AsmOperands) {
    Classes.emplace_front();
    AsmOperandClasses[Rec] = &Classes.front();
  }

  unsigned Index = 0;
  for (Record *Rec : AsmOperands) {
    ClassInfo *CI = AsmOperandClasses[Rec];
    CI->Kind = ClassInfo::UserClass0 + Index;

    ListInit *Supers = Rec->getValueAsListInit("SuperClasses");
    for (Init *I : Supers->getValues()) {
      DefInit *DI = dyn_cast<DefInit>(I);
      if (!DI) {
        PrintError(Rec->getLoc(), "Invalid super class reference!");
        continue;
      }

      ClassInfo *SC = AsmOperandClasses[DI->getDef()];
      if (!SC)
        PrintError(Rec->getLoc(), "Invalid super class reference!");
      else
        CI->SuperClasses.push_back(SC);
    }
    CI->ClassName = std::string(Rec->getValueAsString("Name"));
    CI->Name = "MCK_" + CI->ClassName;
    CI->ValueName = std::string(Rec->getName());

    // Get or construct the predicate method name.
    // An unset field falls back to the "is<Name>" convention.
    Init *PMName = Rec->getValueInit("PredicateMethod");
    if (StringInit *SI = dyn_cast<StringInit>(PMName)) {
      CI->PredicateMethod = std::string(SI->getValue());
    } else {
      assert(isa<UnsetInit>(PMName) && "Unexpected PredicateMethod field!");
      CI->PredicateMethod = "is" + CI->ClassName;
    }

    // Get or construct the render method name.
    // An unset field falls back to the "add<Name>Operands" convention.
    Init *RMName = Rec->getValueInit("RenderMethod");
    if (StringInit *SI = dyn_cast<StringInit>(RMName)) {
      CI->RenderMethod = std::string(SI->getValue());
    } else {
      assert(isa<UnsetInit>(RMName) && "Unexpected RenderMethod field!");
      CI->RenderMethod = "add" + CI->ClassName + "Operands";
    }

    // Get the parse method name or leave it as empty.
    Init *PRMName = Rec->getValueInit("ParserMethod");
    if (StringInit *SI = dyn_cast<StringInit>(PRMName))
      CI->ParserMethod = std::string(SI->getValue());

    // Get the diagnostic type and string or leave them as empty.
    Init *DiagnosticType = Rec->getValueInit("DiagnosticType");
    if (StringInit *SI = dyn_cast<StringInit>(DiagnosticType))
      CI->DiagnosticType = std::string(SI->getValue());
    Init *DiagnosticString = Rec->getValueInit("DiagnosticString");
    if (StringInit *SI = dyn_cast<StringInit>(DiagnosticString))
      CI->DiagnosticString = std::string(SI->getValue());
    // If we have a DiagnosticString, we need a DiagnosticType for use within
    // the matcher.
    if (!CI->DiagnosticString.empty() && CI->DiagnosticType.empty())
      CI->DiagnosticType = CI->ClassName;

    Init *IsOptional = Rec->getValueInit("IsOptional");
    if (BitInit *BI = dyn_cast<BitInit>(IsOptional))
      CI->IsOptional = BI->getValue();

    // Get or construct the default method name.
    // An unset field falls back to the "default<Name>Operands" convention.
    Init *DMName = Rec->getValueInit("DefaultMethod");
    if (StringInit *SI = dyn_cast<StringInit>(DMName)) {
      CI->DefaultMethod = std::string(SI->getValue());
    } else {
      assert(isa<UnsetInit>(DMName) && "Unexpected DefaultMethod field!");
      CI->DefaultMethod = "default" + CI->ClassName + "Operands";
    }

    ++Index;
  }
}
+
/// Bind the record keeper, AsmParser record and target; the actual tables
/// are populated later by buildInfo().
AsmMatcherInfo::AsmMatcherInfo(Record *asmParser,
                               CodeGenTarget &target,
                               RecordKeeper &records)
  : Records(records), AsmParser(asmParser), Target(target) {
}
+
/// buildOperandMatchInfo - Build the necessary information to handle user
/// defined operand parsing methods.
///
/// For every matchable, collects a per-class bit mask of the operand
/// positions parsed by a custom ParserMethod (or all positions when
/// CallCustomParserForAllOperands is set) and records one OperandMatchEntry
/// per (matchable, class) pair.
void AsmMatcherInfo::buildOperandMatchInfo() {

  /// Map containing a mask with all operands indices that can be found for
  /// that class inside a instruction.
  typedef std::map<ClassInfo *, unsigned, deref<std::less<>>> OpClassMaskTy;
  OpClassMaskTy OpClassMask;

  bool CallCustomParserForAllOperands =
      AsmParser->getValueAsBit("CallCustomParserForAllOperands");
  for (const auto &MI : Matchables) {
    OpClassMask.clear();

    // Keep track of all operands of this instructions which belong to the
    // same class.
    unsigned NumOptionalOps = 0;
    for (unsigned i = 0, e = MI->AsmOperands.size(); i != e; ++i) {
      const MatchableInfo::AsmOperand &Op = MI->AsmOperands[i];
      if (CallCustomParserForAllOperands || !Op.Class->ParserMethod.empty()) {
        unsigned &OperandMask = OpClassMask[Op.Class];
        // Each preceding optional operand widens the span of positions this
        // operand may occupy, hence the run of set bits.
        OperandMask |= maskTrailingOnes<unsigned>(NumOptionalOps + 1)
                       << (i - NumOptionalOps);
      }
      if (Op.Class->IsOptional)
        ++NumOptionalOps;
    }

    // Generate operand match info for each mnemonic/operand class pair.
    for (const auto &OCM : OpClassMask) {
      unsigned OpMask = OCM.second;
      ClassInfo *CI = OCM.first;
      OperandMatchInfo.push_back(OperandMatchEntry::create(MI.get(), CI,
                                                           OpMask));
    }
  }
}
+
+void AsmMatcherInfo::buildInfo() {
+ // Build information about all of the AssemblerPredicates.
+ const std::vector<std::pair<Record *, SubtargetFeatureInfo>>
+ &SubtargetFeaturePairs = SubtargetFeatureInfo::getAll(Records);
+ SubtargetFeatures.insert(SubtargetFeaturePairs.begin(),
+ SubtargetFeaturePairs.end());
+#ifndef NDEBUG
+ for (const auto &Pair : SubtargetFeatures)
+ LLVM_DEBUG(Pair.second.dump());
+#endif // NDEBUG
+
+ bool HasMnemonicFirst = AsmParser->getValueAsBit("HasMnemonicFirst");
+ bool ReportMultipleNearMisses =
+ AsmParser->getValueAsBit("ReportMultipleNearMisses");
+
+ // Parse the instructions; we need to do this first so that we can gather the
+ // singleton register classes.
+ SmallPtrSet<Record*, 16> SingletonRegisters;
+ unsigned VariantCount = Target.getAsmParserVariantCount();
+ for (unsigned VC = 0; VC != VariantCount; ++VC) {
+ Record *AsmVariant = Target.getAsmParserVariant(VC);
+ StringRef CommentDelimiter =
+ AsmVariant->getValueAsString("CommentDelimiter");
+ AsmVariantInfo Variant;
+ Variant.RegisterPrefix = AsmVariant->getValueAsString("RegisterPrefix");
+ Variant.TokenizingCharacters =
+ AsmVariant->getValueAsString("TokenizingCharacters");
+ Variant.SeparatorCharacters =
+ AsmVariant->getValueAsString("SeparatorCharacters");
+ Variant.BreakCharacters =
+ AsmVariant->getValueAsString("BreakCharacters");
+ Variant.Name = AsmVariant->getValueAsString("Name");
+ Variant.AsmVariantNo = AsmVariant->getValueAsInt("Variant");
+
+ for (const CodeGenInstruction *CGI : Target.getInstructionsByEnumValue()) {
+
+ // If the tblgen -match-prefix option is specified (for tblgen hackers),
+ // filter the set of instructions we consider.
+ if (!StringRef(CGI->TheDef->getName()).startswith(MatchPrefix))
+ continue;
+
+ // Ignore "codegen only" instructions.
+ if (CGI->TheDef->getValueAsBit("isCodeGenOnly"))
+ continue;
+
+ // Ignore instructions for different instructions
+ StringRef V = CGI->TheDef->getValueAsString("AsmVariantName");
+ if (!V.empty() && V != Variant.Name)
+ continue;
+
+ auto II = std::make_unique<MatchableInfo>(*CGI);
+
+ II->initialize(*this, SingletonRegisters, Variant, HasMnemonicFirst);
+
+ // Ignore instructions which shouldn't be matched and diagnose invalid
+ // instruction definitions with an error.
+ if (!II->validate(CommentDelimiter, false))
+ continue;
+
+ Matchables.push_back(std::move(II));
+ }
+
+ // Parse all of the InstAlias definitions and stick them in the list of
+ // matchables.
+ std::vector<Record*> AllInstAliases =
+ Records.getAllDerivedDefinitions("InstAlias");
+ for (Record *InstAlias : AllInstAliases) {
+ auto Alias = std::make_unique<CodeGenInstAlias>(InstAlias, Target);
+
+ // If the tblgen -match-prefix option is specified (for tblgen hackers),
+ // filter the set of instruction aliases we consider, based on the target
+ // instruction.
+ if (!StringRef(Alias->ResultInst->TheDef->getName())
+ .startswith( MatchPrefix))
+ continue;
+
+ StringRef V = Alias->TheDef->getValueAsString("AsmVariantName");
+ if (!V.empty() && V != Variant.Name)
+ continue;
+
+ auto II = std::make_unique<MatchableInfo>(std::move(Alias));
+
+ II->initialize(*this, SingletonRegisters, Variant, HasMnemonicFirst);
+
+ // Validate the alias definitions.
+ II->validate(CommentDelimiter, true);
+
+ Matchables.push_back(std::move(II));
+ }
+ }
+
+ // Build info for the register classes.
+ buildRegisterClasses(SingletonRegisters);
+
+ // Build info for the user defined assembly operand classes.
+ buildOperandClasses();
+
+ // Build the information about matchables, now that we have fully formed
+ // classes.
+ std::vector<std::unique_ptr<MatchableInfo>> NewMatchables;
+ for (auto &II : Matchables) {
+ // Parse the tokens after the mnemonic.
+ // Note: buildInstructionOperandReference may insert new AsmOperands, so
+ // don't precompute the loop bound.
+ for (unsigned i = 0; i != II->AsmOperands.size(); ++i) {
+ MatchableInfo::AsmOperand &Op = II->AsmOperands[i];
+ StringRef Token = Op.Token;
+
+ // Check for singleton registers.
+ if (Record *RegRecord = Op.SingletonReg) {
+ Op.Class = RegisterClasses[RegRecord];
+ assert(Op.Class && Op.Class->Registers.size() == 1 &&
+ "Unexpected class for singleton register");
+ continue;
+ }
+
+ // Check for simple tokens.
+ if (Token[0] != '$') {
+ Op.Class = getTokenClass(Token);
+ continue;
+ }
+
+ if (Token.size() > 1 && isdigit(Token[1])) {
+ Op.Class = getTokenClass(Token);
+ continue;
+ }
+
+ // Otherwise this is an operand reference.
+ StringRef OperandName;
+ if (Token[1] == '{')
+ OperandName = Token.substr(2, Token.size() - 3);
+ else
+ OperandName = Token.substr(1);
+
+ if (II->DefRec.is<const CodeGenInstruction*>())
+ buildInstructionOperandReference(II.get(), OperandName, i);
+ else
+ buildAliasOperandReference(II.get(), OperandName, Op);
+ }
+
+ if (II->DefRec.is<const CodeGenInstruction*>()) {
+ II->buildInstructionResultOperands();
+ // If the instruction has a two-operand alias, build up the
+ // matchable here. We'll add them in bulk at the end to avoid
+ // confusing this loop.
+ StringRef Constraint =
+ II->TheDef->getValueAsString("TwoOperandAliasConstraint");
+ if (Constraint != "") {
+ // Start by making a copy of the original matchable.
+ auto AliasII = std::make_unique<MatchableInfo>(*II);
+
+ // Adjust it to be a two-operand alias.
+ AliasII->formTwoOperandAlias(Constraint);
+
+ // Add the alias to the matchables list.
+ NewMatchables.push_back(std::move(AliasII));
+ }
+ } else
+ // FIXME: The tied operands checking is not yet integrated with the
+ // framework for reporting multiple near misses. To prevent invalid
+ // formats from being matched with an alias if a tied-operands check
+ // would otherwise have disallowed it, we just disallow such constructs
+ // in TableGen completely.
+ II->buildAliasResultOperands(!ReportMultipleNearMisses);
+ }
+ if (!NewMatchables.empty())
+ Matchables.insert(Matchables.end(),
+ std::make_move_iterator(NewMatchables.begin()),
+ std::make_move_iterator(NewMatchables.end()));
+
+ // Process token alias definitions and set up the associated superclass
+ // information.
+ std::vector<Record*> AllTokenAliases =
+ Records.getAllDerivedDefinitions("TokenAlias");
+ for (Record *Rec : AllTokenAliases) {
+ ClassInfo *FromClass = getTokenClass(Rec->getValueAsString("FromToken"));
+ ClassInfo *ToClass = getTokenClass(Rec->getValueAsString("ToToken"));
+ if (FromClass == ToClass)
+ PrintFatalError(Rec->getLoc(),
+ "error: Destination value identical to source value.");
+ FromClass->SuperClasses.push_back(ToClass);
+ }
+
+ // Reorder classes so that classes precede super classes.
+ Classes.sort();
+
+#ifdef EXPENSIVE_CHECKS
+ // Verify that the table is sorted and operator < works transitively.
+ for (auto I = Classes.begin(), E = Classes.end(); I != E; ++I) {
+ for (auto J = I; J != E; ++J) {
+ assert(!(*J < *I));
+ assert(I == J || !J->isSubsetOf(*I));
+ }
+ }
+#endif
+}
+
+/// buildInstructionOperandReference - The specified operand is a reference to a
+/// named operand such as $src. Resolve the Class and OperandInfo pointers.
+///
+/// \param II          Matchable whose asm-operand list is being resolved.
+/// \param OperandName Name after the '$' (or inside "${...}") in the asm string.
+/// \param AsmOpIdx    Index of the asm operand within II->AsmOperands. Note
+///                    that the vector may grow below (suboperand expansion),
+///                    so pointers into it must be refreshed after insertion.
+void AsmMatcherInfo::
+buildInstructionOperandReference(MatchableInfo *II,
+                                 StringRef OperandName,
+                                 unsigned AsmOpIdx) {
+  const CodeGenInstruction &CGI = *II->DefRec.get<const CodeGenInstruction*>();
+  const CGIOperandList &Operands = CGI.Operands;
+  MatchableInfo::AsmOperand *Op = &II->AsmOperands[AsmOpIdx];
+
+  // Map this token to an operand.
+  unsigned Idx;
+  if (!Operands.hasOperandNamed(OperandName, Idx))
+    PrintFatalError(II->TheDef->getLoc(),
+                    "error: unable to find operand: '" + OperandName + "'");
+
+  // If the instruction operand has multiple suboperands, but the parser
+  // match class for the asm operand is still the default "ImmAsmOperand",
+  // then handle each suboperand separately.
+  if (Op->SubOpIdx == -1 && Operands[Idx].MINumOperands > 1) {
+    Record *Rec = Operands[Idx].Rec;
+    assert(Rec->isSubClassOf("Operand") && "Unexpected operand!");
+    Record *MatchClass = Rec->getValueAsDef("ParserMatchClass");
+    if (MatchClass && MatchClass->getValueAsString("Name") == "Imm") {
+      // Insert remaining suboperands after AsmOpIdx in II->AsmOperands.
+      StringRef Token = Op->Token; // save this in case Op gets moved
+      for (unsigned SI = 1, SE = Operands[Idx].MINumOperands; SI != SE; ++SI) {
+        MatchableInfo::AsmOperand NewAsmOp(/*IsIsolatedToken=*/true, Token);
+        NewAsmOp.SubOpIdx = SI;
+        II->AsmOperands.insert(II->AsmOperands.begin()+AsmOpIdx+SI, NewAsmOp);
+      }
+      // Replace Op with first suboperand.
+      // The insertions above may have reallocated the vector; re-derive the
+      // pointer before writing through it.
+      Op = &II->AsmOperands[AsmOpIdx]; // update the pointer in case it moved
+      Op->SubOpIdx = 0;
+    }
+  }
+
+  // Set up the operand class.
+  Op->Class = getOperandClass(Operands[Idx], Op->SubOpIdx);
+  Op->OrigSrcOpName = OperandName;
+
+  // If the named operand is tied, canonicalize it to the untied operand.
+  // For example, something like:
+  //   (outs GPR:$dst), (ins GPR:$src)
+  // with an asmstring of
+  //   "inc $src"
+  // we want to canonicalize to:
+  //   "inc $dst"
+  // so that we know how to provide the $dst operand when filling in the result.
+  int OITied = -1;
+  if (Operands[Idx].MINumOperands == 1)
+    OITied = Operands[Idx].getTiedRegister();
+  if (OITied != -1) {
+    // The tied operand index is an MIOperand index, find the operand that
+    // contains it.
+    std::pair<unsigned, unsigned> Idx = Operands.getSubOperandNumber(OITied);
+    OperandName = Operands[Idx.first].Name;
+    Op->SubOpIdx = Idx.second;
+  }
+
+  // SrcOpName carries the (possibly canonicalized-to-untied) name; the
+  // original spelling was preserved in OrigSrcOpName above.
+  Op->SrcOpName = OperandName;
+}
+
+/// buildAliasOperandReference - When parsing an operand reference out of the
+/// matching string (e.g. "movsx $src, $dst"), determine what the class of the
+/// operand reference is by looking it up in the result pattern definition.
+void AsmMatcherInfo::buildAliasOperandReference(MatchableInfo *II,
+                                                StringRef OperandName,
+                                                MatchableInfo::AsmOperand &Op) {
+  const CodeGenInstAlias &CGA = *II->DefRec.get<const CodeGenInstAlias*>();
+
+  // Scan the alias result pattern for a record operand carrying this name and
+  // take the operand class from it.
+  for (unsigned Idx = 0, NumOps = CGA.ResultOperands.size(); Idx != NumOps;
+       ++Idx) {
+    const CodeGenInstAlias::ResultOperand &ResOp = CGA.ResultOperands[Idx];
+    if (!ResOp.isRecord() || ResOp.getName() != OperandName)
+      continue;
+
+    // It's safe to go with the first one we find, because CodeGenInstAlias
+    // validates that all operands with the same name have the same record.
+    Op.SubOpIdx = CGA.ResultInstOperandIndex[Idx].second;
+    // Use the match class from the Alias definition, not the
+    // destination instruction, as we may have an immediate that's
+    // being munged by the match class.
+    Op.Class = getOperandClass(ResOp.getRecord(), Op.SubOpIdx);
+    Op.SrcOpName = OperandName;
+    Op.OrigSrcOpName = OperandName;
+    return;
+  }
+
+  PrintFatalError(II->TheDef->getLoc(),
+                  "error: unable to find operand: '" + OperandName + "'");
+}
+
+/// buildInstructionResultOperands - Compute the ResOperands list for a plain
+/// instruction matchable: for every MC operand of the result instruction,
+/// record whether it is rendered from a parsed asm operand, copied from a
+/// tied operand, or padded with a dummy immediate.
+void MatchableInfo::buildInstructionResultOperands() {
+  const CodeGenInstruction *ResultInst = getResultInst();
+
+  // Loop over all operands of the result instruction, determining how to
+  // populate them.
+  for (const CGIOperandList::OperandInfo &OpInfo : ResultInst->Operands) {
+    // If this is a tied operand, just copy from the previously handled operand.
+    int TiedOp = -1;
+    if (OpInfo.MINumOperands == 1)
+      TiedOp = OpInfo.getTiedRegister();
+    if (TiedOp != -1) {
+      // If the tied source was rendered from an asm operand, also record the
+      // asm-operand indices so the runtime 'tied' constraint can be checked;
+      // otherwise fall back to a plain copy (indices 0, 0).
+      int TiedSrcOperand = findAsmOperandOriginallyNamed(OpInfo.Name);
+      if (TiedSrcOperand != -1 &&
+          ResOperands[TiedOp].Kind == ResOperand::RenderAsmOperand)
+        ResOperands.push_back(ResOperand::getTiedOp(
+            TiedOp, ResOperands[TiedOp].AsmOperandNum, TiedSrcOperand));
+      else
+        ResOperands.push_back(ResOperand::getTiedOp(TiedOp, 0, 0));
+      continue;
+    }
+
+    int SrcOperand = findAsmOperandNamed(OpInfo.Name);
+    if (OpInfo.Name.empty() || SrcOperand == -1) {
+      // This may happen for operands that are tied to a suboperand of a
+      // complex operand.  Simply use a dummy value here; nobody should
+      // use this operand slot.
+      // FIXME: The long term goal is for the MCOperand list to not contain
+      // tied operands at all.
+      ResOperands.push_back(ResOperand::getImmOp(0));
+      continue;
+    }
+
+    // Check if the one AsmOperand populates the entire operand.
+    unsigned NumOperands = OpInfo.MINumOperands;
+    if (AsmOperands[SrcOperand].SubOpIdx == -1) {
+      ResOperands.push_back(ResOperand::getRenderedOp(SrcOperand, NumOperands));
+      continue;
+    }
+
+    // Add a separate ResOperand for each suboperand.
+    // buildInstructionOperandReference expanded these into consecutive
+    // AsmOperands with SubOpIdx 0..NumOperands-1; verify that layout here.
+    for (unsigned AI = 0; AI < NumOperands; ++AI) {
+      assert(AsmOperands[SrcOperand+AI].SubOpIdx == (int)AI &&
+             AsmOperands[SrcOperand+AI].SrcOpName == OpInfo.Name &&
+             "unexpected AsmOperands for suboperands");
+      ResOperands.push_back(ResOperand::getRenderedOp(SrcOperand + AI, 1));
+    }
+  }
+}
+
+/// buildAliasResultOperands - Compute the ResOperands list for an InstAlias
+/// matchable, mapping every MC operand of the aliased instruction back to the
+/// operands parsed from the alias asm string.
+///
+/// \param AliasConstraintsAreChecked  When false (i.e. multiple near-miss
+/// reporting is enabled and tied-operand checking is not integrated with it),
+/// repeated operand references are not recorded in OperandRefs, which makes
+/// the "can never be matched" check at the end reject such aliases outright.
+void MatchableInfo::buildAliasResultOperands(bool AliasConstraintsAreChecked) {
+  const CodeGenInstAlias &CGA = *DefRec.get<const CodeGenInstAlias*>();
+  const CodeGenInstruction *ResultInst = getResultInst();
+
+  // Map of:  $reg -> #lastref
+  //   where $reg is the name of the operand in the asm string
+  //   where #lastref is the last processed index where $reg was referenced in
+  //   the asm string.
+  SmallDenseMap<StringRef, int> OperandRefs;
+
+  // Loop over all operands of the result instruction, determining how to
+  // populate them.
+  unsigned AliasOpNo = 0;
+  unsigned LastOpNo = CGA.ResultInstOperandIndex.size();
+  for (unsigned i = 0, e = ResultInst->Operands.size(); i != e; ++i) {
+    const CGIOperandList::OperandInfo *OpInfo = &ResultInst->Operands[i];
+
+    // If this is a tied operand, just copy from the previously handled operand.
+    int TiedOp = -1;
+    if (OpInfo->MINumOperands == 1)
+      TiedOp = OpInfo->getTiedRegister();
+    if (TiedOp != -1) {
+      unsigned SrcOp1 = 0;
+      unsigned SrcOp2 = 0;
+
+      // If an operand has been specified twice in the asm string,
+      // add the two source operand's indices to the TiedOp so that
+      // at runtime the 'tied' constraint is checked.
+      if (ResOperands[TiedOp].Kind == ResOperand::RenderAsmOperand) {
+        SrcOp1 = ResOperands[TiedOp].AsmOperandNum;
+
+        // Find the next operand (similarly named operand) in the string.
+        StringRef Name = AsmOperands[SrcOp1].SrcOpName;
+        auto Insert = OperandRefs.try_emplace(Name, SrcOp1);
+        SrcOp2 = findAsmOperandNamed(Name, Insert.first->second);
+
+        // Not updating the record in OperandRefs will cause TableGen
+        // to fail with an error at the end of this function.
+        if (AliasConstraintsAreChecked)
+          Insert.first->second = SrcOp2;
+
+        // In case it only has one reference in the asm string,
+        // it doesn't need to be checked for tied constraints.
+        SrcOp2 = (SrcOp2 == (unsigned)-1) ? SrcOp1 : SrcOp2;
+      }
+
+      // If the alias operand is of a different operand class, we only want
+      // to benefit from the tied-operands check and just match the operand
+      // as a normal, but not copy the original (TiedOp) to the result
+      // instruction. We do this by passing -1 as the tied operand to copy.
+      if (ResultInst->Operands[i].Rec->getName() !=
+          ResultInst->Operands[TiedOp].Rec->getName()) {
+        SrcOp1 = ResOperands[TiedOp].AsmOperandNum;
+        int SubIdx = CGA.ResultInstOperandIndex[AliasOpNo].second;
+        StringRef Name = CGA.ResultOperands[AliasOpNo].getName();
+        SrcOp2 = findAsmOperand(Name, SubIdx);
+        ResOperands.push_back(
+            ResOperand::getTiedOp((unsigned)-1, SrcOp1, SrcOp2));
+      } else {
+        ResOperands.push_back(ResOperand::getTiedOp(TiedOp, SrcOp1, SrcOp2));
+        continue;
+      }
+      // Note: only the different-class branch reaches here; the operand must
+      // still be matched normally, so intentionally fall through to the
+      // suboperand loop below (no 'continue').
+    }
+
+    // Handle all the suboperands for this operand.
+    const std::string &OpName = OpInfo->Name;
+    for ( ; AliasOpNo <  LastOpNo &&
+            CGA.ResultInstOperandIndex[AliasOpNo].first == i; ++AliasOpNo) {
+      int SubIdx = CGA.ResultInstOperandIndex[AliasOpNo].second;
+
+      // Find out what operand from the asmparser that this MCInst operand
+      // comes from.
+      switch (CGA.ResultOperands[AliasOpNo].Kind) {
+      case CodeGenInstAlias::ResultOperand::K_Record: {
+        StringRef Name = CGA.ResultOperands[AliasOpNo].getName();
+        int SrcOperand = findAsmOperand(Name, SubIdx);
+        if (SrcOperand == -1)
+          PrintFatalError(TheDef->getLoc(), "Instruction '" +
+                        TheDef->getName() + "' has operand '" + OpName +
+                        "' that doesn't appear in asm string!");
+
+        // Add it to the operand references. If it is added a second time, the
+        // record won't be updated and it will fail later on.
+        OperandRefs.try_emplace(Name, SrcOperand);
+
+        unsigned NumOperands = (SubIdx == -1 ? OpInfo->MINumOperands : 1);
+        ResOperands.push_back(ResOperand::getRenderedOp(SrcOperand,
+                                                        NumOperands));
+        break;
+      }
+      case CodeGenInstAlias::ResultOperand::K_Imm: {
+        int64_t ImmVal = CGA.ResultOperands[AliasOpNo].getImm();
+        ResOperands.push_back(ResOperand::getImmOp(ImmVal));
+        break;
+      }
+      case CodeGenInstAlias::ResultOperand::K_Reg: {
+        Record *Reg = CGA.ResultOperands[AliasOpNo].getRegister();
+        ResOperands.push_back(ResOperand::getRegOp(Reg));
+        break;
+      }
+      }
+    }
+  }
+
+  // Check that operands are not repeated more times than is supported.
+  for (auto &T : OperandRefs) {
+    if (T.second != -1 && findAsmOperandNamed(T.first, T.second) != -1)
+      PrintFatalError(TheDef->getLoc(),
+                      "Operand '" + T.first + "' can never be matched");
+  }
+}
+
+/// getConverterOperandID - Intern \p Name in \p Table and return its index.
+/// \p IsNew is set to true when the name was not previously in the table.
+static unsigned
+getConverterOperandID(const std::string &Name,
+                      SmallSetVector<CachedHashString, 16> &Table,
+                      bool &IsNew) {
+  // Try to add the conversion-kind name; insert() reports whether it was new.
+  IsNew = Table.insert(CachedHashString(Name));
+
+  // A freshly inserted entry lands at the end of the set-vector; otherwise
+  // locate the position of the existing entry.
+  unsigned ID;
+  if (IsNew)
+    ID = Table.size() - 1;
+  else
+    ID = find(Table, Name) - Table.begin();
+
+  assert(ID < Table.size());
+
+  return ID;
+}
+
+/// emitConvertFuncs - Emit convertToMCInst() (parsed operands -> MCInst) and
+/// convertToMapAndConstraints() (operand-number / constraint mapping) to
+/// \p OS, together with the shared ConversionTable that drives both.
+///
+/// Each table row is a sequence of (conversion kind, operand index) byte
+/// pairs terminated by CVT_Done; CVT_Tied entries occupy four bytes (kind
+/// plus a 3-byte tied-operand tuple referenced via TiedAsmOperandTable).
+/// \returns the number of instruction conversion kinds (table rows) emitted.
+static unsigned
+emitConvertFuncs(CodeGenTarget &Target, StringRef ClassName,
+                 std::vector<std::unique_ptr<MatchableInfo>> &Infos,
+                 bool HasMnemonicFirst, bool HasOptionalOperands,
+                 raw_ostream &OS) {
+  SmallSetVector<CachedHashString, 16> OperandConversionKinds;
+  SmallSetVector<CachedHashString, 16> InstructionConversionKinds;
+  std::vector<std::vector<uint8_t> > ConversionTable;
+  size_t MaxRowLength = 2; // minimum is custom converter plus terminator.
+
+  // TargetOperandClass - This is the target's operand class, like X86Operand.
+  std::string TargetOperandClass = Target.getName().str() + "Operand";
+
+  // Write the convert function to a separate stream, so we can drop it after
+  // the enum. We'll build up the conversion handlers for the individual
+  // operand types opportunistically as we encounter them.
+  std::string ConvertFnBody;
+  raw_string_ostream CvtOS(ConvertFnBody);
+  // Start the unified conversion function.
+  if (HasOptionalOperands) {
+    CvtOS << "void " << Target.getName() << ClassName << "::\n"
+          << "convertToMCInst(unsigned Kind, MCInst &Inst, "
+          << "unsigned Opcode,\n"
+          << "                const OperandVector &Operands,\n"
+          << "                const SmallBitVector &OptionalOperandsMask) {\n";
+  } else {
+    CvtOS << "void " << Target.getName() << ClassName << "::\n"
+          << "convertToMCInst(unsigned Kind, MCInst &Inst, "
+          << "unsigned Opcode,\n"
+          << "                const OperandVector &Operands) {\n";
+  }
+  CvtOS << "  assert(Kind < CVT_NUM_SIGNATURES && \"Invalid signature!\");\n";
+  CvtOS << "  const uint8_t *Converter = ConversionTable[Kind];\n";
+  if (HasOptionalOperands) {
+    // Emit a DefaultsOffset prefix-sum so operand indices in the table can be
+    // corrected for optional operands that were omitted from the source.
+    size_t MaxNumOperands = 0;
+    for (const auto &MI : Infos) {
+      MaxNumOperands = std::max(MaxNumOperands, MI->AsmOperands.size());
+    }
+    CvtOS << "  unsigned DefaultsOffset[" << (MaxNumOperands + 1)
+          << "] = { 0 };\n";
+    CvtOS << "  assert(OptionalOperandsMask.size() == " << (MaxNumOperands)
+          << ");\n";
+    CvtOS << "  for (unsigned i = 0, NumDefaults = 0; i < " << (MaxNumOperands)
+          << "; ++i) {\n";
+    CvtOS << "    DefaultsOffset[i + 1] = NumDefaults;\n";
+    CvtOS << "    NumDefaults += (OptionalOperandsMask[i] ? 1 : 0);\n";
+    CvtOS << "  }\n";
+  }
+  CvtOS << "  unsigned OpIdx;\n";
+  CvtOS << "  Inst.setOpcode(Opcode);\n";
+  CvtOS << "  for (const uint8_t *p = Converter; *p; p += 2) {\n";
+  if (HasOptionalOperands) {
+    CvtOS << "    OpIdx = *(p + 1) - DefaultsOffset[*(p + 1)];\n";
+  } else {
+    CvtOS << "    OpIdx = *(p + 1);\n";
+  }
+  CvtOS << "    switch (*p) {\n";
+  CvtOS << "    default: llvm_unreachable(\"invalid conversion entry!\");\n";
+  CvtOS << "    case CVT_Reg:\n";
+  CvtOS << "      static_cast<" << TargetOperandClass
+        << " &>(*Operands[OpIdx]).addRegOperands(Inst, 1);\n";
+  CvtOS << "      break;\n";
+  CvtOS << "    case CVT_Tied: {\n";
+  CvtOS << "      assert(OpIdx < (size_t)(std::end(TiedAsmOperandTable) -\n";
+  CvtOS << "                          std::begin(TiedAsmOperandTable)) &&\n";
+  CvtOS << "             \"Tied operand not found\");\n";
+  CvtOS << "      unsigned TiedResOpnd = TiedAsmOperandTable[OpIdx][0];\n";
+  CvtOS << "      if (TiedResOpnd != (uint8_t)-1)\n";
+  CvtOS << "        Inst.addOperand(Inst.getOperand(TiedResOpnd));\n";
+  CvtOS << "      break;\n";
+  CvtOS << "    }\n";
+
+  std::string OperandFnBody;
+  raw_string_ostream OpOS(OperandFnBody);
+  // Start the operand number lookup function.
+  OpOS << "void " << Target.getName() << ClassName << "::\n"
+       << "convertToMapAndConstraints(unsigned Kind,\n";
+  OpOS.indent(27);
+  OpOS << "const OperandVector &Operands) {\n"
+       << "  assert(Kind < CVT_NUM_SIGNATURES && \"Invalid signature!\");\n"
+       << "  unsigned NumMCOperands = 0;\n"
+       << "  const uint8_t *Converter = ConversionTable[Kind];\n"
+       << "  for (const uint8_t *p = Converter; *p; p += 2) {\n"
+       << "    switch (*p) {\n"
+       << "    default: llvm_unreachable(\"invalid conversion entry!\");\n"
+       << "    case CVT_Reg:\n"
+       << "      Operands[*(p + 1)]->setMCOperandNum(NumMCOperands);\n"
+       << "      Operands[*(p + 1)]->setConstraint(\"r\");\n"
+       << "      ++NumMCOperands;\n"
+       << "      break;\n"
+       << "    case CVT_Tied:\n"
+       << "      ++NumMCOperands;\n"
+       << "      break;\n";
+
+  // Pre-populate the operand conversion kinds with the standard always
+  // available entries.
+  OperandConversionKinds.insert(CachedHashString("CVT_Done"));
+  OperandConversionKinds.insert(CachedHashString("CVT_Reg"));
+  OperandConversionKinds.insert(CachedHashString("CVT_Tied"));
+  // Keep this enum in sync with the three insertions above: the numeric
+  // values are the indices the entries received in OperandConversionKinds.
+  enum { CVT_Done, CVT_Reg, CVT_Tied };
+
+  // Map of e.g. <0, 2, 3> -> "Tie_0_2_3" enum label.
+  std::map<std::tuple<uint8_t, uint8_t, uint8_t>, std::string>
+      TiedOperandsEnumMap;
+
+  for (auto &II : Infos) {
+    // Check if we have a custom match function.
+    StringRef AsmMatchConverter =
+        II->getResultInst()->TheDef->getValueAsString("AsmMatchConverter");
+    if (!AsmMatchConverter.empty() && II->UseInstAsmMatchConverter) {
+      std::string Signature = ("ConvertCustom_" + AsmMatchConverter).str();
+      II->ConversionFnKind = Signature;
+
+      // Check if we have already generated this signature.
+      if (!InstructionConversionKinds.insert(CachedHashString(Signature)))
+        continue;
+
+      // Remember this converter for the kind enum.
+      unsigned KindID = OperandConversionKinds.size();
+      OperandConversionKinds.insert(
+          CachedHashString("CVT_" + getEnumNameForToken(AsmMatchConverter)));
+
+      // Add the converter row for this instruction.
+      ConversionTable.emplace_back();
+      ConversionTable.back().push_back(KindID);
+      ConversionTable.back().push_back(CVT_Done);
+
+      // Add the handler to the conversion driver function.
+      CvtOS << "    case CVT_"
+            << getEnumNameForToken(AsmMatchConverter) << ":\n"
+            << "      " << AsmMatchConverter << "(Inst, Operands);\n"
+            << "      break;\n";
+
+      // FIXME: Handle the operand number lookup for custom match functions.
+      continue;
+    }
+
+    // Build the conversion function signature.
+    std::string Signature = "Convert";
+
+    std::vector<uint8_t> ConversionRow;
+
+    // Compute the convert enum and the case body.
+    MaxRowLength = std::max(MaxRowLength, II->ResOperands.size()*2 + 1 );
+
+    for (unsigned i = 0, e = II->ResOperands.size(); i != e; ++i) {
+      const MatchableInfo::ResOperand &OpInfo = II->ResOperands[i];
+
+      // Generate code to populate each result operand.
+      switch (OpInfo.Kind) {
+      case MatchableInfo::ResOperand::RenderAsmOperand: {
+        // This comes from something we parsed.
+        const MatchableInfo::AsmOperand &Op =
+            II->AsmOperands[OpInfo.AsmOperandNum];
+
+        // Registers are always converted the same, don't duplicate the
+        // conversion function based on them.
+        Signature += "__";
+        std::string Class;
+        Class = Op.Class->isRegisterClass() ? "Reg" : Op.Class->ClassName;
+        Signature += Class;
+        Signature += utostr(OpInfo.MINumOperands);
+        Signature += "_" + itostr(OpInfo.AsmOperandNum);
+
+        // Add the conversion kind, if necessary, and get the associated ID
+        // the index of its entry in the vector).
+        std::string Name = "CVT_" + (Op.Class->isRegisterClass() ? "Reg" :
+                                     Op.Class->RenderMethod);
+        if (Op.Class->IsOptional) {
+          // For optional operands we must also care about DefaultMethod
+          assert(HasOptionalOperands);
+          Name += "_" + Op.Class->DefaultMethod;
+        }
+        Name = getEnumNameForToken(Name);
+
+        bool IsNewConverter = false;
+        unsigned ID = getConverterOperandID(Name, OperandConversionKinds,
+                                            IsNewConverter);
+
+        // Add the operand entry to the instruction kind conversion row.
+        ConversionRow.push_back(ID);
+        ConversionRow.push_back(OpInfo.AsmOperandNum + HasMnemonicFirst);
+
+        if (!IsNewConverter)
+          break;
+
+        // This is a new operand kind. Add a handler for it to the
+        // converter driver.
+        CvtOS << "    case " << Name << ":\n";
+        if (Op.Class->IsOptional) {
+          // If optional operand is not present in actual instruction then we
+          // should call its DefaultMethod before RenderMethod
+          assert(HasOptionalOperands);
+          CvtOS << "      if (OptionalOperandsMask[*(p + 1) - 1]) {\n"
+                << "        " << Op.Class->DefaultMethod << "()"
+                << "->" << Op.Class->RenderMethod << "(Inst, "
+                << OpInfo.MINumOperands << ");\n"
+                << "      } else {\n"
+                << "        static_cast<" << TargetOperandClass
+                << " &>(*Operands[OpIdx])." << Op.Class->RenderMethod
+                << "(Inst, " << OpInfo.MINumOperands << ");\n"
+                << "      }\n";
+        } else {
+          CvtOS << "      static_cast<" << TargetOperandClass
+                << " &>(*Operands[OpIdx])." << Op.Class->RenderMethod
+                << "(Inst, " << OpInfo.MINumOperands << ");\n";
+        }
+        CvtOS << "      break;\n";
+
+        // Add a handler for the operand number lookup.
+        OpOS << "    case " << Name << ":\n"
+             << "      Operands[*(p + 1)]->setMCOperandNum(NumMCOperands);\n";
+
+        if (Op.Class->isRegisterClass())
+          OpOS << "      Operands[*(p + 1)]->setConstraint(\"r\");\n";
+        else
+          OpOS << "      Operands[*(p + 1)]->setConstraint(\"m\");\n";
+        OpOS << "      NumMCOperands += " << OpInfo.MINumOperands << ";\n"
+             << "      break;\n";
+        break;
+      }
+      case MatchableInfo::ResOperand::TiedOperand: {
+        // If this operand is tied to a previous one, just copy the MCInst
+        // operand from the earlier one.We can only tie single MCOperand values.
+        assert(OpInfo.MINumOperands == 1 && "Not a singular MCOperand");
+        uint8_t TiedOp = OpInfo.TiedOperands.ResOpnd;
+        uint8_t SrcOp1 =
+            OpInfo.TiedOperands.SrcOpnd1Idx + HasMnemonicFirst;
+        uint8_t SrcOp2 =
+            OpInfo.TiedOperands.SrcOpnd2Idx + HasMnemonicFirst;
+        assert((i > TiedOp || TiedOp == (uint8_t)-1) &&
+               "Tied operand precedes its target!");
+        auto TiedTupleName = std::string("Tie") + utostr(TiedOp) + '_' +
+                             utostr(SrcOp1) + '_' + utostr(SrcOp2);
+        Signature += "__" + TiedTupleName;
+        // Tied entries are 4 bytes wide in the row (kind + 3-byte tuple),
+        // unlike the 2-byte entries of the other kinds.
+        ConversionRow.push_back(CVT_Tied);
+        ConversionRow.push_back(TiedOp);
+        ConversionRow.push_back(SrcOp1);
+        ConversionRow.push_back(SrcOp2);
+
+        // Also create an 'enum' for this combination of tied operands.
+        auto Key = std::make_tuple(TiedOp, SrcOp1, SrcOp2);
+        TiedOperandsEnumMap.emplace(Key, TiedTupleName);
+        break;
+      }
+      case MatchableInfo::ResOperand::ImmOperand: {
+        int64_t Val = OpInfo.ImmVal;
+        std::string Ty = "imm_" + itostr(Val);
+        Ty = getEnumNameForToken(Ty);
+        Signature += "__" + Ty;
+
+        std::string Name = "CVT_" + Ty;
+        bool IsNewConverter = false;
+        unsigned ID = getConverterOperandID(Name, OperandConversionKinds,
+                                            IsNewConverter);
+        // Add the operand entry to the instruction kind conversion row.
+        ConversionRow.push_back(ID);
+        ConversionRow.push_back(0);
+
+        if (!IsNewConverter)
+          break;
+
+        CvtOS << "    case " << Name << ":\n"
+              << "      Inst.addOperand(MCOperand::createImm(" << Val << "));\n"
+              << "      break;\n";
+
+        OpOS << "    case " << Name << ":\n"
+             << "      Operands[*(p + 1)]->setMCOperandNum(NumMCOperands);\n"
+             << "      Operands[*(p + 1)]->setConstraint(\"\");\n"
+             << "      ++NumMCOperands;\n"
+             << "      break;\n";
+        break;
+      }
+      case MatchableInfo::ResOperand::RegOperand: {
+        std::string Reg, Name;
+        if (!OpInfo.Register) {
+          Name = "reg0";
+          Reg = "0";
+        } else {
+          Reg = getQualifiedName(OpInfo.Register);
+          Name = "reg" + OpInfo.Register->getName().str();
+        }
+        Signature += "__" + Name;
+        Name = "CVT_" + Name;
+        bool IsNewConverter = false;
+        unsigned ID = getConverterOperandID(Name, OperandConversionKinds,
+                                            IsNewConverter);
+        // Add the operand entry to the instruction kind conversion row.
+        ConversionRow.push_back(ID);
+        ConversionRow.push_back(0);
+
+        if (!IsNewConverter)
+          break;
+        CvtOS << "    case " << Name << ":\n"
+              << "      Inst.addOperand(MCOperand::createReg(" << Reg << "));\n"
+              << "      break;\n";
+
+        OpOS << "    case " << Name << ":\n"
+             << "      Operands[*(p + 1)]->setMCOperandNum(NumMCOperands);\n"
+             << "      Operands[*(p + 1)]->setConstraint(\"m\");\n"
+             << "      ++NumMCOperands;\n"
+             << "      break;\n";
+      }
+      }
+    }
+
+    // If there were no operands, add to the signature to that effect
+    if (Signature == "Convert")
+      Signature += "_NoOperands";
+
+    II->ConversionFnKind = Signature;
+
+    // Save the signature. If we already have it, don't add a new row
+    // to the table.
+    if (!InstructionConversionKinds.insert(CachedHashString(Signature)))
+      continue;
+
+    // Add the row to the table.
+    ConversionTable.push_back(std::move(ConversionRow));
+  }
+
+  // Finish up the converter driver function.
+  CvtOS << "    }\n  }\n}\n\n";
+
+  // Finish up the operand number lookup function.
+  OpOS << "    }\n  }\n}\n\n";
+
+  // Output a static table for tied operands.
+  if (TiedOperandsEnumMap.size()) {
+    // The number of tied operand combinations will be small in practice,
+    // but just add the assert to be sure.
+    assert(TiedOperandsEnumMap.size() <= 254 &&
+           "Too many tied-operand combinations to reference with "
+           "an 8bit offset from the conversion table, where index "
+           "'255' is reserved as operand not to be copied.");
+
+    OS << "enum {\n";
+    for (auto &KV : TiedOperandsEnumMap) {
+      OS << "  " << KV.second << ",\n";
+    }
+    OS << "};\n\n";
+
+    OS << "static const uint8_t TiedAsmOperandTable[][3] = {\n";
+    for (auto &KV : TiedOperandsEnumMap) {
+      OS << "  /* " << KV.second << " */ { "
+         << utostr(std::get<0>(KV.first)) << ", "
+         << utostr(std::get<1>(KV.first)) << ", "
+         << utostr(std::get<2>(KV.first)) << " },\n";
+    }
+    OS << "};\n\n";
+  } else
+    OS << "static const uint8_t TiedAsmOperandTable[][3] = "
+          "{ /* empty */ {0, 0, 0} };\n\n";
+
+  OS << "namespace {\n";
+
+  // Output the operand conversion kind enum.
+  OS << "enum OperatorConversionKind {\n";
+  for (const auto &Converter : OperandConversionKinds)
+    OS << "  " << Converter << ",\n";
+  OS << "  CVT_NUM_CONVERTERS\n";
+  OS << "};\n\n";
+
+  // Output the instruction conversion kind enum.
+  OS << "enum InstructionConversionKind {\n";
+  for (const auto &Signature : InstructionConversionKinds)
+    OS << "  " << Signature << ",\n";
+  OS << "  CVT_NUM_SIGNATURES\n";
+  OS << "};\n\n";
+
+  OS << "} // end anonymous namespace\n\n";
+
+  // Output the conversion table.
+  OS << "static const uint8_t ConversionTable[CVT_NUM_SIGNATURES]["
+     << MaxRowLength << "] = {\n";
+
+  for (unsigned Row = 0, ERow = ConversionTable.size(); Row != ERow; ++Row) {
+    assert(ConversionTable[Row].size() % 2 == 0 && "bad conversion row!");
+    OS << "  // " << InstructionConversionKinds[Row] << "\n";
+    OS << "  { ";
+    for (unsigned i = 0, e = ConversionTable[Row].size(); i != e; i += 2) {
+      OS << OperandConversionKinds[ConversionTable[Row][i]] << ", ";
+      if (OperandConversionKinds[ConversionTable[Row][i]] !=
+          CachedHashString("CVT_Tied")) {
+        OS << (unsigned)(ConversionTable[Row][i + 1]) << ", ";
+        continue;
+      }
+
+      // For a tied operand, emit a reference to the TiedAsmOperandTable
+      // that contains the operand to copy, and the parsed operands to
+      // check for their tied constraints.
+      auto Key = std::make_tuple((uint8_t)ConversionTable[Row][i + 1],
+                                 (uint8_t)ConversionTable[Row][i + 2],
+                                 (uint8_t)ConversionTable[Row][i + 3]);
+      auto TiedOpndEnum = TiedOperandsEnumMap.find(Key);
+      assert(TiedOpndEnum != TiedOperandsEnumMap.end() &&
+             "No record for tied operand pair");
+      OS << TiedOpndEnum->second << ", ";
+      // Skip the two extra bytes consumed by the tied-operand tuple.
+      i += 2;
+    }
+    OS << "CVT_Done },\n";
+  }
+
+  OS << "};\n\n";
+
+  // Spit out the conversion driver function.
+  OS << CvtOS.str();
+
+  // Spit out the operand number lookup function.
+  OS << OpOS.str();
+
+  return ConversionTable.size();
+}
+
+/// emitMatchClassEnumeration - Emit the enumeration for match class kinds.
+///
+/// Relies on \p Infos being sorted so that all Token classes come first,
+/// then register classes, then user classes; the MCK_LAST_TOKEN and
+/// MCK_LAST_REGISTER sentinels are emitted at those transitions.
+static void emitMatchClassEnumeration(CodeGenTarget &Target,
+                                      std::forward_list<ClassInfo> &Infos,
+                                      raw_ostream &OS) {
+  OS << "namespace {\n\n";
+
+  OS << "/// MatchClassKind - The kinds of classes which participate in\n"
+     << "/// instruction matching.\n";
+  OS << "enum MatchClassKind {\n";
+  OS << "  InvalidMatchClass = 0,\n";
+  OS << "  OptionalMatchClass = 1,\n";
+  ClassInfo::ClassInfoKind LastKind = ClassInfo::Token;
+  StringRef LastName = "OptionalMatchClass";
+  for (const auto &CI : Infos) {
+    // Emit a sentinel alias whenever the class kind crosses a boundary.
+    if (LastKind == ClassInfo::Token && CI.Kind != ClassInfo::Token) {
+      OS << "  MCK_LAST_TOKEN = " << LastName << ",\n";
+    } else if (LastKind < ClassInfo::UserClass0 &&
+               CI.Kind >= ClassInfo::UserClass0) {
+      OS << "  MCK_LAST_REGISTER = " << LastName << ",\n";
+    }
+    LastKind = (ClassInfo::ClassInfoKind)CI.Kind;
+    LastName = CI.Name;
+
+    // Emit the enumerator with a trailing comment describing the class.
+    OS << "  " << CI.Name << ", // ";
+    if (CI.Kind == ClassInfo::Token) {
+      OS << "'" << CI.ValueName << "'\n";
+    } else if (CI.isRegisterClass()) {
+      if (!CI.ValueName.empty())
+        OS << "register class '" << CI.ValueName << "'\n";
+      else
+        OS << "derived register class\n";
+    } else {
+      OS << "user defined class '" << CI.ValueName << "'\n";
+    }
+  }
+  OS << "  NumMatchClassKinds\n";
+  OS << "};\n\n";
+
+  OS << "} // end anonymous namespace\n\n";
+}
+
+/// emitOperandMatchErrorDiagStrings - Emit a function to get the diagnostic
+/// text to be used when an assembly operand does not match the expected
+/// operand class.
+static void emitOperandMatchErrorDiagStrings(AsmMatcherInfo &Info, raw_ostream &OS) {
+  // If the target does not use DiagnosticString for any operands, don't emit
+  // an unused function.
+  if (llvm::all_of(Info.Classes, [](const ClassInfo &CI) {
+        return CI.DiagnosticString.empty();
+      }))
+    return;
+
+  OS << "static const char *getMatchKindDiag(" << Info.Target.getName()
+     << "AsmParser::" << Info.Target.getName()
+     << "MatchResultTy MatchResult) {\n";
+  OS << "  switch (MatchResult) {\n";
+
+  // One case per class that declares a DiagnosticString, keyed by its
+  // DiagnosticType enumerator.
+  for (const auto &CI: Info.Classes) {
+    if (!CI.DiagnosticString.empty()) {
+      assert(!CI.DiagnosticType.empty() &&
+             "DiagnosticString set without DiagnosticType");
+      OS << "  case " << Info.Target.getName()
+         << "AsmParser::Match_" << CI.DiagnosticType << ":\n";
+      OS << "    return \"" << CI.DiagnosticString << "\";\n";
+    }
+  }
+
+  OS << "  default:\n";
+  OS << "    return nullptr;\n";
+
+  OS << "  }\n";
+  OS << "}\n\n";
+}
+
+/// emitRegisterMatchErrorFunc - Emit getDiagKindFromRegisterClass(), which
+/// maps a register MatchClassKind to the target diagnostic kind to report
+/// when a register operand fails to match.
+static void emitRegisterMatchErrorFunc(AsmMatcherInfo &Info, raw_ostream &OS) {
+  OS << "static unsigned getDiagKindFromRegisterClass(MatchClassKind "
+        "RegisterClass) {\n";
+  // If no register class declares a DiagnosticType, the generic diagnostic
+  // is the only possible answer; skip emitting the switch entirely.
+  if (none_of(Info.Classes, [](const ClassInfo &CI) {
+        return CI.isRegisterClass() && !CI.DiagnosticType.empty();
+      })) {
+    OS << "  return MCTargetAsmParser::Match_InvalidOperand;\n";
+  } else {
+    OS << "  switch (RegisterClass) {\n";
+    for (const auto &CI: Info.Classes) {
+      if (CI.isRegisterClass() && !CI.DiagnosticType.empty()) {
+        OS << "  case " << CI.Name << ":\n";
+        OS << "    return " << Info.Target.getName() << "AsmParser::Match_"
+           << CI.DiagnosticType << ";\n";
+      }
+    }
+
+    OS << "  default:\n";
+    OS << "    return MCTargetAsmParser::Match_InvalidOperand;\n";
+
+    OS << "  }\n";
+  }
+  OS << "}\n\n";
+}
+
+/// emitValidateOperandClass - Emit the function to validate an operand class.
+///
+/// The emitted validateOperandClass() checks, in order: token classes, user
+/// classes (via their predicate methods), then register classes (including
+/// subclass relations), and returns a Match_* result or a diagnostic kind.
+static void emitValidateOperandClass(AsmMatcherInfo &Info,
+                                     raw_ostream &OS) {
+  OS << "static unsigned validateOperandClass(MCParsedAsmOperand &GOp, "
+     << "MatchClassKind Kind) {\n";
+  OS << "  " << Info.Target.getName() << "Operand &Operand = ("
+     << Info.Target.getName() << "Operand &)GOp;\n";
+
+  // The InvalidMatchClass is not to match any operand.
+  OS << "  if (Kind == InvalidMatchClass)\n";
+  OS << "    return MCTargetAsmParser::Match_InvalidOperand;\n\n";
+
+  // Check for Token operands first.
+  // FIXME: Use a more specific diagnostic type.
+  OS << "  if (Operand.isToken() && Kind <= MCK_LAST_TOKEN)\n";
+  OS << "    return isSubclass(matchTokenString(Operand.getToken()), Kind) ?\n"
+     << "             MCTargetAsmParser::Match_Success :\n"
+     << "             MCTargetAsmParser::Match_InvalidOperand;\n\n";
+
+  // Check the user classes. We don't care what order since we're only
+  // actually matching against one of them.
+  OS << "  switch (Kind) {\n"
+        "  default: break;\n";
+  for (const auto &CI : Info.Classes) {
+    if (!CI.isUserClass())
+      continue;
+
+    OS << "  // '" << CI.ClassName << "' class\n";
+    OS << "  case " << CI.Name << ": {\n";
+    OS << "    DiagnosticPredicate DP(Operand." << CI.PredicateMethod
+       << "());\n";
+    OS << "    if (DP.isMatch())\n";
+    OS << "      return MCTargetAsmParser::Match_Success;\n";
+    // Near misses only produce a specific diagnostic when the class
+    // declares a DiagnosticType; otherwise fall through to the generic one.
+    if (!CI.DiagnosticType.empty()) {
+      OS << "    if (DP.isNearMatch())\n";
+      OS << "      return " << Info.Target.getName() << "AsmParser::Match_"
+         << CI.DiagnosticType << ";\n";
+      OS << "    break;\n";
+    }
+    else
+      OS << "    break;\n";
+    OS << "    }\n";
+  }
+  OS << "  } // end switch (Kind)\n\n";
+
+  // Check for register operands, including sub-classes.
+  OS << "  if (Operand.isReg()) {\n";
+  OS << "    MatchClassKind OpKind;\n";
+  OS << "    switch (Operand.getReg()) {\n";
+  OS << "    default: OpKind = InvalidMatchClass; break;\n";
+  for (const auto &RC : Info.RegisterClasses)
+    OS << "    case " << RC.first->getValueAsString("Namespace") << "::"
+       << RC.first->getName() << ": OpKind = " << RC.second->Name
+       << "; break;\n";
+  OS << "    }\n";
+  OS << "    return isSubclass(OpKind, Kind) ? "
+     << "(unsigned)MCTargetAsmParser::Match_Success :\n                 "
+     << "        getDiagKindFromRegisterClass(Kind);\n  }\n\n";
+
+  // Expected operand is a register, but actual is not.
+  OS << "  if (Kind > MCK_LAST_TOKEN && Kind <= MCK_LAST_REGISTER)\n";
+  OS << "    return getDiagKindFromRegisterClass(Kind);\n\n";
+
+  // Generic fallthrough match failure case for operands that don't have
+  // specialized diagnostic types.
+  OS << "  return MCTargetAsmParser::Match_InvalidOperand;\n";
+  OS << "}\n\n";
+}
+
+/// emitIsSubclass - Emit the subclass predicate function.
+static void emitIsSubclass(CodeGenTarget &Target,
+ std::forward_list<ClassInfo> &Infos,
+ raw_ostream &OS) {
+ OS << "/// isSubclass - Compute whether \\p A is a subclass of \\p B.\n";
+ OS << "static bool isSubclass(MatchClassKind A, MatchClassKind B) {\n";
+ OS << " if (A == B)\n";
+ OS << " return true;\n\n";
+
+ bool EmittedSwitch = false;
+ for (const auto &A : Infos) {
+ std::vector<StringRef> SuperClasses;
+ if (A.IsOptional)
+ SuperClasses.push_back("OptionalMatchClass");
+ for (const auto &B : Infos) {
+ if (&A != &B && A.isSubsetOf(B))
+ SuperClasses.push_back(B.Name);
+ }
+
+ if (SuperClasses.empty())
+ continue;
+
+ // If this is the first SuperClass, emit the switch header.
+ if (!EmittedSwitch) {
+ OS << " switch (A) {\n";
+ OS << " default:\n";
+ OS << " return false;\n";
+ EmittedSwitch = true;
+ }
+
+ OS << "\n case " << A.Name << ":\n";
+
+ if (SuperClasses.size() == 1) {
+ OS << " return B == " << SuperClasses.back() << ";\n";
+ continue;
+ }
+
+ if (!SuperClasses.empty()) {
+ OS << " switch (B) {\n";
+ OS << " default: return false;\n";
+ for (StringRef SC : SuperClasses)
+ OS << " case " << SC << ": return true;\n";
+ OS << " }\n";
+ } else {
+ // No case statement to emit
+ OS << " return false;\n";
+ }
+ }
+
+ // If there were case statements emitted into the string stream write the
+ // default.
+ if (EmittedSwitch)
+ OS << " }\n";
+ else
+ OS << " return false;\n";
+
+ OS << "}\n\n";
+}
+
+/// emitMatchTokenString - Emit the function to match a token string to the
+/// appropriate match class value.
+static void emitMatchTokenString(CodeGenTarget &Target,
+ std::forward_list<ClassInfo> &Infos,
+ raw_ostream &OS) {
+ // Construct the match list.
+ std::vector<StringMatcher::StringPair> Matches;
+ for (const auto &CI : Infos) {
+ if (CI.Kind == ClassInfo::Token)
+ Matches.emplace_back(CI.ValueName, "return " + CI.Name + ";");
+ }
+
+ OS << "static MatchClassKind matchTokenString(StringRef Name) {\n";
+
+ StringMatcher("Name", Matches, OS).Emit();
+
+ OS << " return InvalidMatchClass;\n";
+ OS << "}\n\n";
+}
+
+/// emitMatchRegisterName - Emit the function to match a string to the target
+/// specific register enum.
+static void emitMatchRegisterName(CodeGenTarget &Target, Record *AsmParser,
+ raw_ostream &OS) {
+ // Construct the match list.
+ std::vector<StringMatcher::StringPair> Matches;
+ const auto &Regs = Target.getRegBank().getRegisters();
+ for (const CodeGenRegister &Reg : Regs) {
+ if (Reg.TheDef->getValueAsString("AsmName").empty())
+ continue;
+
+ Matches.emplace_back(std::string(Reg.TheDef->getValueAsString("AsmName")),
+ "return " + utostr(Reg.EnumValue) + ";");
+ }
+
+ OS << "static unsigned MatchRegisterName(StringRef Name) {\n";
+
+ bool IgnoreDuplicates =
+ AsmParser->getValueAsBit("AllowDuplicateRegisterNames");
+ StringMatcher("Name", Matches, OS).Emit(0, IgnoreDuplicates);
+
+ OS << " return 0;\n";
+ OS << "}\n\n";
+}
+
+/// Emit the function to match a string to the target
+/// specific register enum.
+static void emitMatchRegisterAltName(CodeGenTarget &Target, Record *AsmParser,
+ raw_ostream &OS) {
+ // Construct the match list.
+ std::vector<StringMatcher::StringPair> Matches;
+ const auto &Regs = Target.getRegBank().getRegisters();
+ for (const CodeGenRegister &Reg : Regs) {
+
+ auto AltNames = Reg.TheDef->getValueAsListOfStrings("AltNames");
+
+ for (auto AltName : AltNames) {
+ AltName = StringRef(AltName).trim();
+
+ // don't handle empty alternative names
+ if (AltName.empty())
+ continue;
+
+ Matches.emplace_back(std::string(AltName),
+ "return " + utostr(Reg.EnumValue) + ";");
+ }
+ }
+
+ OS << "static unsigned MatchRegisterAltName(StringRef Name) {\n";
+
+ bool IgnoreDuplicates =
+ AsmParser->getValueAsBit("AllowDuplicateRegisterNames");
+ StringMatcher("Name", Matches, OS).Emit(0, IgnoreDuplicates);
+
+ OS << " return 0;\n";
+ OS << "}\n\n";
+}
+
+/// emitOperandDiagnosticTypes - Emit the operand matching diagnostic types.
+static void emitOperandDiagnosticTypes(AsmMatcherInfo &Info, raw_ostream &OS) {
+ // Get the set of diagnostic types from all of the operand classes.
+ std::set<StringRef> Types;
+ for (const auto &OpClassEntry : Info.AsmOperandClasses) {
+ if (!OpClassEntry.second->DiagnosticType.empty())
+ Types.insert(OpClassEntry.second->DiagnosticType);
+ }
+ for (const auto &OpClassEntry : Info.RegisterClassClasses) {
+ if (!OpClassEntry.second->DiagnosticType.empty())
+ Types.insert(OpClassEntry.second->DiagnosticType);
+ }
+
+ if (Types.empty()) return;
+
+ // Now emit the enum entries.
+ for (StringRef Type : Types)
+ OS << " Match_" << Type << ",\n";
+ OS << " END_OPERAND_DIAGNOSTIC_TYPES\n";
+}
+
+/// emitGetSubtargetFeatureName - Emit the helper function to get the
+/// user-level name for a subtarget feature.
+static void emitGetSubtargetFeatureName(AsmMatcherInfo &Info, raw_ostream &OS) {
+ OS << "// User-level names for subtarget features that participate in\n"
+ << "// instruction matching.\n"
+ << "static const char *getSubtargetFeatureName(uint64_t Val) {\n";
+ if (!Info.SubtargetFeatures.empty()) {
+ OS << " switch(Val) {\n";
+ for (const auto &SF : Info.SubtargetFeatures) {
+ const SubtargetFeatureInfo &SFI = SF.second;
+ // FIXME: Totally just a placeholder name to get the algorithm working.
+ OS << " case " << SFI.getEnumBitName() << ": return \""
+ << SFI.TheDef->getValueAsString("PredicateName") << "\";\n";
+ }
+ OS << " default: return \"(unknown)\";\n";
+ OS << " }\n";
+ } else {
+ // Nothing to emit, so skip the switch
+ OS << " return \"(unknown)\";\n";
+ }
+ OS << "}\n\n";
+}
+
+static std::string GetAliasRequiredFeatures(Record *R,
+ const AsmMatcherInfo &Info) {
+ std::vector<Record*> ReqFeatures = R->getValueAsListOfDefs("Predicates");
+ std::string Result;
+
+ if (ReqFeatures.empty())
+ return Result;
+
+ for (unsigned i = 0, e = ReqFeatures.size(); i != e; ++i) {
+ const SubtargetFeatureInfo *F = Info.getSubtargetFeature(ReqFeatures[i]);
+
+ if (!F)
+ PrintFatalError(R->getLoc(), "Predicate '" + ReqFeatures[i]->getName() +
+ "' is not marked as an AssemblerPredicate!");
+
+ if (i)
+ Result += " && ";
+
+ Result += "Features.test(" + F->getEnumBitName() + ')';
+ }
+
+ return Result;
+}
+
+static void emitMnemonicAliasVariant(raw_ostream &OS,const AsmMatcherInfo &Info,
+ std::vector<Record*> &Aliases,
+ unsigned Indent = 0,
+ StringRef AsmParserVariantName = StringRef()){
+ // Keep track of all the aliases from a mnemonic. Use an std::map so that the
+ // iteration order of the map is stable.
+ std::map<std::string, std::vector<Record*> > AliasesFromMnemonic;
+
+ for (Record *R : Aliases) {
+ // FIXME: Allow AssemblerVariantName to be a comma separated list.
+ StringRef AsmVariantName = R->getValueAsString("AsmVariantName");
+ if (AsmVariantName != AsmParserVariantName)
+ continue;
+ AliasesFromMnemonic[R->getValueAsString("FromMnemonic").lower()]
+ .push_back(R);
+ }
+ if (AliasesFromMnemonic.empty())
+ return;
+
+ // Process each alias a "from" mnemonic at a time, building the code executed
+ // by the string remapper.
+ std::vector<StringMatcher::StringPair> Cases;
+ for (const auto &AliasEntry : AliasesFromMnemonic) {
+ const std::vector<Record*> &ToVec = AliasEntry.second;
+
+ // Loop through each alias and emit code that handles each case. If there
+ // are two instructions without predicates, emit an error. If there is one,
+ // emit it last.
+ std::string MatchCode;
+ int AliasWithNoPredicate = -1;
+
+ for (unsigned i = 0, e = ToVec.size(); i != e; ++i) {
+ Record *R = ToVec[i];
+ std::string FeatureMask = GetAliasRequiredFeatures(R, Info);
+
+ // If this unconditionally matches, remember it for later and diagnose
+ // duplicates.
+ if (FeatureMask.empty()) {
+ if (AliasWithNoPredicate != -1 &&
+ R->getValueAsString("ToMnemonic") !=
+ ToVec[AliasWithNoPredicate]->getValueAsString("ToMnemonic")) {
+ // We can't have two different aliases from the same mnemonic with no
+ // predicate.
+ PrintError(
+ ToVec[AliasWithNoPredicate]->getLoc(),
+ "two different MnemonicAliases with the same 'from' mnemonic!");
+ PrintFatalError(R->getLoc(), "this is the other MnemonicAlias.");
+ }
+
+ AliasWithNoPredicate = i;
+ continue;
+ }
+ if (R->getValueAsString("ToMnemonic") == AliasEntry.first)
+ PrintFatalError(R->getLoc(), "MnemonicAlias to the same string");
+
+ if (!MatchCode.empty())
+ MatchCode += "else ";
+ MatchCode += "if (" + FeatureMask + ")\n";
+ MatchCode += " Mnemonic = \"";
+ MatchCode += R->getValueAsString("ToMnemonic").lower();
+ MatchCode += "\";\n";
+ }
+
+ if (AliasWithNoPredicate != -1) {
+ Record *R = ToVec[AliasWithNoPredicate];
+ if (!MatchCode.empty())
+ MatchCode += "else\n ";
+ MatchCode += "Mnemonic = \"";
+ MatchCode += R->getValueAsString("ToMnemonic").lower();
+ MatchCode += "\";\n";
+ }
+
+ MatchCode += "return;";
+
+ Cases.push_back(std::make_pair(AliasEntry.first, MatchCode));
+ }
+ StringMatcher("Mnemonic", Cases, OS).Emit(Indent);
+}
+
+/// emitMnemonicAliases - If the target has any MnemonicAlias<> definitions,
+/// emit a function for them and return true, otherwise return false.
+static bool emitMnemonicAliases(raw_ostream &OS, const AsmMatcherInfo &Info,
+ CodeGenTarget &Target) {
+ // Ignore aliases when match-prefix is set.
+ if (!MatchPrefix.empty())
+ return false;
+
+ std::vector<Record*> Aliases =
+ Info.getRecords().getAllDerivedDefinitions("MnemonicAlias");
+ if (Aliases.empty()) return false;
+
+ OS << "static void applyMnemonicAliases(StringRef &Mnemonic, "
+ "const FeatureBitset &Features, unsigned VariantID) {\n";
+ OS << " switch (VariantID) {\n";
+ unsigned VariantCount = Target.getAsmParserVariantCount();
+ for (unsigned VC = 0; VC != VariantCount; ++VC) {
+ Record *AsmVariant = Target.getAsmParserVariant(VC);
+ int AsmParserVariantNo = AsmVariant->getValueAsInt("Variant");
+ StringRef AsmParserVariantName = AsmVariant->getValueAsString("Name");
+ OS << " case " << AsmParserVariantNo << ":\n";
+ emitMnemonicAliasVariant(OS, Info, Aliases, /*Indent=*/2,
+ AsmParserVariantName);
+ OS << " break;\n";
+ }
+ OS << " }\n";
+
+ // Emit aliases that apply to all variants.
+ emitMnemonicAliasVariant(OS, Info, Aliases);
+
+ OS << "}\n\n";
+
+ return true;
+}
+
+static void
+emitCustomOperandParsing(raw_ostream &OS, CodeGenTarget &Target,
+ const AsmMatcherInfo &Info, StringRef ClassName,
+ StringToOffsetTable &StringTable,
+ unsigned MaxMnemonicIndex, unsigned MaxFeaturesIndex,
+ bool HasMnemonicFirst, const Record &AsmParser) {
+ unsigned MaxMask = 0;
+ for (const OperandMatchEntry &OMI : Info.OperandMatchInfo) {
+ MaxMask |= OMI.OperandMask;
+ }
+
+ // Emit the static custom operand parsing table;
+ OS << "namespace {\n";
+ OS << " struct OperandMatchEntry {\n";
+ OS << " " << getMinimalTypeForRange(MaxMnemonicIndex)
+ << " Mnemonic;\n";
+ OS << " " << getMinimalTypeForRange(MaxMask)
+ << " OperandMask;\n";
+ OS << " " << getMinimalTypeForRange(std::distance(
+ Info.Classes.begin(), Info.Classes.end())) << " Class;\n";
+ OS << " " << getMinimalTypeForRange(MaxFeaturesIndex)
+ << " RequiredFeaturesIdx;\n\n";
+ OS << " StringRef getMnemonic() const {\n";
+ OS << " return StringRef(MnemonicTable + Mnemonic + 1,\n";
+ OS << " MnemonicTable[Mnemonic]);\n";
+ OS << " }\n";
+ OS << " };\n\n";
+
+ OS << " // Predicate for searching for an opcode.\n";
+ OS << " struct LessOpcodeOperand {\n";
+ OS << " bool operator()(const OperandMatchEntry &LHS, StringRef RHS) {\n";
+ OS << " return LHS.getMnemonic() < RHS;\n";
+ OS << " }\n";
+ OS << " bool operator()(StringRef LHS, const OperandMatchEntry &RHS) {\n";
+ OS << " return LHS < RHS.getMnemonic();\n";
+ OS << " }\n";
+ OS << " bool operator()(const OperandMatchEntry &LHS,";
+ OS << " const OperandMatchEntry &RHS) {\n";
+ OS << " return LHS.getMnemonic() < RHS.getMnemonic();\n";
+ OS << " }\n";
+ OS << " };\n";
+
+ OS << "} // end anonymous namespace\n\n";
+
+ OS << "static const OperandMatchEntry OperandMatchTable["
+ << Info.OperandMatchInfo.size() << "] = {\n";
+
+ OS << " /* Operand List Mnemonic, Mask, Operand Class, Features */\n";
+ for (const OperandMatchEntry &OMI : Info.OperandMatchInfo) {
+ const MatchableInfo &II = *OMI.MI;
+
+ OS << " { ";
+
+ // Store a pascal-style length byte in the mnemonic.
+ std::string LenMnemonic = char(II.Mnemonic.size()) + II.Mnemonic.lower();
+ OS << StringTable.GetOrAddStringOffset(LenMnemonic, false)
+ << " /* " << II.Mnemonic << " */, ";
+
+ OS << OMI.OperandMask;
+ OS << " /* ";
+ ListSeparator LS;
+ for (int i = 0, e = 31; i !=e; ++i)
+ if (OMI.OperandMask & (1 << i))
+ OS << LS << i;
+ OS << " */, ";
+
+ OS << OMI.CI->Name;
+
+ // Write the required features mask.
+ OS << ", AMFBS";
+ if (II.RequiredFeatures.empty())
+ OS << "_None";
+ else
+ for (unsigned i = 0, e = II.RequiredFeatures.size(); i != e; ++i)
+ OS << '_' << II.RequiredFeatures[i]->TheDef->getName();
+
+ OS << " },\n";
+ }
+ OS << "};\n\n";
+
+ // Emit the operand class switch to call the correct custom parser for
+ // the found operand class.
+ OS << "OperandMatchResultTy " << Target.getName() << ClassName << "::\n"
+ << "tryCustomParseOperand(OperandVector"
+ << " &Operands,\n unsigned MCK) {\n\n"
+ << " switch(MCK) {\n";
+
+ for (const auto &CI : Info.Classes) {
+ if (CI.ParserMethod.empty())
+ continue;
+ OS << " case " << CI.Name << ":\n"
+ << " return " << CI.ParserMethod << "(Operands);\n";
+ }
+
+ OS << " default:\n";
+ OS << " return MatchOperand_NoMatch;\n";
+ OS << " }\n";
+ OS << " return MatchOperand_NoMatch;\n";
+ OS << "}\n\n";
+
+ // Emit the static custom operand parser. This code is very similar with
+ // the other matcher. Also use MatchResultTy here just in case we go for
+ // a better error handling.
+ OS << "OperandMatchResultTy " << Target.getName() << ClassName << "::\n"
+ << "MatchOperandParserImpl(OperandVector"
+ << " &Operands,\n StringRef Mnemonic,\n"
+ << " bool ParseForAllFeatures) {\n";
+
+ // Emit code to get the available features.
+ OS << " // Get the current feature set.\n";
+ OS << " const FeatureBitset &AvailableFeatures = getAvailableFeatures();\n\n";
+
+ OS << " // Get the next operand index.\n";
+ OS << " unsigned NextOpNum = Operands.size()"
+ << (HasMnemonicFirst ? " - 1" : "") << ";\n";
+
+ // Emit code to search the table.
+ OS << " // Search the table.\n";
+ if (HasMnemonicFirst) {
+ OS << " auto MnemonicRange =\n";
+ OS << " std::equal_range(std::begin(OperandMatchTable), "
+ "std::end(OperandMatchTable),\n";
+ OS << " Mnemonic, LessOpcodeOperand());\n\n";
+ } else {
+ OS << " auto MnemonicRange = std::make_pair(std::begin(OperandMatchTable),"
+ " std::end(OperandMatchTable));\n";
+ OS << " if (!Mnemonic.empty())\n";
+ OS << " MnemonicRange =\n";
+ OS << " std::equal_range(std::begin(OperandMatchTable), "
+ "std::end(OperandMatchTable),\n";
+ OS << " Mnemonic, LessOpcodeOperand());\n\n";
+ }
+
+ OS << " if (MnemonicRange.first == MnemonicRange.second)\n";
+ OS << " return MatchOperand_NoMatch;\n\n";
+
+ OS << " for (const OperandMatchEntry *it = MnemonicRange.first,\n"
+ << " *ie = MnemonicRange.second; it != ie; ++it) {\n";
+
+ OS << " // equal_range guarantees that instruction mnemonic matches.\n";
+ OS << " assert(Mnemonic == it->getMnemonic());\n\n";
+
+ // Emit check that the required features are available.
+ OS << " // check if the available features match\n";
+ OS << " const FeatureBitset &RequiredFeatures = "
+ "FeatureBitsets[it->RequiredFeaturesIdx];\n";
+ OS << " if (!ParseForAllFeatures && (AvailableFeatures & "
+ "RequiredFeatures) != RequiredFeatures)\n";
+ OS << " continue;\n\n";
+
+ // Emit check to ensure the operand number matches.
+ OS << " // check if the operand in question has a custom parser.\n";
+ OS << " if (!(it->OperandMask & (1 << NextOpNum)))\n";
+ OS << " continue;\n\n";
+
+ // Emit call to the custom parser method
+ StringRef ParserName = AsmParser.getValueAsString("OperandParserMethod");
+ if (ParserName.empty())
+ ParserName = "tryCustomParseOperand";
+ OS << " // call custom parse method to handle the operand\n";
+ OS << " OperandMatchResultTy Result = " << ParserName
+ << "(Operands, it->Class);\n";
+ OS << " if (Result != MatchOperand_NoMatch)\n";
+ OS << " return Result;\n";
+ OS << " }\n\n";
+
+ OS << " // Okay, we had no match.\n";
+ OS << " return MatchOperand_NoMatch;\n";
+ OS << "}\n\n";
+}
+
+static void emitAsmTiedOperandConstraints(CodeGenTarget &Target,
+ AsmMatcherInfo &Info,
+ raw_ostream &OS) {
+ std::string AsmParserName =
+ std::string(Info.AsmParser->getValueAsString("AsmParserClassName"));
+ OS << "static bool ";
+ OS << "checkAsmTiedOperandConstraints(const " << Target.getName()
+ << AsmParserName << "&AsmParser,\n";
+ OS << " unsigned Kind,\n";
+ OS << " const OperandVector &Operands,\n";
+ OS << " uint64_t &ErrorInfo) {\n";
+ OS << " assert(Kind < CVT_NUM_SIGNATURES && \"Invalid signature!\");\n";
+ OS << " const uint8_t *Converter = ConversionTable[Kind];\n";
+ OS << " for (const uint8_t *p = Converter; *p; p += 2) {\n";
+ OS << " switch (*p) {\n";
+ OS << " case CVT_Tied: {\n";
+ OS << " unsigned OpIdx = *(p + 1);\n";
+ OS << " assert(OpIdx < (size_t)(std::end(TiedAsmOperandTable) -\n";
+ OS << " std::begin(TiedAsmOperandTable)) &&\n";
+ OS << " \"Tied operand not found\");\n";
+ OS << " unsigned OpndNum1 = TiedAsmOperandTable[OpIdx][1];\n";
+ OS << " unsigned OpndNum2 = TiedAsmOperandTable[OpIdx][2];\n";
+ OS << " if (OpndNum1 != OpndNum2) {\n";
+ OS << " auto &SrcOp1 = Operands[OpndNum1];\n";
+ OS << " auto &SrcOp2 = Operands[OpndNum2];\n";
+ OS << " if (!AsmParser.areEqualRegs(*SrcOp1, *SrcOp2)) {\n";
+ OS << " ErrorInfo = OpndNum2;\n";
+ OS << " return false;\n";
+ OS << " }\n";
+ OS << " }\n";
+ OS << " break;\n";
+ OS << " }\n";
+ OS << " default:\n";
+ OS << " break;\n";
+ OS << " }\n";
+ OS << " }\n";
+ OS << " return true;\n";
+ OS << "}\n\n";
+}
+
+static void emitMnemonicSpellChecker(raw_ostream &OS, CodeGenTarget &Target,
+ unsigned VariantCount) {
+ OS << "static std::string " << Target.getName()
+ << "MnemonicSpellCheck(StringRef S, const FeatureBitset &FBS,"
+ << " unsigned VariantID) {\n";
+ if (!VariantCount)
+ OS << " return \"\";";
+ else {
+ OS << " const unsigned MaxEditDist = 2;\n";
+ OS << " std::vector<StringRef> Candidates;\n";
+ OS << " StringRef Prev = \"\";\n\n";
+
+ OS << " // Find the appropriate table for this asm variant.\n";
+ OS << " const MatchEntry *Start, *End;\n";
+ OS << " switch (VariantID) {\n";
+ OS << " default: llvm_unreachable(\"invalid variant!\");\n";
+ for (unsigned VC = 0; VC != VariantCount; ++VC) {
+ Record *AsmVariant = Target.getAsmParserVariant(VC);
+ int AsmVariantNo = AsmVariant->getValueAsInt("Variant");
+ OS << " case " << AsmVariantNo << ": Start = std::begin(MatchTable" << VC
+ << "); End = std::end(MatchTable" << VC << "); break;\n";
+ }
+ OS << " }\n\n";
+ OS << " for (auto I = Start; I < End; I++) {\n";
+ OS << " // Ignore unsupported instructions.\n";
+ OS << " const FeatureBitset &RequiredFeatures = "
+ "FeatureBitsets[I->RequiredFeaturesIdx];\n";
+ OS << " if ((FBS & RequiredFeatures) != RequiredFeatures)\n";
+ OS << " continue;\n";
+ OS << "\n";
+ OS << " StringRef T = I->getMnemonic();\n";
+ OS << " // Avoid recomputing the edit distance for the same string.\n";
+ OS << " if (T.equals(Prev))\n";
+ OS << " continue;\n";
+ OS << "\n";
+ OS << " Prev = T;\n";
+ OS << " unsigned Dist = S.edit_distance(T, false, MaxEditDist);\n";
+ OS << " if (Dist <= MaxEditDist)\n";
+ OS << " Candidates.push_back(T);\n";
+ OS << " }\n";
+ OS << "\n";
+ OS << " if (Candidates.empty())\n";
+ OS << " return \"\";\n";
+ OS << "\n";
+ OS << " std::string Res = \", did you mean: \";\n";
+ OS << " unsigned i = 0;\n";
+ OS << " for (; i < Candidates.size() - 1; i++)\n";
+ OS << " Res += Candidates[i].str() + \", \";\n";
+ OS << " return Res + Candidates[i].str() + \"?\";\n";
+ }
+ OS << "}\n";
+ OS << "\n";
+}
+
+static void emitMnemonicChecker(raw_ostream &OS,
+ CodeGenTarget &Target,
+ unsigned VariantCount,
+ bool HasMnemonicFirst,
+ bool HasMnemonicAliases) {
+ OS << "static bool " << Target.getName()
+ << "CheckMnemonic(StringRef Mnemonic,\n";
+ OS << " "
+ << "const FeatureBitset &AvailableFeatures,\n";
+ OS << " "
+ << "unsigned VariantID) {\n";
+
+ if (!VariantCount) {
+ OS << " return false;\n";
+ } else {
+ if (HasMnemonicAliases) {
+ OS << " // Process all MnemonicAliases to remap the mnemonic.\n";
+ OS << " applyMnemonicAliases(Mnemonic, AvailableFeatures, VariantID);";
+ OS << "\n\n";
+ }
+ OS << " // Find the appropriate table for this asm variant.\n";
+ OS << " const MatchEntry *Start, *End;\n";
+ OS << " switch (VariantID) {\n";
+ OS << " default: llvm_unreachable(\"invalid variant!\");\n";
+ for (unsigned VC = 0; VC != VariantCount; ++VC) {
+ Record *AsmVariant = Target.getAsmParserVariant(VC);
+ int AsmVariantNo = AsmVariant->getValueAsInt("Variant");
+ OS << " case " << AsmVariantNo << ": Start = std::begin(MatchTable" << VC
+ << "); End = std::end(MatchTable" << VC << "); break;\n";
+ }
+ OS << " }\n\n";
+
+ OS << " // Search the table.\n";
+ if (HasMnemonicFirst) {
+ OS << " auto MnemonicRange = "
+ "std::equal_range(Start, End, Mnemonic, LessOpcode());\n\n";
+ } else {
+ OS << " auto MnemonicRange = std::make_pair(Start, End);\n";
+ OS << " unsigned SIndex = Mnemonic.empty() ? 0 : 1;\n";
+ OS << " if (!Mnemonic.empty())\n";
+ OS << " MnemonicRange = "
+ << "std::equal_range(Start, End, Mnemonic.lower(), LessOpcode());\n\n";
+ }
+
+ OS << " if (MnemonicRange.first == MnemonicRange.second)\n";
+ OS << " return false;\n\n";
+
+ OS << " for (const MatchEntry *it = MnemonicRange.first, "
+ << "*ie = MnemonicRange.second;\n";
+ OS << " it != ie; ++it) {\n";
+ OS << " const FeatureBitset &RequiredFeatures =\n";
+ OS << " FeatureBitsets[it->RequiredFeaturesIdx];\n";
+ OS << " if ((AvailableFeatures & RequiredFeatures) == ";
+ OS << "RequiredFeatures)\n";
+ OS << " return true;\n";
+ OS << " }\n";
+ OS << " return false;\n";
+ }
+ OS << "}\n";
+ OS << "\n";
+}
+
+// Emit a function mapping match classes to strings, for debugging.
+static void emitMatchClassKindNames(std::forward_list<ClassInfo> &Infos,
+ raw_ostream &OS) {
+ OS << "#ifndef NDEBUG\n";
+ OS << "const char *getMatchClassName(MatchClassKind Kind) {\n";
+ OS << " switch (Kind) {\n";
+
+ OS << " case InvalidMatchClass: return \"InvalidMatchClass\";\n";
+ OS << " case OptionalMatchClass: return \"OptionalMatchClass\";\n";
+ for (const auto &CI : Infos) {
+ OS << " case " << CI.Name << ": return \"" << CI.Name << "\";\n";
+ }
+ OS << " case NumMatchClassKinds: return \"NumMatchClassKinds\";\n";
+
+ OS << " }\n";
+ OS << " llvm_unreachable(\"unhandled MatchClassKind!\");\n";
+ OS << "}\n\n";
+ OS << "#endif // NDEBUG\n";
+}
+
+static std::string
+getNameForFeatureBitset(const std::vector<Record *> &FeatureBitset) {
+ std::string Name = "AMFBS";
+ for (const auto &Feature : FeatureBitset)
+ Name += ("_" + Feature->getName()).str();
+ return Name;
+}
+
+void AsmMatcherEmitter::run(raw_ostream &OS) {
+ CodeGenTarget Target(Records);
+ Record *AsmParser = Target.getAsmParser();
+ StringRef ClassName = AsmParser->getValueAsString("AsmParserClassName");
+
+ // Compute the information on the instructions to match.
+ AsmMatcherInfo Info(AsmParser, Target, Records);
+ Info.buildInfo();
+
+ // Sort the instruction table using the partial order on classes. We use
+ // stable_sort to ensure that ambiguous instructions are still
+ // deterministically ordered.
+ llvm::stable_sort(
+ Info.Matchables,
+ [](const std::unique_ptr<MatchableInfo> &a,
+ const std::unique_ptr<MatchableInfo> &b) { return *a < *b; });
+
+#ifdef EXPENSIVE_CHECKS
+ // Verify that the table is sorted and operator < works transitively.
+ for (auto I = Info.Matchables.begin(), E = Info.Matchables.end(); I != E;
+ ++I) {
+ for (auto J = I; J != E; ++J) {
+ assert(!(**J < **I));
+ }
+ }
+#endif
+
+ DEBUG_WITH_TYPE("instruction_info", {
+ for (const auto &MI : Info.Matchables)
+ MI->dump();
+ });
+
+ // Check for ambiguous matchables.
+ DEBUG_WITH_TYPE("ambiguous_instrs", {
+ unsigned NumAmbiguous = 0;
+ for (auto I = Info.Matchables.begin(), E = Info.Matchables.end(); I != E;
+ ++I) {
+ for (auto J = std::next(I); J != E; ++J) {
+ const MatchableInfo &A = **I;
+ const MatchableInfo &B = **J;
+
+ if (A.couldMatchAmbiguouslyWith(B)) {
+ errs() << "warning: ambiguous matchables:\n";
+ A.dump();
+ errs() << "\nis incomparable with:\n";
+ B.dump();
+ errs() << "\n\n";
+ ++NumAmbiguous;
+ }
+ }
+ }
+ if (NumAmbiguous)
+ errs() << "warning: " << NumAmbiguous
+ << " ambiguous matchables!\n";
+ });
+
+ // Compute the information on the custom operand parsing.
+ Info.buildOperandMatchInfo();
+
+ bool HasMnemonicFirst = AsmParser->getValueAsBit("HasMnemonicFirst");
+ bool HasOptionalOperands = Info.hasOptionalOperands();
+ bool ReportMultipleNearMisses =
+ AsmParser->getValueAsBit("ReportMultipleNearMisses");
+
+ // Write the output.
+
+ // Information for the class declaration.
+ OS << "\n#ifdef GET_ASSEMBLER_HEADER\n";
+ OS << "#undef GET_ASSEMBLER_HEADER\n";
+ OS << " // This should be included into the middle of the declaration of\n";
+ OS << " // your subclasses implementation of MCTargetAsmParser.\n";
+ OS << " FeatureBitset ComputeAvailableFeatures(const FeatureBitset &FB) const;\n";
+ if (HasOptionalOperands) {
+ OS << " void convertToMCInst(unsigned Kind, MCInst &Inst, "
+ << "unsigned Opcode,\n"
+ << " const OperandVector &Operands,\n"
+ << " const SmallBitVector &OptionalOperandsMask);\n";
+ } else {
+ OS << " void convertToMCInst(unsigned Kind, MCInst &Inst, "
+ << "unsigned Opcode,\n"
+ << " const OperandVector &Operands);\n";
+ }
+ OS << " void convertToMapAndConstraints(unsigned Kind,\n ";
+ OS << " const OperandVector &Operands) override;\n";
+ OS << " unsigned MatchInstructionImpl(const OperandVector &Operands,\n"
+ << " MCInst &Inst,\n";
+ if (ReportMultipleNearMisses)
+ OS << " SmallVectorImpl<NearMissInfo> *NearMisses,\n";
+ else
+ OS << " uint64_t &ErrorInfo,\n"
+ << " FeatureBitset &MissingFeatures,\n";
+ OS << " bool matchingInlineAsm,\n"
+ << " unsigned VariantID = 0);\n";
+ if (!ReportMultipleNearMisses)
+ OS << " unsigned MatchInstructionImpl(const OperandVector &Operands,\n"
+ << " MCInst &Inst,\n"
+ << " uint64_t &ErrorInfo,\n"
+ << " bool matchingInlineAsm,\n"
+ << " unsigned VariantID = 0) {\n"
+ << " FeatureBitset MissingFeatures;\n"
+ << " return MatchInstructionImpl(Operands, Inst, ErrorInfo, MissingFeatures,\n"
+ << " matchingInlineAsm, VariantID);\n"
+ << " }\n\n";
+
+
+ if (!Info.OperandMatchInfo.empty()) {
+ OS << " OperandMatchResultTy MatchOperandParserImpl(\n";
+ OS << " OperandVector &Operands,\n";
+ OS << " StringRef Mnemonic,\n";
+ OS << " bool ParseForAllFeatures = false);\n";
+
+ OS << " OperandMatchResultTy tryCustomParseOperand(\n";
+ OS << " OperandVector &Operands,\n";
+ OS << " unsigned MCK);\n\n";
+ }
+
+ OS << "#endif // GET_ASSEMBLER_HEADER_INFO\n\n";
+
+ // Emit the operand match diagnostic enum names.
+ OS << "\n#ifdef GET_OPERAND_DIAGNOSTIC_TYPES\n";
+ OS << "#undef GET_OPERAND_DIAGNOSTIC_TYPES\n\n";
+ emitOperandDiagnosticTypes(Info, OS);
+ OS << "#endif // GET_OPERAND_DIAGNOSTIC_TYPES\n\n";
+
+ OS << "\n#ifdef GET_REGISTER_MATCHER\n";
+ OS << "#undef GET_REGISTER_MATCHER\n\n";
+
+ // Emit the subtarget feature enumeration.
+ SubtargetFeatureInfo::emitSubtargetFeatureBitEnumeration(
+ Info.SubtargetFeatures, OS);
+
+ // Emit the function to match a register name to number.
+ // This should be omitted for Mips target
+ if (AsmParser->getValueAsBit("ShouldEmitMatchRegisterName"))
+ emitMatchRegisterName(Target, AsmParser, OS);
+
+ if (AsmParser->getValueAsBit("ShouldEmitMatchRegisterAltName"))
+ emitMatchRegisterAltName(Target, AsmParser, OS);
+
+ OS << "#endif // GET_REGISTER_MATCHER\n\n";
+
+ OS << "\n#ifdef GET_SUBTARGET_FEATURE_NAME\n";
+ OS << "#undef GET_SUBTARGET_FEATURE_NAME\n\n";
+
+ // Generate the helper function to get the names for subtarget features.
+ emitGetSubtargetFeatureName(Info, OS);
+
+ OS << "#endif // GET_SUBTARGET_FEATURE_NAME\n\n";
+
+ OS << "\n#ifdef GET_MATCHER_IMPLEMENTATION\n";
+ OS << "#undef GET_MATCHER_IMPLEMENTATION\n\n";
+
+ // Generate the function that remaps for mnemonic aliases.
+ bool HasMnemonicAliases = emitMnemonicAliases(OS, Info, Target);
+
+ // Generate the convertToMCInst function to convert operands into an MCInst.
+ // Also, generate the convertToMapAndConstraints function for MS-style inline
+ // assembly. The latter doesn't actually generate a MCInst.
+ unsigned NumConverters = emitConvertFuncs(Target, ClassName, Info.Matchables,
+ HasMnemonicFirst,
+ HasOptionalOperands, OS);
+
+ // Emit the enumeration for classes which participate in matching.
+ emitMatchClassEnumeration(Target, Info.Classes, OS);
+
+ // Emit a function to get the user-visible string to describe an operand
+ // match failure in diagnostics.
+ emitOperandMatchErrorDiagStrings(Info, OS);
+
+ // Emit a function to map register classes to operand match failure codes.
+ emitRegisterMatchErrorFunc(Info, OS);
+
+ // Emit the routine to match token strings to their match class.
+ emitMatchTokenString(Target, Info.Classes, OS);
+
+ // Emit the subclass predicate routine.
+ emitIsSubclass(Target, Info.Classes, OS);
+
+ // Emit the routine to validate an operand against a match class.
+ emitValidateOperandClass(Info, OS);
+
+ emitMatchClassKindNames(Info.Classes, OS);
+
+ // Emit the available features compute function.
+ SubtargetFeatureInfo::emitComputeAssemblerAvailableFeatures(
+ Info.Target.getName(), ClassName, "ComputeAvailableFeatures",
+ Info.SubtargetFeatures, OS);
+
+ if (!ReportMultipleNearMisses)
+ emitAsmTiedOperandConstraints(Target, Info, OS);
+
+ StringToOffsetTable StringTable;
+
+ size_t MaxNumOperands = 0;
+ unsigned MaxMnemonicIndex = 0;
+ bool HasDeprecation = false;
+ for (const auto &MI : Info.Matchables) {
+ MaxNumOperands = std::max(MaxNumOperands, MI->AsmOperands.size());
+ HasDeprecation |= MI->HasDeprecation;
+
+ // Store a pascal-style length byte in the mnemonic.
+ std::string LenMnemonic = char(MI->Mnemonic.size()) + MI->Mnemonic.lower();
+ MaxMnemonicIndex = std::max(MaxMnemonicIndex,
+ StringTable.GetOrAddStringOffset(LenMnemonic, false));
+ }
+
+ OS << "static const char MnemonicTable[] =\n";
+ StringTable.EmitString(OS);
+ OS << ";\n\n";
+
+ std::vector<std::vector<Record *>> FeatureBitsets;
+ for (const auto &MI : Info.Matchables) {
+ if (MI->RequiredFeatures.empty())
+ continue;
+ FeatureBitsets.emplace_back();
+ for (unsigned I = 0, E = MI->RequiredFeatures.size(); I != E; ++I)
+ FeatureBitsets.back().push_back(MI->RequiredFeatures[I]->TheDef);
+ }
+
+ llvm::sort(FeatureBitsets, [&](const std::vector<Record *> &A,
+ const std::vector<Record *> &B) {
+ if (A.size() < B.size())
+ return true;
+ if (A.size() > B.size())
+ return false;
+ for (auto Pair : zip(A, B)) {
+ if (std::get<0>(Pair)->getName() < std::get<1>(Pair)->getName())
+ return true;
+ if (std::get<0>(Pair)->getName() > std::get<1>(Pair)->getName())
+ return false;
+ }
+ return false;
+ });
+ FeatureBitsets.erase(
+ std::unique(FeatureBitsets.begin(), FeatureBitsets.end()),
+ FeatureBitsets.end());
+ OS << "// Feature bitsets.\n"
+ << "enum : " << getMinimalTypeForRange(FeatureBitsets.size()) << " {\n"
+ << " AMFBS_None,\n";
+ for (const auto &FeatureBitset : FeatureBitsets) {
+ if (FeatureBitset.empty())
+ continue;
+ OS << " " << getNameForFeatureBitset(FeatureBitset) << ",\n";
+ }
+ OS << "};\n\n"
+ << "static constexpr FeatureBitset FeatureBitsets[] = {\n"
+ << " {}, // AMFBS_None\n";
+ for (const auto &FeatureBitset : FeatureBitsets) {
+ if (FeatureBitset.empty())
+ continue;
+ OS << " {";
+ for (const auto &Feature : FeatureBitset) {
+ const auto &I = Info.SubtargetFeatures.find(Feature);
+ assert(I != Info.SubtargetFeatures.end() && "Didn't import predicate?");
+ OS << I->second.getEnumBitName() << ", ";
+ }
+ OS << "},\n";
+ }
+ OS << "};\n\n";
+
+ // Emit the static match table; unused classes get initialized to 0 which is
+ // guaranteed to be InvalidMatchClass.
+ //
+ // FIXME: We can reduce the size of this table very easily. First, we change
+ // it so that store the kinds in separate bit-fields for each index, which
+ // only needs to be the max width used for classes at that index (we also need
+ // to reject based on this during classification). If we then make sure to
+ // order the match kinds appropriately (putting mnemonics last), then we
+ // should only end up using a few bits for each class, especially the ones
+ // following the mnemonic.
+ OS << "namespace {\n";
+ OS << " struct MatchEntry {\n";
+ OS << " " << getMinimalTypeForRange(MaxMnemonicIndex)
+ << " Mnemonic;\n";
+ OS << " uint16_t Opcode;\n";
+ OS << " " << getMinimalTypeForRange(NumConverters)
+ << " ConvertFn;\n";
+ OS << " " << getMinimalTypeForRange(FeatureBitsets.size())
+ << " RequiredFeaturesIdx;\n";
+ OS << " " << getMinimalTypeForRange(
+ std::distance(Info.Classes.begin(), Info.Classes.end()))
+ << " Classes[" << MaxNumOperands << "];\n";
+ OS << " StringRef getMnemonic() const {\n";
+ OS << " return StringRef(MnemonicTable + Mnemonic + 1,\n";
+ OS << " MnemonicTable[Mnemonic]);\n";
+ OS << " }\n";
+ OS << " };\n\n";
+
+ OS << " // Predicate for searching for an opcode.\n";
+ OS << " struct LessOpcode {\n";
+ OS << " bool operator()(const MatchEntry &LHS, StringRef RHS) {\n";
+ OS << " return LHS.getMnemonic() < RHS;\n";
+ OS << " }\n";
+ OS << " bool operator()(StringRef LHS, const MatchEntry &RHS) {\n";
+ OS << " return LHS < RHS.getMnemonic();\n";
+ OS << " }\n";
+ OS << " bool operator()(const MatchEntry &LHS, const MatchEntry &RHS) {\n";
+ OS << " return LHS.getMnemonic() < RHS.getMnemonic();\n";
+ OS << " }\n";
+ OS << " };\n";
+
+ OS << "} // end anonymous namespace\n\n";
+
+ unsigned VariantCount = Target.getAsmParserVariantCount();
+ for (unsigned VC = 0; VC != VariantCount; ++VC) {
+ Record *AsmVariant = Target.getAsmParserVariant(VC);
+ int AsmVariantNo = AsmVariant->getValueAsInt("Variant");
+
+ OS << "static const MatchEntry MatchTable" << VC << "[] = {\n";
+
+ for (const auto &MI : Info.Matchables) {
+ if (MI->AsmVariantID != AsmVariantNo)
+ continue;
+
+ // Store a pascal-style length byte in the mnemonic.
+ std::string LenMnemonic =
+ char(MI->Mnemonic.size()) + MI->Mnemonic.lower();
+ OS << " { " << StringTable.GetOrAddStringOffset(LenMnemonic, false)
+ << " /* " << MI->Mnemonic << " */, "
+ << Target.getInstNamespace() << "::"
+ << MI->getResultInst()->TheDef->getName() << ", "
+ << MI->ConversionFnKind << ", ";
+
+ // Write the required features mask.
+ OS << "AMFBS";
+ if (MI->RequiredFeatures.empty())
+ OS << "_None";
+ else
+ for (unsigned i = 0, e = MI->RequiredFeatures.size(); i != e; ++i)
+ OS << '_' << MI->RequiredFeatures[i]->TheDef->getName();
+
+ OS << ", { ";
+ ListSeparator LS;
+ for (const MatchableInfo::AsmOperand &Op : MI->AsmOperands)
+ OS << LS << Op.Class->Name;
+ OS << " }, },\n";
+ }
+
+ OS << "};\n\n";
+ }
+
+ OS << "#include \"llvm/Support/Debug.h\"\n";
+ OS << "#include \"llvm/Support/Format.h\"\n\n";
+
+ // Finally, build the match function.
+ OS << "unsigned " << Target.getName() << ClassName << "::\n"
+ << "MatchInstructionImpl(const OperandVector &Operands,\n";
+ OS << " MCInst &Inst,\n";
+ if (ReportMultipleNearMisses)
+ OS << " SmallVectorImpl<NearMissInfo> *NearMisses,\n";
+ else
+ OS << " uint64_t &ErrorInfo,\n"
+ << " FeatureBitset &MissingFeatures,\n";
+ OS << " bool matchingInlineAsm, unsigned VariantID) {\n";
+
+ if (!ReportMultipleNearMisses) {
+ OS << " // Eliminate obvious mismatches.\n";
+ OS << " if (Operands.size() > "
+ << (MaxNumOperands + HasMnemonicFirst) << ") {\n";
+ OS << " ErrorInfo = "
+ << (MaxNumOperands + HasMnemonicFirst) << ";\n";
+ OS << " return Match_InvalidOperand;\n";
+ OS << " }\n\n";
+ }
+
+ // Emit code to get the available features.
+ OS << " // Get the current feature set.\n";
+ OS << " const FeatureBitset &AvailableFeatures = getAvailableFeatures();\n\n";
+
+ OS << " // Get the instruction mnemonic, which is the first token.\n";
+ if (HasMnemonicFirst) {
+ OS << " StringRef Mnemonic = ((" << Target.getName()
+ << "Operand &)*Operands[0]).getToken();\n\n";
+ } else {
+ OS << " StringRef Mnemonic;\n";
+ OS << " if (Operands[0]->isToken())\n";
+ OS << " Mnemonic = ((" << Target.getName()
+ << "Operand &)*Operands[0]).getToken();\n\n";
+ }
+
+ if (HasMnemonicAliases) {
+ OS << " // Process all MnemonicAliases to remap the mnemonic.\n";
+ OS << " applyMnemonicAliases(Mnemonic, AvailableFeatures, VariantID);\n\n";
+ }
+
+ // Emit code to compute the class list for this operand vector.
+ if (!ReportMultipleNearMisses) {
+ OS << " // Some state to try to produce better error messages.\n";
+ OS << " bool HadMatchOtherThanFeatures = false;\n";
+ OS << " bool HadMatchOtherThanPredicate = false;\n";
+ OS << " unsigned RetCode = Match_InvalidOperand;\n";
+ OS << " MissingFeatures.set();\n";
+ OS << " // Set ErrorInfo to the operand that mismatches if it is\n";
+ OS << " // wrong for all instances of the instruction.\n";
+ OS << " ErrorInfo = ~0ULL;\n";
+ }
+
+ if (HasOptionalOperands) {
+ OS << " SmallBitVector OptionalOperandsMask(" << MaxNumOperands << ");\n";
+ }
+
+ // Emit code to search the table.
+ OS << " // Find the appropriate table for this asm variant.\n";
+ OS << " const MatchEntry *Start, *End;\n";
+ OS << " switch (VariantID) {\n";
+ OS << " default: llvm_unreachable(\"invalid variant!\");\n";
+ for (unsigned VC = 0; VC != VariantCount; ++VC) {
+ Record *AsmVariant = Target.getAsmParserVariant(VC);
+ int AsmVariantNo = AsmVariant->getValueAsInt("Variant");
+ OS << " case " << AsmVariantNo << ": Start = std::begin(MatchTable" << VC
+ << "); End = std::end(MatchTable" << VC << "); break;\n";
+ }
+ OS << " }\n";
+
+ OS << " // Search the table.\n";
+ if (HasMnemonicFirst) {
+ OS << " auto MnemonicRange = "
+ "std::equal_range(Start, End, Mnemonic, LessOpcode());\n\n";
+ } else {
+ OS << " auto MnemonicRange = std::make_pair(Start, End);\n";
+ OS << " unsigned SIndex = Mnemonic.empty() ? 0 : 1;\n";
+ OS << " if (!Mnemonic.empty())\n";
+ OS << " MnemonicRange = "
+ "std::equal_range(Start, End, Mnemonic.lower(), LessOpcode());\n\n";
+ }
+
+ OS << " DEBUG_WITH_TYPE(\"asm-matcher\", dbgs() << \"AsmMatcher: found \" <<\n"
+ << " std::distance(MnemonicRange.first, MnemonicRange.second) <<\n"
+ << " \" encodings with mnemonic '\" << Mnemonic << \"'\\n\");\n\n";
+
+ OS << " // Return a more specific error code if no mnemonics match.\n";
+ OS << " if (MnemonicRange.first == MnemonicRange.second)\n";
+ OS << " return Match_MnemonicFail;\n\n";
+
+ OS << " for (const MatchEntry *it = MnemonicRange.first, "
+ << "*ie = MnemonicRange.second;\n";
+ OS << " it != ie; ++it) {\n";
+ OS << " const FeatureBitset &RequiredFeatures = "
+ "FeatureBitsets[it->RequiredFeaturesIdx];\n";
+ OS << " bool HasRequiredFeatures =\n";
+ OS << " (AvailableFeatures & RequiredFeatures) == RequiredFeatures;\n";
+ OS << " DEBUG_WITH_TYPE(\"asm-matcher\", dbgs() << \"Trying to match opcode \"\n";
+ OS << " << MII.getName(it->Opcode) << \"\\n\");\n";
+
+ if (ReportMultipleNearMisses) {
+ OS << " // Some state to record ways in which this instruction did not match.\n";
+ OS << " NearMissInfo OperandNearMiss = NearMissInfo::getSuccess();\n";
+ OS << " NearMissInfo FeaturesNearMiss = NearMissInfo::getSuccess();\n";
+ OS << " NearMissInfo EarlyPredicateNearMiss = NearMissInfo::getSuccess();\n";
+ OS << " NearMissInfo LatePredicateNearMiss = NearMissInfo::getSuccess();\n";
+ OS << " bool MultipleInvalidOperands = false;\n";
+ }
+
+ if (HasMnemonicFirst) {
+ OS << " // equal_range guarantees that instruction mnemonic matches.\n";
+ OS << " assert(Mnemonic == it->getMnemonic());\n";
+ }
+
+ // Emit check that the subclasses match.
+ if (!ReportMultipleNearMisses)
+ OS << " bool OperandsValid = true;\n";
+ if (HasOptionalOperands) {
+ OS << " OptionalOperandsMask.reset(0, " << MaxNumOperands << ");\n";
+ }
+ OS << " for (unsigned FormalIdx = " << (HasMnemonicFirst ? "0" : "SIndex")
+ << ", ActualIdx = " << (HasMnemonicFirst ? "1" : "SIndex")
+ << "; FormalIdx != " << MaxNumOperands << "; ++FormalIdx) {\n";
+ OS << " auto Formal = "
+ << "static_cast<MatchClassKind>(it->Classes[FormalIdx]);\n";
+ OS << " DEBUG_WITH_TYPE(\"asm-matcher\",\n";
+ OS << " dbgs() << \" Matching formal operand class \" << getMatchClassName(Formal)\n";
+ OS << " << \" against actual operand at index \" << ActualIdx);\n";
+ OS << " if (ActualIdx < Operands.size())\n";
+ OS << " DEBUG_WITH_TYPE(\"asm-matcher\", dbgs() << \" (\";\n";
+ OS << " Operands[ActualIdx]->print(dbgs()); dbgs() << \"): \");\n";
+ OS << " else\n";
+ OS << " DEBUG_WITH_TYPE(\"asm-matcher\", dbgs() << \": \");\n";
+ OS << " if (ActualIdx >= Operands.size()) {\n";
+ OS << " DEBUG_WITH_TYPE(\"asm-matcher\", dbgs() << \"actual operand "
+ "index out of range\\n\");\n";
+ if (ReportMultipleNearMisses) {
+ OS << " bool ThisOperandValid = (Formal == " <<"InvalidMatchClass) || "
+ "isSubclass(Formal, OptionalMatchClass);\n";
+ OS << " if (!ThisOperandValid) {\n";
+ OS << " if (!OperandNearMiss) {\n";
+ OS << " // Record info about match failure for later use.\n";
+ OS << " DEBUG_WITH_TYPE(\"asm-matcher\", dbgs() << \"recording too-few-operands near miss\\n\");\n";
+ OS << " OperandNearMiss =\n";
+ OS << " NearMissInfo::getTooFewOperands(Formal, it->Opcode);\n";
+ OS << " } else if (OperandNearMiss.getKind() != NearMissInfo::NearMissTooFewOperands) {\n";
+ OS << " // If more than one operand is invalid, give up on this match entry.\n";
+ OS << " DEBUG_WITH_TYPE(\n";
+ OS << " \"asm-matcher\",\n";
+ OS << " dbgs() << \"second invalid operand, giving up on this opcode\\n\");\n";
+ OS << " MultipleInvalidOperands = true;\n";
+ OS << " break;\n";
+ OS << " }\n";
+ OS << " } else {\n";
+ OS << " DEBUG_WITH_TYPE(\"asm-matcher\", dbgs() << \"but formal "
+ "operand not required\\n\");\n";
+ OS << " }\n";
+ OS << " continue;\n";
+ } else {
+ OS << " if (Formal == InvalidMatchClass) {\n";
+ if (HasOptionalOperands) {
+ OS << " OptionalOperandsMask.set(FormalIdx, " << MaxNumOperands
+ << ");\n";
+ }
+ OS << " break;\n";
+ OS << " }\n";
+ OS << " if (isSubclass(Formal, OptionalMatchClass)) {\n";
+ if (HasOptionalOperands) {
+ OS << " OptionalOperandsMask.set(FormalIdx);\n";
+ }
+ OS << " continue;\n";
+ OS << " }\n";
+ OS << " OperandsValid = false;\n";
+ OS << " ErrorInfo = ActualIdx;\n";
+ OS << " break;\n";
+ }
+ OS << " }\n";
+ OS << " MCParsedAsmOperand &Actual = *Operands[ActualIdx];\n";
+ OS << " unsigned Diag = validateOperandClass(Actual, Formal);\n";
+ OS << " if (Diag == Match_Success) {\n";
+ OS << " DEBUG_WITH_TYPE(\"asm-matcher\",\n";
+ OS << " dbgs() << \"match success using generic matcher\\n\");\n";
+ OS << " ++ActualIdx;\n";
+ OS << " continue;\n";
+ OS << " }\n";
+ OS << " // If the generic handler indicates an invalid operand\n";
+ OS << " // failure, check for a special case.\n";
+ OS << " if (Diag != Match_Success) {\n";
+ OS << " unsigned TargetDiag = validateTargetOperandClass(Actual, Formal);\n";
+ OS << " if (TargetDiag == Match_Success) {\n";
+ OS << " DEBUG_WITH_TYPE(\"asm-matcher\",\n";
+ OS << " dbgs() << \"match success using target matcher\\n\");\n";
+ OS << " ++ActualIdx;\n";
+ OS << " continue;\n";
+ OS << " }\n";
+ OS << " // If the target matcher returned a specific error code use\n";
+ OS << " // that, else use the one from the generic matcher.\n";
+ OS << " if (TargetDiag != Match_InvalidOperand && "
+ "HasRequiredFeatures)\n";
+ OS << " Diag = TargetDiag;\n";
+ OS << " }\n";
+ OS << " // If current formal operand wasn't matched and it is optional\n"
+ << " // then try to match next formal operand\n";
+ OS << " if (Diag == Match_InvalidOperand "
+ << "&& isSubclass(Formal, OptionalMatchClass)) {\n";
+ if (HasOptionalOperands) {
+ OS << " OptionalOperandsMask.set(FormalIdx);\n";
+ }
+ OS << " DEBUG_WITH_TYPE(\"asm-matcher\", dbgs() << \"ignoring optional operand\\n\");\n";
+ OS << " continue;\n";
+ OS << " }\n";
+
+ if (ReportMultipleNearMisses) {
+ OS << " if (!OperandNearMiss) {\n";
+ OS << " // If this is the first invalid operand we have seen, record some\n";
+ OS << " // information about it.\n";
+ OS << " DEBUG_WITH_TYPE(\n";
+ OS << " \"asm-matcher\",\n";
+ OS << " dbgs()\n";
+ OS << " << \"operand match failed, recording near-miss with diag code \"\n";
+ OS << " << Diag << \"\\n\");\n";
+ OS << " OperandNearMiss =\n";
+ OS << " NearMissInfo::getMissedOperand(Diag, Formal, it->Opcode, ActualIdx);\n";
+ OS << " ++ActualIdx;\n";
+ OS << " } else {\n";
+ OS << " // If more than one operand is invalid, give up on this match entry.\n";
+ OS << " DEBUG_WITH_TYPE(\n";
+ OS << " \"asm-matcher\",\n";
+ OS << " dbgs() << \"second operand mismatch, skipping this opcode\\n\");\n";
+ OS << " MultipleInvalidOperands = true;\n";
+ OS << " break;\n";
+ OS << " }\n";
+ OS << " }\n\n";
+ } else {
+ OS << " // If this operand is broken for all of the instances of this\n";
+ OS << " // mnemonic, keep track of it so we can report loc info.\n";
+ OS << " // If we already had a match that only failed due to a\n";
+ OS << " // target predicate, that diagnostic is preferred.\n";
+ OS << " if (!HadMatchOtherThanPredicate &&\n";
+ OS << " (it == MnemonicRange.first || ErrorInfo <= ActualIdx)) {\n";
+ OS << " if (HasRequiredFeatures && (ErrorInfo != ActualIdx || Diag "
+ "!= Match_InvalidOperand))\n";
+ OS << " RetCode = Diag;\n";
+ OS << " ErrorInfo = ActualIdx;\n";
+ OS << " }\n";
+ OS << " // Otherwise, just reject this instance of the mnemonic.\n";
+ OS << " OperandsValid = false;\n";
+ OS << " break;\n";
+ OS << " }\n\n";
+ }
+
+ if (ReportMultipleNearMisses)
+ OS << " if (MultipleInvalidOperands) {\n";
+ else
+ OS << " if (!OperandsValid) {\n";
+ OS << " DEBUG_WITH_TYPE(\"asm-matcher\", dbgs() << \"Opcode result: multiple \"\n";
+ OS << " \"operand mismatches, ignoring \"\n";
+ OS << " \"this opcode\\n\");\n";
+ OS << " continue;\n";
+ OS << " }\n";
+
+ // Emit check that the required features are available.
+ OS << " if (!HasRequiredFeatures) {\n";
+ if (!ReportMultipleNearMisses)
+ OS << " HadMatchOtherThanFeatures = true;\n";
+ OS << " FeatureBitset NewMissingFeatures = RequiredFeatures & "
+ "~AvailableFeatures;\n";
+ OS << " DEBUG_WITH_TYPE(\"asm-matcher\", dbgs() << \"Missing target features:\";\n";
+ OS << " for (unsigned I = 0, E = NewMissingFeatures.size(); I != E; ++I)\n";
+ OS << " if (NewMissingFeatures[I])\n";
+ OS << " dbgs() << ' ' << I;\n";
+ OS << " dbgs() << \"\\n\");\n";
+ if (ReportMultipleNearMisses) {
+ OS << " FeaturesNearMiss = NearMissInfo::getMissedFeature(NewMissingFeatures);\n";
+ } else {
+ OS << " if (NewMissingFeatures.count() <=\n"
+ " MissingFeatures.count())\n";
+ OS << " MissingFeatures = NewMissingFeatures;\n";
+ OS << " continue;\n";
+ }
+ OS << " }\n";
+ OS << "\n";
+ OS << " Inst.clear();\n\n";
+ OS << " Inst.setOpcode(it->Opcode);\n";
+ // Verify the instruction with the target-specific match predicate function.
+ OS << " // We have a potential match but have not rendered the operands.\n"
+ << " // Check the target predicate to handle any context sensitive\n"
+ " // constraints.\n"
+ << " // For example, Ties that are referenced multiple times must be\n"
+ " // checked here to ensure the input is the same for each match\n"
+ " // constraints. If we leave it any later the ties will have been\n"
+ " // canonicalized\n"
+ << " unsigned MatchResult;\n"
+ << " if ((MatchResult = checkEarlyTargetMatchPredicate(Inst, "
+ "Operands)) != Match_Success) {\n"
+ << " Inst.clear();\n";
+ OS << " DEBUG_WITH_TYPE(\n";
+ OS << " \"asm-matcher\",\n";
+ OS << " dbgs() << \"Early target match predicate failed with diag code \"\n";
+ OS << " << MatchResult << \"\\n\");\n";
+ if (ReportMultipleNearMisses) {
+ OS << " EarlyPredicateNearMiss = NearMissInfo::getMissedPredicate(MatchResult);\n";
+ } else {
+ OS << " RetCode = MatchResult;\n"
+ << " HadMatchOtherThanPredicate = true;\n"
+ << " continue;\n";
+ }
+ OS << " }\n\n";
+
+ if (ReportMultipleNearMisses) {
+ OS << " // If we did not successfully match the operands, then we can't convert to\n";
+ OS << " // an MCInst, so bail out on this instruction variant now.\n";
+ OS << " if (OperandNearMiss) {\n";
+ OS << " // If the operand mismatch was the only problem, reprrt it as a near-miss.\n";
+ OS << " if (NearMisses && !FeaturesNearMiss && !EarlyPredicateNearMiss) {\n";
+ OS << " DEBUG_WITH_TYPE(\n";
+ OS << " \"asm-matcher\",\n";
+ OS << " dbgs()\n";
+ OS << " << \"Opcode result: one mismatched operand, adding near-miss\\n\");\n";
+ OS << " NearMisses->push_back(OperandNearMiss);\n";
+ OS << " } else {\n";
+ OS << " DEBUG_WITH_TYPE(\"asm-matcher\", dbgs() << \"Opcode result: multiple \"\n";
+ OS << " \"types of mismatch, so not \"\n";
+ OS << " \"reporting near-miss\\n\");\n";
+ OS << " }\n";
+ OS << " continue;\n";
+ OS << " }\n\n";
+ }
+
+ OS << " if (matchingInlineAsm) {\n";
+ OS << " convertToMapAndConstraints(it->ConvertFn, Operands);\n";
+ if (!ReportMultipleNearMisses) {
+ OS << " if (!checkAsmTiedOperandConstraints(*this, it->ConvertFn, "
+ "Operands, ErrorInfo))\n";
+ OS << " return Match_InvalidTiedOperand;\n";
+ OS << "\n";
+ }
+ OS << " return Match_Success;\n";
+ OS << " }\n\n";
+ OS << " // We have selected a definite instruction, convert the parsed\n"
+ << " // operands into the appropriate MCInst.\n";
+ if (HasOptionalOperands) {
+ OS << " convertToMCInst(it->ConvertFn, Inst, it->Opcode, Operands,\n"
+ << " OptionalOperandsMask);\n";
+ } else {
+ OS << " convertToMCInst(it->ConvertFn, Inst, it->Opcode, Operands);\n";
+ }
+ OS << "\n";
+
+ // Verify the instruction with the target-specific match predicate function.
+ OS << " // We have a potential match. Check the target predicate to\n"
+ << " // handle any context sensitive constraints.\n"
+ << " if ((MatchResult = checkTargetMatchPredicate(Inst)) !="
+ << " Match_Success) {\n"
+ << " DEBUG_WITH_TYPE(\"asm-matcher\",\n"
+ << " dbgs() << \"Target match predicate failed with diag code \"\n"
+ << " << MatchResult << \"\\n\");\n"
+ << " Inst.clear();\n";
+ if (ReportMultipleNearMisses) {
+ OS << " LatePredicateNearMiss = NearMissInfo::getMissedPredicate(MatchResult);\n";
+ } else {
+ OS << " RetCode = MatchResult;\n"
+ << " HadMatchOtherThanPredicate = true;\n"
+ << " continue;\n";
+ }
+ OS << " }\n\n";
+
+ if (ReportMultipleNearMisses) {
+ OS << " int NumNearMisses = ((int)(bool)OperandNearMiss +\n";
+ OS << " (int)(bool)FeaturesNearMiss +\n";
+ OS << " (int)(bool)EarlyPredicateNearMiss +\n";
+ OS << " (int)(bool)LatePredicateNearMiss);\n";
+ OS << " if (NumNearMisses == 1) {\n";
+ OS << " // We had exactly one type of near-miss, so add that to the list.\n";
+ OS << " assert(!OperandNearMiss && \"OperandNearMiss was handled earlier\");\n";
+ OS << " DEBUG_WITH_TYPE(\"asm-matcher\", dbgs() << \"Opcode result: found one type of \"\n";
+ OS << " \"mismatch, so reporting a \"\n";
+ OS << " \"near-miss\\n\");\n";
+ OS << " if (NearMisses && FeaturesNearMiss)\n";
+ OS << " NearMisses->push_back(FeaturesNearMiss);\n";
+ OS << " else if (NearMisses && EarlyPredicateNearMiss)\n";
+ OS << " NearMisses->push_back(EarlyPredicateNearMiss);\n";
+ OS << " else if (NearMisses && LatePredicateNearMiss)\n";
+ OS << " NearMisses->push_back(LatePredicateNearMiss);\n";
+ OS << "\n";
+ OS << " continue;\n";
+ OS << " } else if (NumNearMisses > 1) {\n";
+ OS << " // This instruction missed in more than one way, so ignore it.\n";
+ OS << " DEBUG_WITH_TYPE(\"asm-matcher\", dbgs() << \"Opcode result: multiple \"\n";
+ OS << " \"types of mismatch, so not \"\n";
+ OS << " \"reporting near-miss\\n\");\n";
+ OS << " continue;\n";
+ OS << " }\n";
+ }
+
+ // Call the post-processing function, if used.
+ StringRef InsnCleanupFn = AsmParser->getValueAsString("AsmParserInstCleanup");
+ if (!InsnCleanupFn.empty())
+ OS << " " << InsnCleanupFn << "(Inst);\n";
+
+ if (HasDeprecation) {
+ OS << " std::string Info;\n";
+ OS << " if (!getParser().getTargetParser().getTargetOptions().MCNoDeprecatedWarn &&\n";
+ OS << " MII.getDeprecatedInfo(Inst, getSTI(), Info)) {\n";
+ OS << " SMLoc Loc = ((" << Target.getName()
+ << "Operand &)*Operands[0]).getStartLoc();\n";
+ OS << " getParser().Warning(Loc, Info, std::nullopt);\n";
+ OS << " }\n";
+ }
+
+ if (!ReportMultipleNearMisses) {
+ OS << " if (!checkAsmTiedOperandConstraints(*this, it->ConvertFn, "
+ "Operands, ErrorInfo))\n";
+ OS << " return Match_InvalidTiedOperand;\n";
+ OS << "\n";
+ }
+
+ OS << " DEBUG_WITH_TYPE(\n";
+ OS << " \"asm-matcher\",\n";
+ OS << " dbgs() << \"Opcode result: complete match, selecting this opcode\\n\");\n";
+ OS << " return Match_Success;\n";
+ OS << " }\n\n";
+
+ if (ReportMultipleNearMisses) {
+ OS << " // No instruction variants matched exactly.\n";
+ OS << " return Match_NearMisses;\n";
+ } else {
+ OS << " // Okay, we had no match. Try to return a useful error code.\n";
+ OS << " if (HadMatchOtherThanPredicate || !HadMatchOtherThanFeatures)\n";
+ OS << " return RetCode;\n\n";
+ OS << " ErrorInfo = 0;\n";
+ OS << " return Match_MissingFeature;\n";
+ }
+ OS << "}\n\n";
+
+ if (!Info.OperandMatchInfo.empty())
+ emitCustomOperandParsing(OS, Target, Info, ClassName, StringTable,
+ MaxMnemonicIndex, FeatureBitsets.size(),
+ HasMnemonicFirst, *AsmParser);
+
+ OS << "#endif // GET_MATCHER_IMPLEMENTATION\n\n";
+
+ OS << "\n#ifdef GET_MNEMONIC_SPELL_CHECKER\n";
+ OS << "#undef GET_MNEMONIC_SPELL_CHECKER\n\n";
+
+ emitMnemonicSpellChecker(OS, Target, VariantCount);
+
+ OS << "#endif // GET_MNEMONIC_SPELL_CHECKER\n\n";
+
+ OS << "\n#ifdef GET_MNEMONIC_CHECKER\n";
+ OS << "#undef GET_MNEMONIC_CHECKER\n\n";
+
+ emitMnemonicChecker(OS, Target, VariantCount,
+ HasMnemonicFirst, HasMnemonicAliases);
+
+ OS << "#endif // GET_MNEMONIC_CHECKER\n\n";
+}
+
+namespace llvm {
+
+/// Entry point for the -gen-asm-matcher TableGen backend: emits the standard
+/// generated-file banner, then delegates to AsmMatcherEmitter::run() to write
+/// the matcher tables and the MatchInstructionImpl() implementation.
+void EmitAsmMatcher(RecordKeeper &RK, raw_ostream &OS) {
+ emitSourceFileHeader("Assembly Matcher Source Fragment", OS);
+ AsmMatcherEmitter(RK).run(OS);
+}
+
+} // end namespace llvm
diff --git a/contrib/libs/llvm16/utils/TableGen/AsmWriterEmitter.cpp b/contrib/libs/llvm16/utils/TableGen/AsmWriterEmitter.cpp
new file mode 100644
index 0000000000..f2e4d15a2c
--- /dev/null
+++ b/contrib/libs/llvm16/utils/TableGen/AsmWriterEmitter.cpp
@@ -0,0 +1,1314 @@
+//===- AsmWriterEmitter.cpp - Generate an assembly writer -----------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// This tablegen backend emits an assembly printer for the current target.
+// Note that this is currently fairly skeletal, but will grow over time.
+//
+//===----------------------------------------------------------------------===//
+
+#include "AsmWriterInst.h"
+#include "CodeGenInstruction.h"
+#include "CodeGenRegisters.h"
+#include "CodeGenTarget.h"
+#include "SequenceToOffsetTable.h"
+#include "Types.h"
+#include "llvm/ADT/ArrayRef.h"
+#include "llvm/ADT/DenseMap.h"
+#include "llvm/ADT/STLExtras.h"
+#include "llvm/ADT/SmallString.h"
+#include "llvm/ADT/SmallVector.h"
+#include "llvm/ADT/StringExtras.h"
+#include "llvm/ADT/StringRef.h"
+#include "llvm/ADT/Twine.h"
+#include "llvm/Support/Casting.h"
+#include "llvm/Support/Debug.h"
+#include "llvm/Support/Format.h"
+#include "llvm/Support/FormatVariadic.h"
+#include "llvm/Support/MathExtras.h"
+#include "llvm/Support/raw_ostream.h"
+#include "llvm/TableGen/Error.h"
+#include "llvm/TableGen/Record.h"
+#include "llvm/TableGen/TableGenBackend.h"
+#include <algorithm>
+#include <cassert>
+#include <cstddef>
+#include <cstdint>
+#include <deque>
+#include <iterator>
+#include <map>
+#include <set>
+#include <string>
+#include <tuple>
+#include <utility>
+#include <vector>
+
+using namespace llvm;
+
+#define DEBUG_TYPE "asm-writer-emitter"
+
+namespace {
+
+/// Driver for the -gen-asm-writer TableGen backend. Gathers the target's
+/// instructions as AsmWriterInst records and emits the pieces of the
+/// generated instruction printer (mnemonic lookup, operand printing,
+/// register names, and alias printing).
+class AsmWriterEmitter {
+ RecordKeeper &Records;
+ CodeGenTarget Target;
+ // All target instructions in opcode-enum order.
+ ArrayRef<const CodeGenInstruction *> NumberedInstructions;
+ // Parsed asm-string representations of the printable instructions.
+ std::vector<AsmWriterInst> Instructions;
+
+public:
+ AsmWriterEmitter(RecordKeeper &R);
+
+ // Emit all generated printer components to the given stream.
+ void run(raw_ostream &o);
+private:
+ void EmitGetMnemonic(
+ raw_ostream &o,
+ std::vector<std::vector<std::string>> &TableDrivenOperandPrinters,
+ unsigned &BitsLeft, unsigned &AsmStrBits);
+ void EmitPrintInstruction(
+ raw_ostream &o,
+ std::vector<std::vector<std::string>> &TableDrivenOperandPrinters,
+ unsigned &BitsLeft, unsigned &AsmStrBits);
+ void EmitGetRegisterName(raw_ostream &o);
+ void EmitPrintAliasInstruction(raw_ostream &O);
+
+ // Build the set of unique operand-printing code snippets shared across
+ // instructions; see the definition below for the output parameters.
+ void FindUniqueOperandCommands(std::vector<std::string> &UOC,
+ std::vector<std::vector<unsigned>> &InstIdxs,
+ std::vector<unsigned> &InstOpsUsed,
+ bool PassSubtarget) const;
+};
+
+} // end anonymous namespace
+
+// PrintCases - Pop the last (opcode-name, operand) pair off OpsToPrint and
+// emit a switch case for it. Any remaining entries whose operand-printing
+// code is identical are folded into the same case (emitted as extra case
+// labels) and removed from the list, so repeated calls drain OpsToPrint.
+static void PrintCases(std::vector<std::pair<std::string,
+ AsmWriterOperand>> &OpsToPrint, raw_ostream &O,
+ bool PassSubtarget) {
+ O << " case " << OpsToPrint.back().first << ":";
+ AsmWriterOperand TheOp = OpsToPrint.back().second;
+ OpsToPrint.pop_back();
+
+ // Check to see if any other operands are identical in this list, and if so,
+ // emit a case label for them.
+ // Iterate backwards so erase() does not disturb indices not yet visited.
+ for (unsigned i = OpsToPrint.size(); i != 0; --i)
+ if (OpsToPrint[i-1].second == TheOp) {
+ O << "\n case " << OpsToPrint[i-1].first << ":";
+ OpsToPrint.erase(OpsToPrint.begin()+i-1);
+ }
+
+ // Finally, emit the code.
+ O << "\n " << TheOp.getCode(PassSubtarget);
+ O << "\n break;\n";
+}
+
+/// EmitInstructions - Emit the last instruction in the vector and any other
+/// instructions that are suitably similar to it.
+/// "Similar" means the instructions differ in at most one operand (as decided
+/// by AsmWriterInst::MatchesAllButOneOp); those are grouped under shared case
+/// labels, with a nested switch emitted for the single differing operand.
+static void EmitInstructions(std::vector<AsmWriterInst> &Insts,
+ raw_ostream &O, bool PassSubtarget) {
+ AsmWriterInst FirstInst = Insts.back();
+ Insts.pop_back();
+
+ std::vector<AsmWriterInst> SimilarInsts;
+ // Index of the one operand that differs across the group; ~0U until the
+ // first match fixes it.
+ unsigned DifferingOperand = ~0;
+ // Iterate backwards so erase() does not disturb indices not yet visited.
+ for (unsigned i = Insts.size(); i != 0; --i) {
+ // NOTE(review): ~1U appears to be the "more than one operand differs"
+ // sentinel returned by MatchesAllButOneOp — confirm in AsmWriterInst.h.
+ unsigned DiffOp = Insts[i-1].MatchesAllButOneOp(FirstInst);
+ if (DiffOp != ~1U) {
+ if (DifferingOperand == ~0U) // First match!
+ DifferingOperand = DiffOp;
+
+ // If this differs in the same operand as the rest of the instructions in
+ // this class, move it to the SimilarInsts list.
+ if (DifferingOperand == DiffOp || DiffOp == ~0U) {
+ SimilarInsts.push_back(Insts[i-1]);
+ Insts.erase(Insts.begin()+i-1);
+ }
+ }
+ }
+
+ // Emit one case label per instruction in the group.
+ O << " case " << FirstInst.CGI->Namespace << "::"
+ << FirstInst.CGI->TheDef->getName() << ":\n";
+ for (const AsmWriterInst &AWI : SimilarInsts)
+ O << " case " << AWI.CGI->Namespace << "::"
+ << AWI.CGI->TheDef->getName() << ":\n";
+ for (unsigned i = 0, e = FirstInst.Operands.size(); i != e; ++i) {
+ if (i != DifferingOperand) {
+ // If the operand is the same for all instructions, just print it.
+ O << " " << FirstInst.Operands[i].getCode(PassSubtarget);
+ } else {
+ // If this is the operand that varies between all of the instructions,
+ // emit a switch for just this operand now.
+ O << " switch (MI->getOpcode()) {\n";
+ O << " default: llvm_unreachable(\"Unexpected opcode.\");\n";
+ std::vector<std::pair<std::string, AsmWriterOperand>> OpsToPrint;
+ OpsToPrint.push_back(std::make_pair(FirstInst.CGI->Namespace.str() + "::" +
+ FirstInst.CGI->TheDef->getName().str(),
+ FirstInst.Operands[i]));
+
+ for (const AsmWriterInst &AWI : SimilarInsts) {
+ OpsToPrint.push_back(std::make_pair(AWI.CGI->Namespace.str()+"::" +
+ AWI.CGI->TheDef->getName().str(),
+ AWI.Operands[i]));
+ }
+ // PrintCases consumes from the back; reverse to keep source order.
+ std::reverse(OpsToPrint.begin(), OpsToPrint.end());
+ while (!OpsToPrint.empty())
+ PrintCases(OpsToPrint, O, PassSubtarget);
+ O << " }";
+ }
+ O << "\n";
+ }
+ O << " break;\n";
+}
+
+// FindUniqueOperandCommands - Deduplicate the code used to print the first
+// operand of every instruction into UniqueOperandCommands, then greedily
+// extend each unique command with further operands as long as ALL
+// instructions sharing that command agree on them. Outputs:
+// UniqueOperandCommands - the deduplicated (possibly multi-operand) snippets;
+// InstIdxs - for each snippet, the instruction indices that use it;
+// InstOpsUsed - for each snippet, how many leading operands it consumes.
+void AsmWriterEmitter::
+FindUniqueOperandCommands(std::vector<std::string> &UniqueOperandCommands,
+ std::vector<std::vector<unsigned>> &InstIdxs,
+ std::vector<unsigned> &InstOpsUsed,
+ bool PassSubtarget) const {
+ // This vector parallels UniqueOperandCommands, keeping track of which
+ // instructions each case are used for. It is a comma separated string of
+ // enums.
+ std::vector<std::string> InstrsForCase;
+ InstrsForCase.resize(UniqueOperandCommands.size());
+ InstOpsUsed.assign(UniqueOperandCommands.size(), 0);
+
+ for (size_t i = 0, e = Instructions.size(); i != e; ++i) {
+ const AsmWriterInst &Inst = Instructions[i];
+ if (Inst.Operands.empty())
+ continue; // Instruction already done.
+
+ std::string Command = " "+Inst.Operands[0].getCode(PassSubtarget)+"\n";
+
+ // Check to see if we already have 'Command' in UniqueOperandCommands.
+ // If not, add it.
+ auto I = llvm::find(UniqueOperandCommands, Command);
+ if (I != UniqueOperandCommands.end()) {
+ size_t idx = I - UniqueOperandCommands.begin();
+ InstrsForCase[idx] += ", ";
+ InstrsForCase[idx] += Inst.CGI->TheDef->getName();
+ InstIdxs[idx].push_back(i);
+ } else {
+ UniqueOperandCommands.push_back(std::move(Command));
+ InstrsForCase.push_back(std::string(Inst.CGI->TheDef->getName()));
+ InstIdxs.emplace_back();
+ InstIdxs.back().push_back(i);
+
+ // This command matches one operand so far.
+ InstOpsUsed.push_back(1);
+ }
+ }
+
+ // For each entry of UniqueOperandCommands, there is a set of instructions
+ // that uses it. If the next command of all instructions in the set are
+ // identical, fold it into the command.
+ for (size_t CommandIdx = 0, e = UniqueOperandCommands.size();
+ CommandIdx != e; ++CommandIdx) {
+
+ const auto &Idxs = InstIdxs[CommandIdx];
+
+ for (unsigned Op = 1; ; ++Op) {
+ // Find the first instruction in the set.
+ const AsmWriterInst &FirstInst = Instructions[Idxs.front()];
+ // If this instruction has no more operands, there isn't anything to merge
+ // into this command.
+ if (FirstInst.Operands.size() == Op)
+ break;
+
+ // Otherwise, scan to see if all of the other instructions in this command
+ // set share the operand.
+ if (any_of(drop_begin(Idxs), [&](unsigned Idx) {
+ const AsmWriterInst &OtherInst = Instructions[Idx];
+ return OtherInst.Operands.size() == Op ||
+ OtherInst.Operands[Op] != FirstInst.Operands[Op];
+ }))
+ break;
+
+ // Okay, everything in this command set has the same next operand. Add it
+ // to UniqueOperandCommands and remember that it was consumed.
+ std::string Command = " " +
+ FirstInst.Operands[Op].getCode(PassSubtarget) + "\n";
+
+ UniqueOperandCommands[CommandIdx] += Command;
+ InstOpsUsed[CommandIdx]++;
+ }
+ }
+
+ // Prepend some of the instructions each case is used for onto the case val.
+ for (unsigned i = 0, e = InstrsForCase.size(); i != e; ++i) {
+ std::string Instrs = InstrsForCase[i];
+ // Keep the generated comment readable: truncate very long name lists.
+ if (Instrs.size() > 70) {
+ Instrs.erase(Instrs.begin()+70, Instrs.end());
+ Instrs += "...";
+ }
+
+ if (!Instrs.empty())
+ UniqueOperandCommands[i] = " // " + Instrs + "\n" +
+ UniqueOperandCommands[i];
+ }
+}
+
+// UnescapeString - Resolve C-style backslash escape sequences in Str in
+// place (\a \b \e \f \n \r \t \v \" \' \\). Each recognized two-character
+// escape is collapsed to its single-character value; unrecognized escapes
+// and a trailing lone backslash are left untouched.
+static void UnescapeString(std::string &Str) {
+ for (unsigned i = 0; i != Str.size(); ++i) {
+ if (Str[i] == '\\' && i != Str.size()-1) {
+ switch (Str[i+1]) {
+ default: continue; // Don't execute the code after the switch.
+ case 'a': Str[i] = '\a'; break;
+ case 'b': Str[i] = '\b'; break;
+ case 'e': Str[i] = 27; break; // ESC has no standard C escape.
+ case 'f': Str[i] = '\f'; break;
+ case 'n': Str[i] = '\n'; break;
+ case 'r': Str[i] = '\r'; break;
+ case 't': Str[i] = '\t'; break;
+ case 'v': Str[i] = '\v'; break;
+ case '"': Str[i] = '\"'; break;
+ case '\'': Str[i] = '\''; break;
+ case '\\': Str[i] = '\\'; break;
+ }
+ // Nuke the second character.
+ Str.erase(Str.begin()+i+1);
+ }
+ }
+}
+
+/// UnescapeAliasString - Supports literal braces in an InstAlias asm string,
+/// which are escaped with '\\' to avoid being interpreted as variants. The
+/// escapes must be collapsed before C++ code is generated, since (e.g.):
+///
+///   AsmString = "foo \{$\x01\}";
+///
+/// causes non-standard escape character warnings.
+static void UnescapeAliasString(std::string &Str) {
+  size_t Pos = 0;
+  while (Pos < Str.size()) {
+    if (Str[Pos] == '\\' && Pos + 1 < Str.size() &&
+        (Str[Pos + 1] == '{' || Str[Pos + 1] == '}')) {
+      // Keep the brace, drop the backslash that escaped it.
+      Str[Pos] = Str[Pos + 1];
+      Str.erase(Str.begin() + Pos + 1);
+    }
+    ++Pos;
+  }
+}
+
+/// EmitGetMnemonic - Emit the generated getMnemonic(const MCInst *) method:
+/// a compressed string table of instruction mnemonics plus a per-opcode
+/// 64-bit word whose low AsmStrBits bits index the mnemonic string table and
+/// whose remaining bits select operand-printing command fragments.
+///
+/// Outputs shared with EmitPrintInstruction: TableDrivenOperandPrinters
+/// receives the fragment command lists peeled off each instruction here;
+/// BitsLeft and AsmStrBits record how the 64-bit word was divided up.
+void AsmWriterEmitter::EmitGetMnemonic(
+    raw_ostream &O,
+    std::vector<std::vector<std::string>> &TableDrivenOperandPrinters,
+    unsigned &BitsLeft, unsigned &AsmStrBits) {
+  Record *AsmWriter = Target.getAsmWriter();
+  StringRef ClassName = AsmWriter->getValueAsString("AsmWriterClassName");
+  bool PassSubtarget = AsmWriter->getValueAsInt("PassSubtarget");
+
+  O << "/// getMnemonic - This method is automatically generated by "
+       "tablegen\n"
+       "/// from the instruction set description.\n"
+       "std::pair<const char *, uint64_t> "
+    << Target.getName() << ClassName << "::getMnemonic(const MCInst *MI) {\n";
+
+  // Build an aggregate string, and build a table of offsets into it.
+  SequenceToOffsetTable<std::string> StringTable;
+
+  /// OpcodeInfo - This encodes the index of the string to use for the first
+  /// chunk of the output as well as indices used for operand printing.
+  std::vector<uint64_t> OpcodeInfo(NumberedInstructions.size());
+  const unsigned OpcodeInfoBits = 64;
+
+  // Add all strings to the string table upfront so it can generate an optimized
+  // representation.
+  for (AsmWriterInst &AWI : Instructions) {
+    if (AWI.Operands[0].OperandType ==
+        AsmWriterOperand::isLiteralTextOperand &&
+        !AWI.Operands[0].Str.empty()) {
+      std::string Str = AWI.Operands[0].Str;
+      UnescapeString(Str);
+      StringTable.add(Str);
+    }
+  }
+
+  StringTable.layout();
+
+  // Assign each instruction its string-table index. The index is biased by
+  // one below so that 0 can serve as a "cannot print" sentinel in the
+  // generated code.
+  unsigned MaxStringIdx = 0;
+  for (AsmWriterInst &AWI : Instructions) {
+    unsigned Idx;
+    if (AWI.Operands[0].OperandType != AsmWriterOperand::isLiteralTextOperand ||
+        AWI.Operands[0].Str.empty()) {
+      // Something handled by the asmwriter printer, but with no leading string.
+      Idx = StringTable.get("");
+    } else {
+      std::string Str = AWI.Operands[0].Str;
+      UnescapeString(Str);
+      Idx = StringTable.get(Str);
+      MaxStringIdx = std::max(MaxStringIdx, Idx);
+
+      // Nuke the string from the operand list. It is now handled!
+      AWI.Operands.erase(AWI.Operands.begin());
+    }
+
+    // Bias offset by one since we want 0 as a sentinel.
+    OpcodeInfo[AWI.CGIIndex] = Idx+1;
+  }
+
+  // Figure out how many bits we used for the string index.
+  AsmStrBits = Log2_32_Ceil(MaxStringIdx + 2);
+
+  // To reduce code size, we compactify common instructions into a few bits
+  // in the opcode-indexed table.
+  BitsLeft = OpcodeInfoBits - AsmStrBits;
+
+  // Greedily peel shared operand-printing commands off the instructions into
+  // fragment tables, encoding each instruction's choice of fragment in the
+  // spare bits of its OpcodeInfo word, until the bits run out.
+  while (true) {
+    std::vector<std::string> UniqueOperandCommands;
+    std::vector<std::vector<unsigned>> InstIdxs;
+    std::vector<unsigned> NumInstOpsHandled;
+    FindUniqueOperandCommands(UniqueOperandCommands, InstIdxs,
+                              NumInstOpsHandled, PassSubtarget);
+
+    // If we ran out of operands to print, we're done.
+    if (UniqueOperandCommands.empty()) break;
+
+    // Compute the number of bits we need to represent these cases, this is
+    // ceil(log2(numentries)).
+    unsigned NumBits = Log2_32_Ceil(UniqueOperandCommands.size());
+
+    // If we don't have enough bits for this operand, don't include it.
+    if (NumBits > BitsLeft) {
+      LLVM_DEBUG(errs() << "Not enough bits to densely encode " << NumBits
+                        << " more bits\n");
+      break;
+    }
+
+    // Otherwise, we can include this in the initial lookup table. Add it in.
+    for (size_t i = 0, e = InstIdxs.size(); i != e; ++i) {
+      unsigned NumOps = NumInstOpsHandled[i];
+      for (unsigned Idx : InstIdxs[i]) {
+        OpcodeInfo[Instructions[Idx].CGIIndex] |=
+            (uint64_t)i << (OpcodeInfoBits-BitsLeft);
+        // Remove the info about this operand from the instruction.
+        AsmWriterInst &Inst = Instructions[Idx];
+        if (!Inst.Operands.empty()) {
+          assert(NumOps <= Inst.Operands.size() &&
+                 "Can't remove this many ops!");
+          Inst.Operands.erase(Inst.Operands.begin(),
+                              Inst.Operands.begin()+NumOps);
+        }
+      }
+    }
+    BitsLeft -= NumBits;
+
+    // Remember the handlers for this set of operands.
+    TableDrivenOperandPrinters.push_back(std::move(UniqueOperandCommands));
+  }
+
+  // Emit the string table itself.
+  StringTable.emitStringLiteralDef(O, " static const char AsmStrs[]");
+
+  // Emit the lookup tables in pieces to minimize wasted bytes.
+  unsigned BytesNeeded = ((OpcodeInfoBits - BitsLeft) + 7) / 8;
+  unsigned Table = 0, Shift = 0;
+  SmallString<128> BitsString;
+  raw_svector_ostream BitsOS(BitsString);
+  // If the total bits is more than 32-bits we need to use a 64-bit type.
+  BitsOS << " uint" << ((BitsLeft < (OpcodeInfoBits - 32)) ? 64 : 32)
+         << "_t Bits = 0;\n";
+  while (BytesNeeded != 0) {
+    // Figure out how big this table section needs to be, but no bigger than 4.
+    unsigned TableSize = std::min(llvm::bit_floor(BytesNeeded), 4u);
+    BytesNeeded -= TableSize;
+    TableSize *= 8; // Convert to bits;
+    uint64_t Mask = (1ULL << TableSize) - 1;
+    O << " static const uint" << TableSize << "_t OpInfo" << Table
+      << "[] = {\n";
+    for (unsigned i = 0, e = NumberedInstructions.size(); i != e; ++i) {
+      O << " " << ((OpcodeInfo[i] >> Shift) & Mask) << "U,\t// "
+        << NumberedInstructions[i]->TheDef->getName() << "\n";
+    }
+    O << " };\n\n";
+    // Emit string to combine the individual table lookups.
+    BitsOS << " Bits |= ";
+    // If the total bits is more than 32-bits we need to use a 64-bit type.
+    if (BitsLeft < (OpcodeInfoBits - 32))
+      BitsOS << "(uint64_t)";
+    BitsOS << "OpInfo" << Table << "[MI->getOpcode()] << " << Shift << ";\n";
+    // Prepare the shift for the next iteration and increment the table count.
+    Shift += TableSize;
+    ++Table;
+  }
+
+  O << " // Emit the opcode for the instruction.\n";
+  O << BitsString;
+
+  // Return mnemonic string and bits. The low AsmStrBits bits hold the
+  // 1-biased string index, hence the "-1" in the generated expression.
+  O << " return {AsmStrs+(Bits & " << (1 << AsmStrBits) - 1
+    << ")-1, Bits};\n\n";
+
+  O << "}\n";
+}
+
+/// EmitPrintInstruction - Generate the code for the "printInstruction" method
+/// implementation. Destroys all instances of AsmWriterInst information, by
+/// clearing the Instructions vector.
+///
+/// TableDrivenOperandPrinters, BitsLeft and AsmStrBits are the fragment
+/// command lists and bit accounting produced by EmitGetMnemonic; each
+/// fragment's command is selected here by a bitfield of the packed word
+/// returned from the generated getMnemonic().
+void AsmWriterEmitter::EmitPrintInstruction(
+    raw_ostream &O,
+    std::vector<std::vector<std::string>> &TableDrivenOperandPrinters,
+    unsigned &BitsLeft, unsigned &AsmStrBits) {
+  const unsigned OpcodeInfoBits = 64;
+  Record *AsmWriter = Target.getAsmWriter();
+  StringRef ClassName = AsmWriter->getValueAsString("AsmWriterClassName");
+  bool PassSubtarget = AsmWriter->getValueAsInt("PassSubtarget");
+
+  // This function has some huge switch statements that cause excessive
+  // compile time in an LLVM profile instrumentation build. This print function
+  // usually is not frequently called in compilation. Here we disable the
+  // profile instrumentation for this function.
+  O << "/// printInstruction - This method is automatically generated by "
+       "tablegen\n"
+       "/// from the instruction set description.\n"
+       "LLVM_NO_PROFILE_INSTRUMENT_FUNCTION\n"
+       "void "
+    << Target.getName() << ClassName
+    << "::printInstruction(const MCInst *MI, uint64_t Address, "
+    << (PassSubtarget ? "const MCSubtargetInfo &STI, " : "")
+    << "raw_ostream &O) {\n";
+
+  // Emit the initial tab character.
+  O << " O << \"\\t\";\n\n";
+
+  // Emit the starting string.
+  O << " auto MnemonicInfo = getMnemonic(MI);\n\n";
+  O << " O << MnemonicInfo.first;\n\n";
+
+  O << " uint" << ((BitsLeft < (OpcodeInfoBits - 32)) ? 64 : 32)
+    << "_t Bits = MnemonicInfo.second;\n"
+    << " assert(Bits != 0 && \"Cannot print this instruction.\");\n";
+
+  // Output the table driven operand information.
+  BitsLeft = OpcodeInfoBits-AsmStrBits;
+  for (unsigned i = 0, e = TableDrivenOperandPrinters.size(); i != e; ++i) {
+    std::vector<std::string> &Commands = TableDrivenOperandPrinters[i];
+
+    // Compute the number of bits we need to represent these cases, this is
+    // ceil(log2(numentries)).
+    unsigned NumBits = Log2_32_Ceil(Commands.size());
+    assert(NumBits <= BitsLeft && "consistency error");
+
+    // Emit code to extract this field from Bits.
+    O << "\n // Fragment " << i << " encoded into " << NumBits
+      << " bits for " << Commands.size() << " unique commands.\n";
+
+    if (Commands.size() == 2) {
+      // Emit two possibilities with if/else.
+      O << " if ((Bits >> "
+        << (OpcodeInfoBits-BitsLeft) << ") & "
+        << ((1 << NumBits)-1) << ") {\n"
+        << Commands[1]
+        << " } else {\n"
+        << Commands[0]
+        << " }\n\n";
+    } else if (Commands.size() == 1) {
+      // Emit a single possibility.
+      O << Commands[0] << "\n\n";
+    } else {
+      O << " switch ((Bits >> "
+        << (OpcodeInfoBits-BitsLeft) << ") & "
+        << ((1 << NumBits)-1) << ") {\n"
+        << " default: llvm_unreachable(\"Invalid command number.\");\n";
+
+      // Print out all the cases.
+      for (unsigned j = 0, e = Commands.size(); j != e; ++j) {
+        O << " case " << j << ":\n";
+        O << Commands[j];
+        O << " break;\n";
+      }
+      O << " }\n\n";
+    }
+    BitsLeft -= NumBits;
+  }
+
+  // Okay, delete instructions with no operand info left.
+  llvm::erase_if(Instructions,
+                 [](AsmWriterInst &Inst) { return Inst.Operands.empty(); });
+
+  // Because this is a vector, we want to emit from the end. Reverse all of the
+  // elements in the vector.
+  std::reverse(Instructions.begin(), Instructions.end());
+
+
+  // Now that we've emitted all of the operand info that fit into 64 bits, emit
+  // information for those instructions that are left. This is a less dense
+  // encoding, but we expect the main 64-bit table to handle the majority of
+  // instructions.
+  if (!Instructions.empty()) {
+    // Find the opcode # of inline asm.
+    O << " switch (MI->getOpcode()) {\n";
+    O << " default: llvm_unreachable(\"Unexpected opcode.\");\n";
+    while (!Instructions.empty())
+      EmitInstructions(Instructions, O, PassSubtarget);
+
+    O << " }\n";
+  }
+
+  O << "}\n";
+}
+
+/// emitRegisterNameString - Emit two static tables for one register-name
+/// variant: "AsmStrs<AltName>", a merged string table of register names, and
+/// "RegAsmOffset<AltName>", per-register offsets into it. A register lacking
+/// the requested alternate name gets an empty string, which the generated
+/// lookup code treats as an error flag.
+static void
+emitRegisterNameString(raw_ostream &O, StringRef AltName,
+                       const std::deque<CodeGenRegister> &Registers) {
+  SequenceToOffsetTable<std::string> StringTable;
+  SmallVector<std::string, 4> AsmNames(Registers.size());
+  unsigned i = 0;
+  for (const auto &Reg : Registers) {
+    std::string &AsmName = AsmNames[i++];
+
+    // "NoRegAltName" is special. We don't need to do a lookup for that,
+    // as it's just a reference to the default register name.
+    if (AltName == "" || AltName == "NoRegAltName") {
+      AsmName = std::string(Reg.TheDef->getValueAsString("AsmName"));
+      if (AsmName.empty())
+        AsmName = std::string(Reg.getName());
+    } else {
+      // Make sure the register has an alternate name for this index.
+      std::vector<Record*> AltNameList =
+        Reg.TheDef->getValueAsListOfDefs("RegAltNameIndices");
+      unsigned Idx = 0, e;
+      // Linear scan for the position of AltName in this register's index list.
+      for (e = AltNameList.size();
+           Idx < e && (AltNameList[Idx]->getName() != AltName);
+           ++Idx)
+        ;
+      // If the register has an alternate name for this index, use it.
+      // Otherwise, leave it empty as an error flag.
+      if (Idx < e) {
+        std::vector<StringRef> AltNames =
+          Reg.TheDef->getValueAsListOfStrings("AltNames");
+        if (AltNames.size() <= Idx)
+          PrintFatalError(Reg.TheDef->getLoc(),
+                          "Register definition missing alt name for '" +
+                          AltName + "'.");
+        AsmName = std::string(AltNames[Idx]);
+      }
+    }
+    StringTable.add(AsmName);
+  }
+
+  StringTable.layout();
+  StringTable.emitStringLiteralDef(O, Twine(" static const char AsmStrs") +
+                                          AltName + "[]");
+
+  // Offsets are emitted 14 per line; entries parallel the Registers order.
+  O << " static const " << getMinimalTypeForRange(StringTable.size() - 1, 32)
+    << " RegAsmOffset" << AltName << "[] = {";
+  for (unsigned i = 0, e = Registers.size(); i != e; ++i) {
+    if ((i % 14) == 0)
+      O << "\n ";
+    O << StringTable.get(AsmNames[i]) << ", ";
+  }
+  O << "\n };\n"
+    << "\n";
+}
+
+/// EmitGetRegisterName - Emit the generated getRegisterName() method. When
+/// the target defines more than one register alt-name index, the generated
+/// method takes an AltIdx parameter and switches among the per-index tables
+/// emitted by emitRegisterNameString, falling back to the index's
+/// FallbackRegAltNameIndex (if set) when the selected name is empty.
+void AsmWriterEmitter::EmitGetRegisterName(raw_ostream &O) {
+  Record *AsmWriter = Target.getAsmWriter();
+  StringRef ClassName = AsmWriter->getValueAsString("AsmWriterClassName");
+  const auto &Registers = Target.getRegBank().getRegisters();
+  const std::vector<Record*> &AltNameIndices = Target.getRegAltNameIndices();
+  bool hasAltNames = AltNameIndices.size() > 1;
+  StringRef Namespace = Registers.front().TheDef->getValueAsString("Namespace");
+
+  O <<
+  "\n\n/// getRegisterName - This method is automatically generated by tblgen\n"
+  "/// from the register set description. This returns the assembler name\n"
+  "/// for the specified register.\n"
+  "const char *" << Target.getName() << ClassName << "::";
+  if (hasAltNames)
+    O << "\ngetRegisterName(MCRegister Reg, unsigned AltIdx) {\n";
+  else
+    O << "getRegisterName(MCRegister Reg) {\n";
+  // Register numbers are 1-based (0 is "no register"), hence the RegNo-1
+  // indexing in the generated lookups below.
+  O << " unsigned RegNo = Reg.id();\n"
+    << " assert(RegNo && RegNo < " << (Registers.size() + 1)
+    << " && \"Invalid register number!\");\n"
+    << "\n";
+
+  if (hasAltNames) {
+    for (const Record *R : AltNameIndices)
+      emitRegisterNameString(O, R->getName(), Registers);
+  } else
+    emitRegisterNameString(O, "", Registers);
+
+  if (hasAltNames) {
+    O << " switch(AltIdx) {\n"
+      << " default: llvm_unreachable(\"Invalid register alt name index!\");\n";
+    for (const Record *R : AltNameIndices) {
+      StringRef AltName = R->getName();
+      O << " case ";
+      if (!Namespace.empty())
+        O << Namespace << "::";
+      O << AltName << ":\n";
+      // An empty string in the table means the register has no name for this
+      // index: either assert (no fallback) or recurse with the fallback index.
+      if (R->isValueUnset("FallbackRegAltNameIndex"))
+        O << " assert(*(AsmStrs" << AltName << "+RegAsmOffset" << AltName
+          << "[RegNo-1]) &&\n"
+          << " \"Invalid alt name index for register!\");\n";
+      else {
+        O << " if (!*(AsmStrs" << AltName << "+RegAsmOffset" << AltName
+          << "[RegNo-1]))\n"
+          << " return getRegisterName(RegNo, ";
+        if (!Namespace.empty())
+          O << Namespace << "::";
+        O << R->getValueAsDef("FallbackRegAltNameIndex")->getName() << ");\n";
+      }
+      O << " return AsmStrs" << AltName << "+RegAsmOffset" << AltName
+        << "[RegNo-1];\n";
+    }
+    O << " }\n";
+  } else {
+    O << " assert (*(AsmStrs+RegAsmOffset[RegNo-1]) &&\n"
+      << " \"Invalid alt name index for register!\");\n"
+      << " return AsmStrs+RegAsmOffset[RegNo-1];\n";
+  }
+  O << "}\n";
+}
+
+namespace {
+
+// IAPrinter - Holds information about an InstAlias. Two InstAliases match if
+// they both have the same conditionals. In which case, we cannot print out the
+// alias for that pattern.
+class IAPrinter {
+  // Maps an operand name from the alias asm string to a pair of
+  // (MI operand index, print-method index; -1 means default printOperand).
+  std::map<StringRef, std::pair<int, int>> OpMap;
+
+  // AliasPatternCond initializer strings; all must hold for this alias.
+  std::vector<std::string> Conds;
+
+  // Textual form of the resulting instruction (for generated comments).
+  std::string Result;
+  // Flattened alias asm string, re-encoded by formatAliasString().
+  std::string AsmString;
+
+  // Number of MCInst operands of the instruction being aliased.
+  unsigned NumMIOps;
+
+public:
+  IAPrinter(std::string R, std::string AS, unsigned NumMIOps)
+      : Result(std::move(R)), AsmString(std::move(AS)), NumMIOps(NumMIOps) {}
+
+  void addCond(std::string C) { Conds.push_back(std::move(C)); }
+  ArrayRef<std::string> getConds() const { return Conds; }
+  size_t getCondCount() const { return Conds.size(); }
+
+  // Record where a named operand lives. The ranges keep the values
+  // encodable in the single bytes emitted by formatAliasString() below.
+  void addOperand(StringRef Op, int OpIdx, int PrintMethodIdx = -1) {
+    assert(OpIdx >= 0 && OpIdx < 0xFE && "Idx out of range");
+    assert(PrintMethodIdx >= -1 && PrintMethodIdx < 0xFF &&
+           "Idx out of range");
+    OpMap[Op] = std::make_pair(OpIdx, PrintMethodIdx);
+  }
+
+  unsigned getNumMIOps() { return NumMIOps; }
+
+  StringRef getResult() { return Result; }
+
+  bool isOpMapped(StringRef Op) { return OpMap.find(Op) != OpMap.end(); }
+  int getOpIndex(StringRef Op) { return OpMap[Op].first; }
+  std::pair<int, int> &getOpData(StringRef Op) { return OpMap[Op]; }
+
+  // Parse a "$name" or "${name}" operand reference beginning at Start.
+  // Returns the bare name and the iterator just past the reference.
+  std::pair<StringRef, StringRef::iterator> parseName(StringRef::iterator Start,
+                                                      StringRef::iterator End) {
+    StringRef::iterator I = Start;
+    StringRef::iterator Next;
+    if (*I == '{') {
+      // ${some_name}
+      Start = ++I;
+      while (I != End && *I != '}')
+        ++I;
+      Next = I;
+      // eat the final '}'
+      if (Next != End)
+        ++Next;
+    } else {
+      // $name, just eat the usual suspects.
+      while (I != End && (isAlnum(*I) || *I == '_'))
+        ++I;
+      Next = I;
+    }
+
+    return std::make_pair(StringRef(Start, I - Start), Next);
+  }
+
+  // Re-encode AsmString with operand references replaced by escape bytes.
+  // UnescapedSize accumulates the length of the string after the generated
+  // C++ escapes are decoded (used to compute table offsets).
+  std::string formatAliasString(uint32_t &UnescapedSize) {
+    // Directly mangle mapped operands into the string. Each operand is
+    // identified by a '$' sign followed by a byte identifying the number of the
+    // operand. We add one to the index to avoid zero bytes.
+    StringRef ASM(AsmString);
+    std::string OutString;
+    raw_string_ostream OS(OutString);
+    for (StringRef::iterator I = ASM.begin(), E = ASM.end(); I != E;) {
+      OS << *I;
+      ++UnescapedSize;
+      if (*I == '$') {
+        StringRef Name;
+        std::tie(Name, I) = parseName(++I, E);
+        assert(isOpMapped(Name) && "Unmapped operand!");
+
+        int OpIndex, PrintIndex;
+        std::tie(OpIndex, PrintIndex) = getOpData(Name);
+        if (PrintIndex == -1) {
+          // Can use the default printOperand route.
+          OS << format("\\x%02X", (unsigned char)OpIndex + 1);
+          ++UnescapedSize;
+        } else {
+          // 3 bytes if a PrintMethod is needed: 0xFF, the MCInst operand
+          // number, and which of our pre-detected Methods to call.
+          OS << format("\\xFF\\x%02X\\x%02X", OpIndex + 1, PrintIndex + 1);
+          UnescapedSize += 3;
+        }
+      } else {
+        ++I;
+      }
+    }
+    return OutString;
+  }
+
+  // Equality compares only operand count and conditions; two aliases with
+  // identical conditions are ambiguous and neither can be printed.
+  bool operator==(const IAPrinter &RHS) const {
+    if (NumMIOps != RHS.NumMIOps)
+      return false;
+    if (Conds.size() != RHS.Conds.size())
+      return false;
+
+    unsigned Idx = 0;
+    for (const auto &str : Conds)
+      if (str != RHS.Conds[Idx++])
+        return false;
+
+    return true;
+  }
+};
+
+} // end anonymous namespace
+
+/// CountNumOperands - Approximate the number of operands in an asm string by
+/// counting its whitespace separators (spaces and tabs). Variant is unused
+/// but retained so callers need not change.
+static unsigned CountNumOperands(StringRef AsmString, unsigned Variant) {
+  unsigned NumSeparators = 0;
+  for (char C : AsmString)
+    if (C == ' ' || C == '\t')
+      ++NumSeparators;
+  return NumSeparators;
+}
+
+namespace {
+
+/// Orders (alias, priority) pairs so that aliases with larger priorities
+/// come first; equal priorities fall back to record-ID order so the result
+/// does not depend on pointer values or allocation order.
+struct AliasPriorityComparator {
+  typedef std::pair<CodeGenInstAlias, int> ValueType;
+  bool operator()(const ValueType &LHS, const ValueType &RHS) const {
+    // Aliases with larger priorities should be considered first.
+    if (LHS.second != RHS.second)
+      return LHS.second > RHS.second;
+
+    // Same priority: we don't actually care about the order, but for
+    // consistency it shouldn't depend on pointer comparisons, so break the
+    // tie by record ID.
+    return LessRecordByID()(LHS.first.TheDef, RHS.first.TheDef);
+  }
+};
+
+} // end anonymous namespace
+
+void AsmWriterEmitter::EmitPrintAliasInstruction(raw_ostream &O) {
+ Record *AsmWriter = Target.getAsmWriter();
+
+ O << "\n#ifdef PRINT_ALIAS_INSTR\n";
+ O << "#undef PRINT_ALIAS_INSTR\n\n";
+
+ //////////////////////////////
+ // Gather information about aliases we need to print
+ //////////////////////////////
+
+ // Emit the method that prints the alias instruction.
+ StringRef ClassName = AsmWriter->getValueAsString("AsmWriterClassName");
+ unsigned Variant = AsmWriter->getValueAsInt("Variant");
+ bool PassSubtarget = AsmWriter->getValueAsInt("PassSubtarget");
+
+ std::vector<Record*> AllInstAliases =
+ Records.getAllDerivedDefinitions("InstAlias");
+
+ // Create a map from the qualified name to a list of potential matches.
+ typedef std::set<std::pair<CodeGenInstAlias, int>, AliasPriorityComparator>
+ AliasWithPriority;
+ std::map<std::string, AliasWithPriority> AliasMap;
+ for (Record *R : AllInstAliases) {
+ int Priority = R->getValueAsInt("EmitPriority");
+ if (Priority < 1)
+ continue; // Aliases with priority 0 are never emitted.
+
+ const DagInit *DI = R->getValueAsDag("ResultInst");
+ AliasMap[getQualifiedName(DI->getOperatorAsDef(R->getLoc()))].insert(
+ std::make_pair(CodeGenInstAlias(R, Target), Priority));
+ }
+
+ // A map of which conditions need to be met for each instruction operand
+ // before it can be matched to the mnemonic.
+ std::map<std::string, std::vector<IAPrinter>> IAPrinterMap;
+
+ std::vector<std::pair<std::string, bool>> PrintMethods;
+
+ // A list of MCOperandPredicates for all operands in use, and the reverse map
+ std::vector<const Record*> MCOpPredicates;
+ DenseMap<const Record*, unsigned> MCOpPredicateMap;
+
+ for (auto &Aliases : AliasMap) {
+ // Collection of instruction alias rules. May contain ambiguous rules.
+ std::vector<IAPrinter> IAPs;
+
+ for (auto &Alias : Aliases.second) {
+ const CodeGenInstAlias &CGA = Alias.first;
+ unsigned LastOpNo = CGA.ResultInstOperandIndex.size();
+ std::string FlatInstAsmString =
+ CodeGenInstruction::FlattenAsmStringVariants(CGA.ResultInst->AsmString,
+ Variant);
+ unsigned NumResultOps = CountNumOperands(FlatInstAsmString, Variant);
+
+ std::string FlatAliasAsmString =
+ CodeGenInstruction::FlattenAsmStringVariants(CGA.AsmString, Variant);
+ UnescapeAliasString(FlatAliasAsmString);
+
+ // Don't emit the alias if it has more operands than what it's aliasing.
+ if (NumResultOps < CountNumOperands(FlatAliasAsmString, Variant))
+ continue;
+
+ StringRef Namespace = Target.getName();
+ unsigned NumMIOps = 0;
+ for (auto &ResultInstOpnd : CGA.ResultInst->Operands)
+ NumMIOps += ResultInstOpnd.MINumOperands;
+
+ IAPrinter IAP(CGA.Result->getAsString(), FlatAliasAsmString, NumMIOps);
+
+ unsigned MIOpNum = 0;
+ for (unsigned i = 0, e = LastOpNo; i != e; ++i) {
+ // Skip over tied operands as they're not part of an alias declaration.
+ auto &Operands = CGA.ResultInst->Operands;
+ while (true) {
+ unsigned OpNum = Operands.getSubOperandNumber(MIOpNum).first;
+ if (Operands[OpNum].MINumOperands == 1 &&
+ Operands[OpNum].getTiedRegister() != -1) {
+ // Tied operands of different RegisterClass should be explicit within
+ // an instruction's syntax and so cannot be skipped.
+ int TiedOpNum = Operands[OpNum].getTiedRegister();
+ if (Operands[OpNum].Rec->getName() ==
+ Operands[TiedOpNum].Rec->getName()) {
+ ++MIOpNum;
+ continue;
+ }
+ }
+ break;
+ }
+
+ // Ignore unchecked result operands.
+ while (IAP.getCondCount() < MIOpNum)
+ IAP.addCond("AliasPatternCond::K_Ignore, 0");
+
+ const CodeGenInstAlias::ResultOperand &RO = CGA.ResultOperands[i];
+
+ switch (RO.Kind) {
+ case CodeGenInstAlias::ResultOperand::K_Record: {
+ const Record *Rec = RO.getRecord();
+ StringRef ROName = RO.getName();
+ int PrintMethodIdx = -1;
+
+ // These two may have a PrintMethod, which we want to record (if it's
+ // the first time we've seen it) and provide an index for the aliasing
+ // code to use.
+ if (Rec->isSubClassOf("RegisterOperand") ||
+ Rec->isSubClassOf("Operand")) {
+ StringRef PrintMethod = Rec->getValueAsString("PrintMethod");
+ bool IsPCRel =
+ Rec->getValueAsString("OperandType") == "OPERAND_PCREL";
+ if (PrintMethod != "" && PrintMethod != "printOperand") {
+ PrintMethodIdx = llvm::find_if(PrintMethods,
+ [&](auto &X) {
+ return X.first == PrintMethod;
+ }) -
+ PrintMethods.begin();
+ if (static_cast<unsigned>(PrintMethodIdx) == PrintMethods.size())
+ PrintMethods.emplace_back(std::string(PrintMethod), IsPCRel);
+ }
+ }
+
+ if (Rec->isSubClassOf("RegisterOperand"))
+ Rec = Rec->getValueAsDef("RegClass");
+ if (Rec->isSubClassOf("RegisterClass")) {
+ if (!IAP.isOpMapped(ROName)) {
+ IAP.addOperand(ROName, MIOpNum, PrintMethodIdx);
+ Record *R = CGA.ResultOperands[i].getRecord();
+ if (R->isSubClassOf("RegisterOperand"))
+ R = R->getValueAsDef("RegClass");
+ IAP.addCond(std::string(
+ formatv("AliasPatternCond::K_RegClass, {0}::{1}RegClassID",
+ Namespace, R->getName())));
+ } else {
+ IAP.addCond(std::string(formatv(
+ "AliasPatternCond::K_TiedReg, {0}", IAP.getOpIndex(ROName))));
+ }
+ } else {
+ // Assume all printable operands are desired for now. This can be
+ // overridden in the InstAlias instantiation if necessary.
+ IAP.addOperand(ROName, MIOpNum, PrintMethodIdx);
+
+ // There might be an additional predicate on the MCOperand
+ unsigned Entry = MCOpPredicateMap[Rec];
+ if (!Entry) {
+ if (!Rec->isValueUnset("MCOperandPredicate")) {
+ MCOpPredicates.push_back(Rec);
+ Entry = MCOpPredicates.size();
+ MCOpPredicateMap[Rec] = Entry;
+ } else
+ break; // No conditions on this operand at all
+ }
+ IAP.addCond(
+ std::string(formatv("AliasPatternCond::K_Custom, {0}", Entry)));
+ }
+ break;
+ }
+ case CodeGenInstAlias::ResultOperand::K_Imm: {
+ // Just because the alias has an immediate result, doesn't mean the
+ // MCInst will. An MCExpr could be present, for example.
+ auto Imm = CGA.ResultOperands[i].getImm();
+ int32_t Imm32 = int32_t(Imm);
+ if (Imm != Imm32)
+ PrintFatalError("Matching an alias with an immediate out of the "
+ "range of int32_t is not supported");
+ IAP.addCond(std::string(
+ formatv("AliasPatternCond::K_Imm, uint32_t({0})", Imm32)));
+ break;
+ }
+ case CodeGenInstAlias::ResultOperand::K_Reg:
+ if (!CGA.ResultOperands[i].getRegister()) {
+ IAP.addCond(std::string(formatv(
+ "AliasPatternCond::K_Reg, {0}::NoRegister", Namespace)));
+ break;
+ }
+
+ StringRef Reg = CGA.ResultOperands[i].getRegister()->getName();
+ IAP.addCond(std::string(
+ formatv("AliasPatternCond::K_Reg, {0}::{1}", Namespace, Reg)));
+ break;
+ }
+
+ MIOpNum += RO.getMINumOperands();
+ }
+
+ std::vector<Record *> ReqFeatures;
+ if (PassSubtarget) {
+ // We only consider ReqFeatures predicates if PassSubtarget
+ std::vector<Record *> RF =
+ CGA.TheDef->getValueAsListOfDefs("Predicates");
+ copy_if(RF, std::back_inserter(ReqFeatures), [](Record *R) {
+ return R->getValueAsBit("AssemblerMatcherPredicate");
+ });
+ }
+
+ for (Record *const R : ReqFeatures) {
+ const DagInit *D = R->getValueAsDag("AssemblerCondDag");
+ std::string CombineType = D->getOperator()->getAsString();
+ if (CombineType != "any_of" && CombineType != "all_of")
+ PrintFatalError(R->getLoc(), "Invalid AssemblerCondDag!");
+ if (D->getNumArgs() == 0)
+ PrintFatalError(R->getLoc(), "Invalid AssemblerCondDag!");
+ bool IsOr = CombineType == "any_of";
+ // Change (any_of FeatureAll, (any_of ...)) to (any_of FeatureAll, ...).
+ if (IsOr && D->getNumArgs() == 2 && isa<DagInit>(D->getArg(1))) {
+ DagInit *RHS = dyn_cast<DagInit>(D->getArg(1));
+ SmallVector<Init *> Args{D->getArg(0)};
+ SmallVector<StringInit *> ArgNames{D->getArgName(0)};
+ for (unsigned i = 0, e = RHS->getNumArgs(); i != e; ++i) {
+ Args.push_back(RHS->getArg(i));
+ ArgNames.push_back(RHS->getArgName(i));
+ }
+ D = DagInit::get(D->getOperator(), nullptr, Args, ArgNames);
+ }
+
+ for (auto *Arg : D->getArgs()) {
+ bool IsNeg = false;
+ if (auto *NotArg = dyn_cast<DagInit>(Arg)) {
+ if (NotArg->getOperator()->getAsString() != "not" ||
+ NotArg->getNumArgs() != 1)
+ PrintFatalError(R->getLoc(), "Invalid AssemblerCondDag!");
+ Arg = NotArg->getArg(0);
+ IsNeg = true;
+ }
+ if (!isa<DefInit>(Arg) ||
+ !cast<DefInit>(Arg)->getDef()->isSubClassOf("SubtargetFeature"))
+ PrintFatalError(R->getLoc(), "Invalid AssemblerCondDag!");
+
+ IAP.addCond(std::string(formatv(
+ "AliasPatternCond::K_{0}{1}Feature, {2}::{3}", IsOr ? "Or" : "",
+ IsNeg ? "Neg" : "", Namespace, Arg->getAsString())));
+ }
+ // If an AssemblerPredicate with ors is used, note end of list should
+ // these be combined.
+ if (IsOr)
+ IAP.addCond("AliasPatternCond::K_EndOrFeatures, 0");
+ }
+
+ IAPrinterMap[Aliases.first].push_back(std::move(IAP));
+ }
+ }
+
+ //////////////////////////////
+ // Write out the printAliasInstr function
+ //////////////////////////////
+
+ std::string Header;
+ raw_string_ostream HeaderO(Header);
+
+ HeaderO << "bool " << Target.getName() << ClassName
+ << "::printAliasInstr(const MCInst"
+ << " *MI, uint64_t Address, "
+ << (PassSubtarget ? "const MCSubtargetInfo &STI, " : "")
+ << "raw_ostream &OS) {\n";
+
+ std::string PatternsForOpcode;
+ raw_string_ostream OpcodeO(PatternsForOpcode);
+
+ unsigned PatternCount = 0;
+ std::string Patterns;
+ raw_string_ostream PatternO(Patterns);
+
+ unsigned CondCount = 0;
+ std::string Conds;
+ raw_string_ostream CondO(Conds);
+
+ // All flattened alias strings.
+ std::map<std::string, uint32_t> AsmStringOffsets;
+ std::vector<std::pair<uint32_t, std::string>> AsmStrings;
+ size_t AsmStringsSize = 0;
+
+ // Iterate over the opcodes in enum order so they are sorted by opcode for
+ // binary search.
+ for (const CodeGenInstruction *Inst : NumberedInstructions) {
+ auto It = IAPrinterMap.find(getQualifiedName(Inst->TheDef));
+ if (It == IAPrinterMap.end())
+ continue;
+ std::vector<IAPrinter> &IAPs = It->second;
+ std::vector<IAPrinter*> UniqueIAPs;
+
+ // Remove any ambiguous alias rules.
+ for (auto &LHS : IAPs) {
+ bool IsDup = false;
+ for (const auto &RHS : IAPs) {
+ if (&LHS != &RHS && LHS == RHS) {
+ IsDup = true;
+ break;
+ }
+ }
+
+ if (!IsDup)
+ UniqueIAPs.push_back(&LHS);
+ }
+
+ if (UniqueIAPs.empty()) continue;
+
+ unsigned PatternStart = PatternCount;
+
+ // Insert the pattern start and opcode in the pattern list for debugging.
+ PatternO << formatv(" // {0} - {1}\n", It->first, PatternStart);
+
+ for (IAPrinter *IAP : UniqueIAPs) {
+ // Start each condition list with a comment of the resulting pattern that
+ // we're trying to match.
+ unsigned CondStart = CondCount;
+ CondO << formatv(" // {0} - {1}\n", IAP->getResult(), CondStart);
+ for (const auto &Cond : IAP->getConds())
+ CondO << " {" << Cond << "},\n";
+ CondCount += IAP->getCondCount();
+
+ // After operands have been examined, re-encode the alias string with
+ // escapes indicating how operands should be printed.
+ uint32_t UnescapedSize = 0;
+ std::string EncodedAsmString = IAP->formatAliasString(UnescapedSize);
+ auto Insertion =
+ AsmStringOffsets.insert({EncodedAsmString, AsmStringsSize});
+ if (Insertion.second) {
+ // If the string is new, add it to the vector.
+ AsmStrings.push_back({AsmStringsSize, EncodedAsmString});
+ AsmStringsSize += UnescapedSize + 1;
+ }
+ unsigned AsmStrOffset = Insertion.first->second;
+
+ PatternO << formatv(" {{{0}, {1}, {2}, {3} },\n", AsmStrOffset,
+ CondStart, IAP->getNumMIOps(), IAP->getCondCount());
+ ++PatternCount;
+ }
+
+ OpcodeO << formatv(" {{{0}, {1}, {2} },\n", It->first, PatternStart,
+ PatternCount - PatternStart);
+ }
+
+ if (OpcodeO.str().empty()) {
+ O << HeaderO.str();
+ O << " return false;\n";
+ O << "}\n\n";
+ O << "#endif // PRINT_ALIAS_INSTR\n";
+ return;
+ }
+
+ // Forward declare the validation method if needed.
+ if (!MCOpPredicates.empty())
+ O << "static bool " << Target.getName() << ClassName
+ << "ValidateMCOperand(const MCOperand &MCOp,\n"
+ << " const MCSubtargetInfo &STI,\n"
+ << " unsigned PredicateIndex);\n";
+
+ O << HeaderO.str();
+ O.indent(2) << "static const PatternsForOpcode OpToPatterns[] = {\n";
+ O << OpcodeO.str();
+ O.indent(2) << "};\n\n";
+ O.indent(2) << "static const AliasPattern Patterns[] = {\n";
+ O << PatternO.str();
+ O.indent(2) << "};\n\n";
+ O.indent(2) << "static const AliasPatternCond Conds[] = {\n";
+ O << CondO.str();
+ O.indent(2) << "};\n\n";
+ O.indent(2) << "static const char AsmStrings[] =\n";
+ for (const auto &P : AsmStrings) {
+ O.indent(4) << "/* " << P.first << " */ \"" << P.second << "\\0\"\n";
+ }
+
+ O.indent(2) << ";\n\n";
+
+ // Assert that the opcode table is sorted. Use a static local constructor to
+ // ensure that the check only happens once on first run.
+ O << "#ifndef NDEBUG\n";
+ O.indent(2) << "static struct SortCheck {\n";
+ O.indent(2) << " SortCheck(ArrayRef<PatternsForOpcode> OpToPatterns) {\n";
+ O.indent(2) << " assert(std::is_sorted(\n";
+ O.indent(2) << " OpToPatterns.begin(), OpToPatterns.end(),\n";
+ O.indent(2) << " [](const PatternsForOpcode &L, const "
+ "PatternsForOpcode &R) {\n";
+ O.indent(2) << " return L.Opcode < R.Opcode;\n";
+ O.indent(2) << " }) &&\n";
+ O.indent(2) << " \"tablegen failed to sort opcode patterns\");\n";
+ O.indent(2) << " }\n";
+ O.indent(2) << "} sortCheckVar(OpToPatterns);\n";
+ O << "#endif\n\n";
+
+ O.indent(2) << "AliasMatchingData M {\n";
+ O.indent(2) << " ArrayRef(OpToPatterns),\n";
+ O.indent(2) << " ArrayRef(Patterns),\n";
+ O.indent(2) << " ArrayRef(Conds),\n";
+ O.indent(2) << " StringRef(AsmStrings, std::size(AsmStrings)),\n";
+ if (MCOpPredicates.empty())
+ O.indent(2) << " nullptr,\n";
+ else
+ O.indent(2) << " &" << Target.getName() << ClassName << "ValidateMCOperand,\n";
+ O.indent(2) << "};\n";
+
+ O.indent(2) << "const char *AsmString = matchAliasPatterns(MI, "
+ << (PassSubtarget ? "&STI" : "nullptr") << ", M);\n";
+ O.indent(2) << "if (!AsmString) return false;\n\n";
+
+ // Code that prints the alias, replacing the operands with the ones from the
+ // MCInst.
+ O << " unsigned I = 0;\n";
+ O << " while (AsmString[I] != ' ' && AsmString[I] != '\\t' &&\n";
+ O << " AsmString[I] != '$' && AsmString[I] != '\\0')\n";
+ O << " ++I;\n";
+ O << " OS << '\\t' << StringRef(AsmString, I);\n";
+
+ O << " if (AsmString[I] != '\\0') {\n";
+ O << " if (AsmString[I] == ' ' || AsmString[I] == '\\t') {\n";
+ O << " OS << '\\t';\n";
+ O << " ++I;\n";
+ O << " }\n";
+ O << " do {\n";
+ O << " if (AsmString[I] == '$') {\n";
+ O << " ++I;\n";
+ O << " if (AsmString[I] == (char)0xff) {\n";
+ O << " ++I;\n";
+ O << " int OpIdx = AsmString[I++] - 1;\n";
+ O << " int PrintMethodIdx = AsmString[I++] - 1;\n";
+ O << " printCustomAliasOperand(MI, Address, OpIdx, PrintMethodIdx, ";
+ O << (PassSubtarget ? "STI, " : "");
+ O << "OS);\n";
+ O << " } else\n";
+ O << " printOperand(MI, unsigned(AsmString[I++]) - 1, ";
+ O << (PassSubtarget ? "STI, " : "");
+ O << "OS);\n";
+ O << " } else {\n";
+ O << " OS << AsmString[I++];\n";
+ O << " }\n";
+ O << " } while (AsmString[I] != '\\0');\n";
+ O << " }\n\n";
+
+ O << " return true;\n";
+ O << "}\n\n";
+
+ //////////////////////////////
+ // Write out the printCustomAliasOperand function
+ //////////////////////////////
+
+ O << "void " << Target.getName() << ClassName << "::"
+ << "printCustomAliasOperand(\n"
+ << " const MCInst *MI, uint64_t Address, unsigned OpIdx,\n"
+ << " unsigned PrintMethodIdx,\n"
+ << (PassSubtarget ? " const MCSubtargetInfo &STI,\n" : "")
+ << " raw_ostream &OS) {\n";
+ if (PrintMethods.empty())
+ O << " llvm_unreachable(\"Unknown PrintMethod kind\");\n";
+ else {
+ O << " switch (PrintMethodIdx) {\n"
+ << " default:\n"
+ << " llvm_unreachable(\"Unknown PrintMethod kind\");\n"
+ << " break;\n";
+
+ for (unsigned i = 0; i < PrintMethods.size(); ++i) {
+ O << " case " << i << ":\n"
+ << " " << PrintMethods[i].first << "(MI, "
+ << (PrintMethods[i].second ? "Address, " : "") << "OpIdx, "
+ << (PassSubtarget ? "STI, " : "") << "OS);\n"
+ << " break;\n";
+ }
+ O << " }\n";
+ }
+ O << "}\n\n";
+
+ if (!MCOpPredicates.empty()) {
+ O << "static bool " << Target.getName() << ClassName
+ << "ValidateMCOperand(const MCOperand &MCOp,\n"
+ << " const MCSubtargetInfo &STI,\n"
+ << " unsigned PredicateIndex) {\n"
+ << " switch (PredicateIndex) {\n"
+ << " default:\n"
+ << " llvm_unreachable(\"Unknown MCOperandPredicate kind\");\n"
+ << " break;\n";
+
+ for (unsigned i = 0; i < MCOpPredicates.size(); ++i) {
+ StringRef MCOpPred = MCOpPredicates[i]->getValueAsString("MCOperandPredicate");
+ O << " case " << i + 1 << ": {\n"
+ << MCOpPred.data() << "\n"
+ << " }\n";
+ }
+ O << " }\n"
+ << "}\n\n";
+ }
+
+ O << "#endif // PRINT_ALIAS_INSTR\n";
+}
+
+/// Build the emitter state: read the target's AsmWriter record, select the
+/// requested asm-string variant, and parse every instruction that has a
+/// non-empty AsmString. PHI is explicitly skipped below.
+AsmWriterEmitter::AsmWriterEmitter(RecordKeeper &R) : Records(R), Target(R) {
+  Record *AsmWriter = Target.getAsmWriter();
+  unsigned Variant = AsmWriter->getValueAsInt("Variant");
+
+  // Get the instruction numbering.
+  NumberedInstructions = Target.getInstructionsByEnumValue();
+
+  for (unsigned i = 0, e = NumberedInstructions.size(); i != e; ++i) {
+    const CodeGenInstruction *I = NumberedInstructions[i];
+    // Only instructions with an asm string (and not PHI) are printable.
+    if (!I->AsmString.empty() && I->TheDef->getName() != "PHI")
+      Instructions.emplace_back(*I, i, Variant);
+  }
+}
+
+/// Emit all pieces of the assembly writer. EmitGetMnemonic populates
+/// TableDrivenOperandPrinters, BitsLeft and AsmStrBits, which
+/// EmitPrintInstruction then consumes — the call order matters.
+void AsmWriterEmitter::run(raw_ostream &O) {
+  std::vector<std::vector<std::string>> TableDrivenOperandPrinters;
+  unsigned BitsLeft = 0;
+  unsigned AsmStrBits = 0;
+  EmitGetMnemonic(O, TableDrivenOperandPrinters, BitsLeft, AsmStrBits);
+  EmitPrintInstruction(O, TableDrivenOperandPrinters, BitsLeft, AsmStrBits);
+  EmitGetRegisterName(O);
+  EmitPrintAliasInstruction(O);
+}
+
+namespace llvm {
+
+/// TableGen entry point: write the standard generated-file header followed by
+/// the complete assembly-writer source fragment for the target in RK.
+void EmitAsmWriter(RecordKeeper &RK, raw_ostream &OS) {
+  emitSourceFileHeader("Assembly Writer Source Fragment", OS);
+  AsmWriterEmitter(RK).run(OS);
+}
+
+} // end namespace llvm
diff --git a/contrib/libs/llvm16/utils/TableGen/AsmWriterInst.cpp b/contrib/libs/llvm16/utils/TableGen/AsmWriterInst.cpp
new file mode 100644
index 0000000000..4a78108d6f
--- /dev/null
+++ b/contrib/libs/llvm16/utils/TableGen/AsmWriterInst.cpp
@@ -0,0 +1,206 @@
+//===- AsmWriterInst.h - Classes encapsulating a printable inst -----------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// These classes implement a parser for assembly strings.
+//
+//===----------------------------------------------------------------------===//
+
+#include "AsmWriterInst.h"
+#include "CodeGenInstruction.h"
+#include "CodeGenTarget.h"
+#include "llvm/ADT/StringExtras.h"
+#include "llvm/TableGen/Error.h"
+#include "llvm/TableGen/Record.h"
+
+using namespace llvm;
+
+static bool isIdentChar(char C) { return isAlnum(C) || C == '_'; }
+
+/// getCode - Return the C++ statement that prints this operand. Literal text
+/// becomes an "O << ..." statement, literal statements are returned verbatim,
+/// and machine-instruction operands become a call to their printer method
+/// with the standard argument list (MI [, Address][, OpNo][, STI], O
+/// [, modifier]).
+std::string AsmWriterOperand::getCode(bool PassSubtarget) const {
+  if (OperandType == isLiteralTextOperand) {
+    if (Str.size() == 1)
+      return "O << '" + Str + "';";
+    return "O << \"" + Str + "\";";
+  }
+
+  if (OperandType == isLiteralStatementOperand)
+    return Str;
+
+  std::string Result = Str + "(MI";
+  if (PCRel)
+    Result += ", Address";
+  // ~0U marks operands (e.g. PrintSpecial) that take no MI operand index.
+  if (MIOpNo != ~0U)
+    Result += ", " + utostr(MIOpNo);
+  if (PassSubtarget)
+    Result += ", STI";
+  Result += ", O";
+  if (!MiModifier.empty())
+    Result += ", \"" + MiModifier + '"';
+  return Result + ");";
+}
+
+/// ParseAsmString - Parse the specified Instruction's AsmString into this
+/// AsmWriterInst.
+///
+AsmWriterInst::AsmWriterInst(const CodeGenInstruction &CGI, unsigned CGIIndex,
+                             unsigned Variant)
+    : CGI(&CGI), CGIIndex(CGIIndex) {
+
+  // NOTE: Any extensions to this code need to be mirrored in the
+  // AsmPrinter::printInlineAsm code that executes as compile time (assuming
+  // that inline asm strings should also get the new feature)!
+  std::string AsmString = CGI.FlattenAsmStringVariants(CGI.AsmString, Variant);
+  std::string::size_type LastEmitted = 0;
+  // Scan the flattened string, splitting it into literal runs, escape
+  // sequences, "$$" escapes, and $name / ${name:modifier} references.
+  while (LastEmitted != AsmString.size()) {
+    std::string::size_type DollarPos =
+      AsmString.find_first_of("$\\", LastEmitted);
+    if (DollarPos == std::string::npos) DollarPos = AsmString.size();
+
+    // Emit a constant string fragment.
+    if (DollarPos != LastEmitted) {
+      // Re-escape characters that must survive being embedded in a C++
+      // string literal by the generated printer.
+      for (; LastEmitted != DollarPos; ++LastEmitted)
+        switch (AsmString[LastEmitted]) {
+        case '\n':
+          AddLiteralString("\\n");
+          break;
+        case '\t':
+          AddLiteralString("\\t");
+          break;
+        case '"':
+          AddLiteralString("\\\"");
+          break;
+        case '\\':
+          AddLiteralString("\\\\");
+          break;
+        default:
+          AddLiteralString(std::string(1, AsmString[LastEmitted]));
+          break;
+        }
+    } else if (AsmString[DollarPos] == '\\') {
+      if (DollarPos+1 != AsmString.size()) {
+        if (AsmString[DollarPos+1] == 'n') {
+          AddLiteralString("\\n");
+        } else if (AsmString[DollarPos+1] == 't') {
+          AddLiteralString("\\t");
+        } else if (std::string("${|}\\").find(AsmString[DollarPos+1])
+                   != std::string::npos) {
+          AddLiteralString(std::string(1, AsmString[DollarPos+1]));
+        } else {
+          PrintFatalError(
+              CGI.TheDef->getLoc(),
+              "Non-supported escaped character found in instruction '" +
+                  CGI.TheDef->getName() + "'!");
+        }
+        LastEmitted = DollarPos+2;
+        continue;
+      }
+      // NOTE(review): if the '\' is the final character, LastEmitted is not
+      // advanced here and this loop would not terminate; .td asm strings are
+      // assumed never to end in a bare backslash — verify upstream.
+    } else if (DollarPos+1 != AsmString.size() &&
+               AsmString[DollarPos+1] == '$') {
+      AddLiteralString("$");  // "$$" -> $
+      LastEmitted = DollarPos+2;
+    } else {
+      // Get the name of the variable.
+      std::string::size_type VarEnd = DollarPos+1;
+
+      // handle ${foo}bar as $foo by detecting whether the character following
+      // the dollar sign is a curly brace.  If so, advance VarEnd and DollarPos
+      // so the variable name does not contain the leading curly brace.
+      bool hasCurlyBraces = false;
+      if (VarEnd < AsmString.size() && '{' == AsmString[VarEnd]) {
+        hasCurlyBraces = true;
+        ++DollarPos;
+        ++VarEnd;
+      }
+
+      while (VarEnd < AsmString.size() && isIdentChar(AsmString[VarEnd]))
+        ++VarEnd;
+      StringRef VarName(AsmString.data()+DollarPos+1, VarEnd-DollarPos-1);
+
+      // Modifier - Support ${foo:modifier} syntax, where "modifier" is passed
+      // into printOperand.  Also support ${:feature}, which is passed into
+      // PrintSpecial.
+      std::string Modifier;
+
+      // In order to avoid starting the next string at the terminating curly
+      // brace, advance the end position past it if we found an opening curly
+      // brace.
+      if (hasCurlyBraces) {
+        if (VarEnd >= AsmString.size())
+          PrintFatalError(
+              CGI.TheDef->getLoc(),
+              "Reached end of string before terminating curly brace in '" +
+                  CGI.TheDef->getName() + "'");
+
+        // Look for a modifier string.
+        if (AsmString[VarEnd] == ':') {
+          ++VarEnd;
+          if (VarEnd >= AsmString.size())
+            PrintFatalError(
+                CGI.TheDef->getLoc(),
+                "Reached end of string before terminating curly brace in '" +
+                    CGI.TheDef->getName() + "'");
+
+          std::string::size_type ModifierStart = VarEnd;
+          while (VarEnd < AsmString.size() && isIdentChar(AsmString[VarEnd]))
+            ++VarEnd;
+          Modifier = AsmString.substr(ModifierStart, VarEnd - ModifierStart);
+          if (Modifier.empty())
+            PrintFatalError(CGI.TheDef->getLoc(),
+                            "Bad operand modifier name in '" +
+                                CGI.TheDef->getName() + "'");
+        }
+
+        if (AsmString[VarEnd] != '}')
+          PrintFatalError(
+              CGI.TheDef->getLoc(),
+              "Variable name beginning with '{' did not end with '}' in '" +
+                  CGI.TheDef->getName() + "'");
+        ++VarEnd;
+      }
+      if (VarName.empty() && Modifier.empty())
+        PrintFatalError(CGI.TheDef->getLoc(),
+                        "Stray '$' in '" + CGI.TheDef->getName() +
+                            "' asm string, maybe you want $$?");
+
+      if (VarName.empty()) {
+        // Just a modifier, pass this into PrintSpecial.
+        Operands.emplace_back("PrintSpecial", ~0U, Modifier);
+      } else {
+        // Otherwise, normal operand.
+        unsigned OpNo = CGI.Operands.getOperandNamed(VarName);
+        CGIOperandList::OperandInfo OpInfo = CGI.Operands[OpNo];
+
+        unsigned MIOp = OpInfo.MIOperandNo;
+        Operands.emplace_back(OpInfo.PrinterMethodName, MIOp, Modifier,
+                              AsmWriterOperand::isMachineInstrOperand,
+                              OpInfo.OperandType == "MCOI::OPERAND_PCREL");
+      }
+      LastEmitted = VarEnd;
+    }
+  }
+
+  // Terminate the generated case with an explicit return.
+  Operands.emplace_back("return;", AsmWriterOperand::isLiteralStatementOperand);
+}
+
+/// MatchesAllButOneOp - If this instruction is exactly identical to the
+/// specified instruction except for one differing operand, return the differing
+/// operand number.  If more than one operand mismatches, return ~1, otherwise
+/// if the instructions are identical return ~0.
+unsigned AsmWriterInst::MatchesAllButOneOp(const AsmWriterInst &Other)const{
+  // Different operand counts can never be "all but one" identical.
+  if (Operands.size() != Other.Operands.size()) return ~1;
+
+  unsigned MismatchOperand = ~0U;
+  for (unsigned i = 0, e = Operands.size(); i != e; ++i) {
+    if (Operands[i] != Other.Operands[i]) {
+      if (MismatchOperand != ~0U) // Already have one mismatch?
+        return ~1U;
+      MismatchOperand = i;
+    }
+  }
+  return MismatchOperand;
+}
diff --git a/contrib/libs/llvm16/utils/TableGen/AsmWriterInst.h b/contrib/libs/llvm16/utils/TableGen/AsmWriterInst.h
new file mode 100644
index 0000000000..fe2b934e26
--- /dev/null
+++ b/contrib/libs/llvm16/utils/TableGen/AsmWriterInst.h
@@ -0,0 +1,108 @@
+//===- AsmWriterInst.h - Classes encapsulating a printable inst -*- C++ -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// These classes implement a parser for assembly strings. The parser splits
+// the string into operands, which can be literal strings (the constant bits of
+// the string), actual operands (i.e., operands from the MachineInstr), and
+// dynamically-generated text, specified by raw C++ code.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_UTILS_TABLEGEN_ASMWRITERINST_H
+#define LLVM_UTILS_TABLEGEN_ASMWRITERINST_H
+
+#include <string>
+#include <vector>
+
+namespace llvm {
+ class CodeGenInstruction;
+ class Record;
+
+  /// AsmWriterOperand - One piece of a parsed instruction asm string: either
+  /// literal text, a reference to a machine-instruction operand, or a raw
+  /// C++ statement to emit verbatim into the generated printer.
+  struct AsmWriterOperand {
+    enum OpType {
+      // Output this text surrounded by quotes to the asm.
+      isLiteralTextOperand,
+      // This is the name of a routine to call to print the operand.
+      isMachineInstrOperand,
+      // Output this text verbatim to the asm writer.  It is code that
+      // will output some text to the asm.
+      isLiteralStatementOperand
+    } OperandType;
+
+    /// MIOpNo - For isMachineInstrOperand, this is the operand number of the
+    /// machine instruction.
+    unsigned MIOpNo = 0;
+
+    /// Str - For isLiteralTextOperand, this IS the literal text.  For
+    /// isMachineInstrOperand, this is the PrinterMethodName for the operand.
+    /// For isLiteralStatementOperand, this is the code to insert verbatim
+    /// into the asm writer.
+    std::string Str;
+
+    /// MiModifier - For isMachineInstrOperand, this is the modifier string for
+    /// an operand, specified with syntax like ${opname:modifier}.
+    std::string MiModifier;
+
+    /// PCRel - When set, getCode() passes the instruction Address to the
+    /// printer method (PC-relative operand printing).
+    bool PCRel = false;
+
+    // To make VS STL happy
+    AsmWriterOperand(OpType op = isLiteralTextOperand):OperandType(op) {}
+
+    AsmWriterOperand(const std::string &LitStr,
+                     OpType op = isLiteralTextOperand)
+        : OperandType(op), Str(LitStr) {}
+
+    AsmWriterOperand(const std::string &Printer, unsigned _MIOpNo,
+                     const std::string &Modifier,
+                     OpType op = isMachineInstrOperand, bool PCRel = false)
+        : OperandType(op), MIOpNo(_MIOpNo), Str(Printer), MiModifier(Modifier),
+          PCRel(PCRel) {}
+
+    bool operator!=(const AsmWriterOperand &Other) const {
+      if (OperandType != Other.OperandType || Str != Other.Str) return true;
+      if (OperandType == isMachineInstrOperand)
+        return MIOpNo != Other.MIOpNo || MiModifier != Other.MiModifier ||
+               PCRel != Other.PCRel;
+      return false;
+    }
+    bool operator==(const AsmWriterOperand &Other) const {
+      return !operator!=(Other);
+    }
+
+    /// getCode - Return the code that prints this operand.
+    std::string getCode(bool PassSubtarget) const;
+  };
+
+  /// AsmWriterInst - A single instruction's asm string, parsed into an
+  /// ordered list of AsmWriterOperand pieces.
+  class AsmWriterInst {
+  public:
+    // Parsed pieces of the asm string, in print order.
+    std::vector<AsmWriterOperand> Operands;
+    // The instruction this asm string belongs to.
+    const CodeGenInstruction *CGI;
+    // Index of CGI in the target's enum-ordered instruction list.
+    unsigned CGIIndex;
+
+    AsmWriterInst(const CodeGenInstruction &CGI, unsigned CGIIndex,
+                  unsigned Variant);
+
+    /// MatchesAllButOneOp - If this instruction is exactly identical to the
+    /// specified instruction except for one differing operand, return the
+    /// differing operand number.  If more than one operand mismatches,
+    /// return ~1; if the instructions are identical, return ~0.
+    unsigned MatchesAllButOneOp(const AsmWriterInst &Other) const;
+
+  private:
+    void AddLiteralString(const std::string &Str) {
+      // If the last operand was already a literal text string, append this to
+      // it, otherwise add a new operand.
+      if (!Operands.empty() &&
+          Operands.back().OperandType == AsmWriterOperand::isLiteralTextOperand)
+        Operands.back().Str.append(Str);
+      else
+        Operands.push_back(AsmWriterOperand(Str));
+    }
+  };
+}
+
+#endif
diff --git a/contrib/libs/llvm16/utils/TableGen/Attributes.cpp b/contrib/libs/llvm16/utils/TableGen/Attributes.cpp
new file mode 100644
index 0000000000..735c53dd6f
--- /dev/null
+++ b/contrib/libs/llvm16/utils/TableGen/Attributes.cpp
@@ -0,0 +1,138 @@
+//===- Attributes.cpp - Generate attributes -------------------------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+#include "llvm/TableGen/Record.h"
+#include <vector>
+using namespace llvm;
+
+#define DEBUG_TYPE "attr-enum"
+
+namespace {
+
+// Backend that generates Attributes.inc: attribute name/enum tables, the
+// function-attribute compatibility/merge helpers, and the property table.
+class Attributes {
+public:
+  Attributes(RecordKeeper &R) : Records(R) {}
+  // Emit all guarded sections of the generated file to OS.
+  void emit(raw_ostream &OS);
+
+private:
+  void emitTargetIndependentNames(raw_ostream &OS);
+  void emitFnAttrCompatCheck(raw_ostream &OS, bool IsStringAttr);
+  // Parameter renamed from "OF" to "OS" to match the out-of-line definition
+  // and the other prototypes in this class (prototype names are cosmetic).
+  void emitAttributeProperties(raw_ostream &OS);
+
+  RecordKeeper &Records;
+};
+
+} // End anonymous namespace.
+
+// Emits two sections: GET_ATTR_NAMES, which expands ATTRIBUTE_ENUM /
+// ATTRIBUTE_STRBOOL macro invocations (name, string) for every attribute
+// record, and GET_ATTR_ENUM, which assigns enum values starting at 1 so
+// that 0 remains AttrKind::None.
+void Attributes::emitTargetIndependentNames(raw_ostream &OS) {
+  OS << "#ifdef GET_ATTR_NAMES\n";
+  OS << "#undef GET_ATTR_NAMES\n";
+
+  OS << "#ifndef ATTRIBUTE_ALL\n";
+  OS << "#define ATTRIBUTE_ALL(FIRST, SECOND)\n";
+  OS << "#endif\n\n";
+
+  // Emit one kind-specific macro invocation per record; the macro defaults
+  // to ATTRIBUTE_ALL if the includer did not define it.
+  auto Emit = [&](ArrayRef<StringRef> KindNames, StringRef MacroName) {
+    OS << "#ifndef " << MacroName << "\n";
+    OS << "#define " << MacroName
+       << "(FIRST, SECOND) ATTRIBUTE_ALL(FIRST, SECOND)\n";
+    OS << "#endif\n\n";
+    for (StringRef KindName : KindNames) {
+      for (auto *A : Records.getAllDerivedDefinitions(KindName)) {
+        OS << MacroName << "(" << A->getName() << ","
+           << A->getValueAsString("AttrString") << ")\n";
+      }
+    }
+    OS << "#undef " << MacroName << "\n\n";
+  };
+
+  // Emit attribute enums in the same order llvm::Attribute::operator< expects.
+  Emit({"EnumAttr", "TypeAttr", "IntAttr"}, "ATTRIBUTE_ENUM");
+  Emit({"StrBoolAttr"}, "ATTRIBUTE_STRBOOL");
+
+  OS << "#undef ATTRIBUTE_ALL\n";
+  OS << "#endif\n\n";
+
+  OS << "#ifdef GET_ATTR_ENUM\n";
+  OS << "#undef GET_ATTR_ENUM\n";
+  unsigned Value = 1; // Leave zero for AttrKind::None.
+  for (StringRef KindName : {"EnumAttr", "TypeAttr", "IntAttr"}) {
+    // First<Kind>/Last<Kind> bracket each kind's contiguous value range.
+    OS << "First" << KindName << " = " << Value << ",\n";
+    for (auto *A : Records.getAllDerivedDefinitions(KindName)) {
+      OS << A->getName() << " = " << Value << ",\n";
+      Value++;
+    }
+    OS << "Last" << KindName << " = " << (Value - 1) << ",\n";
+  }
+  OS << "#endif\n\n";
+}
+
+// Emits the GET_ATTR_COMPAT_FUNC section: hasCompatibleFnAttrs(), which ANDs
+// every CompatRule's CompatFunc over (Caller, Callee), and mergeFnAttrs(),
+// which invokes every MergeRule's MergeFunc.
+// NOTE(review): IsStringAttr is currently unused; the only caller visible in
+// this file (Attributes::emit) passes false.
+void Attributes::emitFnAttrCompatCheck(raw_ostream &OS, bool IsStringAttr) {
+  OS << "#ifdef GET_ATTR_COMPAT_FUNC\n";
+  OS << "#undef GET_ATTR_COMPAT_FUNC\n";
+
+  OS << "static inline bool hasCompatibleFnAttrs(const Function &Caller,\n"
+     << "                                        const Function &Callee) {\n";
+  OS << "  bool Ret = true;\n\n";
+
+  std::vector<Record *> CompatRules =
+      Records.getAllDerivedDefinitions("CompatRule");
+
+  for (auto *Rule : CompatRules) {
+    StringRef FuncName = Rule->getValueAsString("CompatFunc");
+    OS << "  Ret &= " << FuncName << "(Caller, Callee);\n";
+  }
+
+  OS << "\n";
+  OS << "  return Ret;\n";
+  OS << "}\n\n";
+
+  std::vector<Record *> MergeRules =
+      Records.getAllDerivedDefinitions("MergeRule");
+  OS << "static inline void mergeFnAttrs(Function &Caller,\n"
+     << "                                const Function &Callee) {\n";
+
+  for (auto *Rule : MergeRules) {
+    StringRef FuncName = Rule->getValueAsString("MergeFunc");
+    OS << "  " << FuncName << "(Caller, Callee);\n";
+  }
+
+  OS << "}\n\n";
+
+  OS << "#endif\n";
+}
+
+// Emits the GET_ATTR_PROP_TABLE section: one uint8_t bitmask per
+// Enum/Type/Int attribute, OR-ing its AttributeProperty flags (the leading
+// "0" keeps the expression valid when the Properties list is empty).
+void Attributes::emitAttributeProperties(raw_ostream &OS) {
+  OS << "#ifdef GET_ATTR_PROP_TABLE\n";
+  OS << "#undef GET_ATTR_PROP_TABLE\n";
+  OS << "static const uint8_t AttrPropTable[] = {\n";
+  for (StringRef KindName : {"EnumAttr", "TypeAttr", "IntAttr"}) {
+    for (auto *A : Records.getAllDerivedDefinitions(KindName)) {
+      OS << "0";
+      for (Init *P : *A->getValueAsListInit("Properties"))
+        OS << " | AttributeProperty::" << cast<DefInit>(P)->getDef()->getName();
+      OS << ",\n";
+    }
+  }
+  OS << "};\n";
+  OS << "#endif\n";
+}
+
+// Emit every guarded section of Attributes.inc. The second argument to
+// emitFnAttrCompatCheck is always false here.
+void Attributes::emit(raw_ostream &OS) {
+  emitTargetIndependentNames(OS);
+  emitFnAttrCompatCheck(OS, false);
+  emitAttributeProperties(OS);
+}
+
+namespace llvm {
+
+// TableGen entry point for the attributes backend.
+void EmitAttributes(RecordKeeper &RK, raw_ostream &OS) {
+  Attributes(RK).emit(OS);
+}
+
+} // End llvm namespace.
diff --git a/contrib/libs/llvm16/utils/TableGen/CTagsEmitter.cpp b/contrib/libs/llvm16/utils/TableGen/CTagsEmitter.cpp
new file mode 100644
index 0000000000..fe62d6a9b6
--- /dev/null
+++ b/contrib/libs/llvm16/utils/TableGen/CTagsEmitter.cpp
@@ -0,0 +1,93 @@
+//===- CTagsEmitter.cpp - Generate ctags-compatible index ------------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// This tablegen backend emits an index of definitions in ctags(1) format.
+// A helper script, utils/TableGen/tdtags, provides an easier-to-use
+// interface; run 'tdtags -H' for documentation.
+//
+//===----------------------------------------------------------------------===//
+
+#include "llvm/Support/SourceMgr.h"
+#include "llvm/Support/MemoryBuffer.h"
+#include "llvm/TableGen/Error.h"
+#include "llvm/TableGen/Record.h"
+#include <algorithm>
+#include <string>
+#include <vector>
+using namespace llvm;
+
+#define DEBUG_TYPE "ctags-emitter"
+
+namespace {
+
+// A single ctags entry: an identifier plus the buffer name and line where it
+// appears. Location data is resolved eagerly through the global TableGen
+// SrcMgr so sorting and emission need no further lookups.
+class Tag {
+private:
+  StringRef Id;
+  StringRef BufferIdentifier;
+  unsigned Line;
+public:
+  Tag(StringRef Name, const SMLoc Location) : Id(Name) {
+    const MemoryBuffer *CurMB =
+        SrcMgr.getMemoryBuffer(SrcMgr.FindBufferContainingLoc(Location));
+    BufferIdentifier = CurMB->getBufferIdentifier();
+    auto LineAndColumn = SrcMgr.getLineAndColumn(Location);
+    Line = LineAndColumn.first;
+  }
+  // Strict weak ordering by (Id, BufferIdentifier, Line); returns bool as is
+  // conventional for a comparator (the original declared it returning int).
+  bool operator<(const Tag &B) const {
+    return std::make_tuple(Id, BufferIdentifier, Line) <
+           std::make_tuple(B.Id, B.BufferIdentifier, B.Line);
+  }
+  // Write one tab-separated ctags line: identifier, file, line number.
+  void emit(raw_ostream &OS) const {
+    OS << Id << "\t" << BufferIdentifier << "\t" << Line << "\n";
+  }
+};
+
+// Backend that walks all classes and defs in the RecordKeeper and emits a
+// sorted ctags(1)-format index of where each is defined.
+class CTagsEmitter {
+private:
+  RecordKeeper &Records;
+public:
+  CTagsEmitter(RecordKeeper &R) : Records(R) {}
+
+  void run(raw_ostream &OS);
+
+private:
+  static SMLoc locate(const Record *R);
+};
+
+} // End anonymous namespace.
+
+// Return the first recorded source location of R, or an invalid (default)
+// SMLoc if the record carries no location information.
+SMLoc CTagsEmitter::locate(const Record *R) {
+  ArrayRef<SMLoc> Locs = R->getLoc();
+  return !Locs.empty() ? Locs.front() : SMLoc();
+}
+
+// Collect a tag for every class (including each of its forward-declaration
+// sites) and every def, sort them, and emit a ctags file with the standard
+// format/sorted header lines.
+void CTagsEmitter::run(raw_ostream &OS) {
+  const auto &Classes = Records.getClasses();
+  const auto &Defs = Records.getDefs();
+  std::vector<Tag> Tags;
+  // Collect tags. (reserve() is a lower bound: forward declarations add
+  // extra entries beyond Classes.size() + Defs.size().)
+  Tags.reserve(Classes.size() + Defs.size());
+  for (const auto &C : Classes) {
+    Tags.push_back(Tag(C.first, locate(C.second.get())));
+    for (SMLoc FwdLoc : C.second->getForwardDeclarationLocs())
+      Tags.push_back(Tag(C.first, FwdLoc));
+  }
+  for (const auto &D : Defs)
+    Tags.push_back(Tag(D.first, locate(D.second.get())));
+  // Emit tags.
+  llvm::sort(Tags);
+  OS << "!_TAG_FILE_FORMAT\t1\t/original ctags format/\n";
+  OS << "!_TAG_FILE_SORTED\t1\t/0=unsorted, 1=sorted, 2=foldcase/\n";
+  for (const Tag &T : Tags)
+    T.emit(OS);
+}
+
+namespace llvm {
+
+// TableGen entry point for the ctags backend.
+void EmitCTags(RecordKeeper &RK, raw_ostream &OS) { CTagsEmitter(RK).run(OS); }
+
+} // End llvm namespace.
diff --git a/contrib/libs/llvm16/utils/TableGen/CallingConvEmitter.cpp b/contrib/libs/llvm16/utils/TableGen/CallingConvEmitter.cpp
new file mode 100644
index 0000000000..e8ec90e9c0
--- /dev/null
+++ b/contrib/libs/llvm16/utils/TableGen/CallingConvEmitter.cpp
@@ -0,0 +1,436 @@
+//===- CallingConvEmitter.cpp - Generate calling conventions --------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// This tablegen backend is responsible for emitting descriptions of the calling
+// conventions supported by this target.
+//
+//===----------------------------------------------------------------------===//
+
+#include "CodeGenTarget.h"
+#include "llvm/TableGen/Error.h"
+#include "llvm/TableGen/Record.h"
+#include "llvm/TableGen/TableGenBackend.h"
+using namespace llvm;
+
+namespace {
+// Backend that generates the CC_* / RetCC_* calling-convention functions
+// from CallingConv records, and records which registers each convention
+// assigns (used by EmitArgRegisterLists).
+class CallingConvEmitter {
+  RecordKeeper &Records;
+  // Per-convention emission state. Default-initialized so any read before
+  // EmitCallingConv() assigns them is well-defined.
+  unsigned Counter = 0;          // Numbers emitted RegList/Offset temporaries.
+  std::string CurrentAction;     // Name of the CC currently being emitted.
+  bool SwiftAction = false;      // Current action derives from CCIfSwift*.
+
+  std::map<std::string, std::set<std::string>> AssignedRegsMap;
+  std::map<std::string, std::set<std::string>> AssignedSwiftRegsMap;
+  std::map<std::string, std::set<std::string>> DelegateToMap;
+
+public:
+  explicit CallingConvEmitter(RecordKeeper &R) : Records(R) {}
+
+  void run(raw_ostream &o);
+
+private:
+  void EmitCallingConv(Record *CC, raw_ostream &O);
+  void EmitAction(Record *Action, unsigned Indent, raw_ostream &O);
+  void EmitArgRegisterLists(raw_ostream &O);
+};
+} // End anonymous namespace
+
+// Emit prototypes for all non-custom calling conventions, then their full
+// definitions, then the per-CC argument register lists.
+void CallingConvEmitter::run(raw_ostream &O) {
+  std::vector<Record*> CCs = Records.getAllDerivedDefinitions("CallingConv");
+
+  // Emit prototypes for all of the non-custom CC's so that they can forward ref
+  // each other.
+  Records.startTimer("Emit prototypes");
+  O << "#ifndef GET_CC_REGISTER_LISTS\n\n";
+  for (Record *CC : CCs) {
+    if (!CC->getValueAsBit("Custom")) {
+      // Pad = prefix width + name + '(' so continuation arguments line up
+      // under the first parameter ("bool llvm::" is 11 chars, +1 for '(';
+      // "static bool " is 12, +1 for '(').
+      unsigned Pad = CC->getName().size();
+      if (CC->getValueAsBit("Entry")) {
+        O << "bool llvm::";
+        Pad += 12;
+      } else {
+        O << "static bool ";
+        Pad += 13;
+      }
+      O << CC->getName() << "(unsigned ValNo, MVT ValVT,\n"
+        << std::string(Pad, ' ') << "MVT LocVT, CCValAssign::LocInfo LocInfo,\n"
+        << std::string(Pad, ' ')
+        << "ISD::ArgFlagsTy ArgFlags, CCState &State);\n";
+    }
+  }
+
+  // Emit each non-custom calling convention description in full.
+  Records.startTimer("Emit full descriptions");
+  for (Record *CC : CCs) {
+    if (!CC->getValueAsBit("Custom")) {
+      EmitCallingConv(CC, O);
+    }
+  }
+
+  EmitArgRegisterLists(O);
+
+  // NOTE(review): the closing comment says CC_REGISTER_LIST while the guard
+  // opened above is GET_CC_REGISTER_LISTS — cosmetic mismatch in the
+  // generated output only (text matches upstream LLVM).
+  O << "\n#endif // CC_REGISTER_LIST\n";
+}
+
+// Emit one complete calling-convention function: signature, every action in
+// order, and the trailing "CC didn't match" return. Resets the per-CC state
+// (Counter, CurrentAction) before emitting.
+void CallingConvEmitter::EmitCallingConv(Record *CC, raw_ostream &O) {
+  ListInit *CCActions = CC->getValueAsListInit("Actions");
+  Counter = 0;
+
+  CurrentAction = CC->getName().str();
+  // Call upon the creation of a map entry from the void!
+  // We want an entry in AssignedRegsMap for every action, even if that
+  // entry is empty.
+  AssignedRegsMap[CurrentAction] = {};
+
+  O << "\n\n";
+  // Pad aligns continuation parameters just past the '(' (see run()).
+  unsigned Pad = CurrentAction.size();
+  if (CC->getValueAsBit("Entry")) {
+    O << "bool llvm::";
+    Pad += 12;
+  } else {
+    O << "static bool ";
+    Pad += 13;
+  }
+  O << CurrentAction << "(unsigned ValNo, MVT ValVT,\n"
+    << std::string(Pad, ' ') << "MVT LocVT, CCValAssign::LocInfo LocInfo,\n"
+    << std::string(Pad, ' ') << "ISD::ArgFlagsTy ArgFlags, CCState &State) {\n";
+  // Emit all of the actions, in order.
+  for (unsigned i = 0, e = CCActions->size(); i != e; ++i) {
+    Record *Action = CCActions->getElementAsRecord(i);
+    // Track whether this action derives from a CCIfSwift* class so register
+    // assignments land in the Swift-specific map.
+    SwiftAction = llvm::any_of(Action->getSuperClasses(),
+                               [](const std::pair<Record *, SMRange> &Class) {
+                                 std::string Name =
+                                     Class.first->getNameInitAsString();
+                                 return StringRef(Name).startswith("CCIfSwift");
+                               });
+
+    O << "\n";
+    EmitAction(Action, 2, O);
+  }
+
+  O << "\n  return true; // CC didn't match.\n";
+  O << "}\n";
+}
+
+void CallingConvEmitter::EmitAction(Record *Action,
+ unsigned Indent, raw_ostream &O) {
+ std::string IndentStr = std::string(Indent, ' ');
+
+ if (Action->isSubClassOf("CCPredicateAction")) {
+ O << IndentStr << "if (";
+
+ if (Action->isSubClassOf("CCIfType")) {
+ ListInit *VTs = Action->getValueAsListInit("VTs");
+ for (unsigned i = 0, e = VTs->size(); i != e; ++i) {
+ Record *VT = VTs->getElementAsRecord(i);
+ if (i != 0) O << " ||\n " << IndentStr;
+ O << "LocVT == " << getEnumName(getValueType(VT));
+ }
+
+ } else if (Action->isSubClassOf("CCIf")) {
+ O << Action->getValueAsString("Predicate");
+ } else {
+ errs() << *Action;
+ PrintFatalError(Action->getLoc(), "Unknown CCPredicateAction!");
+ }
+
+ O << ") {\n";
+ EmitAction(Action->getValueAsDef("SubAction"), Indent+2, O);
+ O << IndentStr << "}\n";
+ } else {
+ if (Action->isSubClassOf("CCDelegateTo")) {
+ Record *CC = Action->getValueAsDef("CC");
+ O << IndentStr << "if (!" << CC->getName()
+ << "(ValNo, ValVT, LocVT, LocInfo, ArgFlags, State))\n"
+ << IndentStr << " return false;\n";
+ DelegateToMap[CurrentAction].insert(CC->getName().str());
+ } else if (Action->isSubClassOf("CCAssignToReg") ||
+ Action->isSubClassOf("CCAssignToRegAndStack")) {
+ ListInit *RegList = Action->getValueAsListInit("RegList");
+ if (RegList->size() == 1) {
+ std::string Name = getQualifiedName(RegList->getElementAsRecord(0));
+ O << IndentStr << "if (unsigned Reg = State.AllocateReg(" << Name
+ << ")) {\n";
+ if (SwiftAction)
+ AssignedSwiftRegsMap[CurrentAction].insert(Name);
+ else
+ AssignedRegsMap[CurrentAction].insert(Name);
+ } else {
+ O << IndentStr << "static const MCPhysReg RegList" << ++Counter
+ << "[] = {\n";
+ O << IndentStr << " ";
+ ListSeparator LS;
+ for (unsigned i = 0, e = RegList->size(); i != e; ++i) {
+ std::string Name = getQualifiedName(RegList->getElementAsRecord(i));
+ if (SwiftAction)
+ AssignedSwiftRegsMap[CurrentAction].insert(Name);
+ else
+ AssignedRegsMap[CurrentAction].insert(Name);
+ O << LS << Name;
+ }
+ O << "\n" << IndentStr << "};\n";
+ O << IndentStr << "if (unsigned Reg = State.AllocateReg(RegList"
+ << Counter << ")) {\n";
+ }
+ O << IndentStr << " State.addLoc(CCValAssign::getReg(ValNo, ValVT, "
+ << "Reg, LocVT, LocInfo));\n";
+ if (Action->isSubClassOf("CCAssignToRegAndStack")) {
+ int Size = Action->getValueAsInt("Size");
+ int Align = Action->getValueAsInt("Align");
+ O << IndentStr << " (void)State.AllocateStack(";
+ if (Size)
+ O << Size << ", ";
+ else
+ O << "\n"
+ << IndentStr
+ << " State.getMachineFunction().getDataLayout()."
+ "getTypeAllocSize(EVT(LocVT).getTypeForEVT(State.getContext())),"
+ " ";
+ if (Align)
+ O << "Align(" << Align << ")";
+ else
+ O << "\n"
+ << IndentStr
+ << " State.getMachineFunction().getDataLayout()."
+ "getABITypeAlign(EVT(LocVT).getTypeForEVT(State.getContext()"
+ "))";
+ O << ");\n";
+ }
+ O << IndentStr << " return false;\n";
+ O << IndentStr << "}\n";
+ } else if (Action->isSubClassOf("CCAssignToRegWithShadow")) {
+ ListInit *RegList = Action->getValueAsListInit("RegList");
+ ListInit *ShadowRegList = Action->getValueAsListInit("ShadowRegList");
+ if (!ShadowRegList->empty() && ShadowRegList->size() != RegList->size())
+ PrintFatalError(Action->getLoc(),
+ "Invalid length of list of shadowed registers");
+
+ if (RegList->size() == 1) {
+ O << IndentStr << "if (unsigned Reg = State.AllocateReg(";
+ O << getQualifiedName(RegList->getElementAsRecord(0));
+ O << ", " << getQualifiedName(ShadowRegList->getElementAsRecord(0));
+ O << ")) {\n";
+ } else {
+ unsigned RegListNumber = ++Counter;
+ unsigned ShadowRegListNumber = ++Counter;
+
+ O << IndentStr << "static const MCPhysReg RegList" << RegListNumber
+ << "[] = {\n";
+ O << IndentStr << " ";
+ ListSeparator LS;
+ for (unsigned i = 0, e = RegList->size(); i != e; ++i)
+ O << LS << getQualifiedName(RegList->getElementAsRecord(i));
+ O << "\n" << IndentStr << "};\n";
+
+ O << IndentStr << "static const MCPhysReg RegList"
+ << ShadowRegListNumber << "[] = {\n";
+ O << IndentStr << " ";
+ ListSeparator LSS;
+ for (unsigned i = 0, e = ShadowRegList->size(); i != e; ++i)
+ O << LSS << getQualifiedName(ShadowRegList->getElementAsRecord(i));
+ O << "\n" << IndentStr << "};\n";
+
+ O << IndentStr << "if (unsigned Reg = State.AllocateReg(RegList"
+ << RegListNumber << ", " << "RegList" << ShadowRegListNumber
+ << ")) {\n";
+ }
+ O << IndentStr << " State.addLoc(CCValAssign::getReg(ValNo, ValVT, "
+ << "Reg, LocVT, LocInfo));\n";
+ O << IndentStr << " return false;\n";
+ O << IndentStr << "}\n";
+ } else if (Action->isSubClassOf("CCAssignToStack")) {
+ int Size = Action->getValueAsInt("Size");
+ int Align = Action->getValueAsInt("Align");
+
+ O << IndentStr << "unsigned Offset" << ++Counter
+ << " = State.AllocateStack(";
+ if (Size)
+ O << Size << ", ";
+ else
+ O << "\n" << IndentStr
+ << " State.getMachineFunction().getDataLayout()."
+ "getTypeAllocSize(EVT(LocVT).getTypeForEVT(State.getContext())),"
+ " ";
+ if (Align)
+ O << "Align(" << Align << ")";
+ else
+ O << "\n"
+ << IndentStr
+ << " State.getMachineFunction().getDataLayout()."
+ "getABITypeAlign(EVT(LocVT).getTypeForEVT(State.getContext()"
+ "))";
+ O << ");\n" << IndentStr
+ << "State.addLoc(CCValAssign::getMem(ValNo, ValVT, Offset"
+ << Counter << ", LocVT, LocInfo));\n";
+ O << IndentStr << "return false;\n";
+ } else if (Action->isSubClassOf("CCAssignToStackWithShadow")) {
+ int Size = Action->getValueAsInt("Size");
+ int Align = Action->getValueAsInt("Align");
+ ListInit *ShadowRegList = Action->getValueAsListInit("ShadowRegList");
+
+ unsigned ShadowRegListNumber = ++Counter;
+
+ O << IndentStr << "static const MCPhysReg ShadowRegList"
+ << ShadowRegListNumber << "[] = {\n";
+ O << IndentStr << " ";
+ ListSeparator LS;
+ for (unsigned i = 0, e = ShadowRegList->size(); i != e; ++i)
+ O << LS << getQualifiedName(ShadowRegList->getElementAsRecord(i));
+ O << "\n" << IndentStr << "};\n";
+
+ O << IndentStr << "unsigned Offset" << ++Counter
+ << " = State.AllocateStack(" << Size << ", Align(" << Align << "), "
+ << "ShadowRegList" << ShadowRegListNumber << ");\n";
+ O << IndentStr << "State.addLoc(CCValAssign::getMem(ValNo, ValVT, Offset"
+ << Counter << ", LocVT, LocInfo));\n";
+ O << IndentStr << "return false;\n";
+ } else if (Action->isSubClassOf("CCPromoteToType")) {
+ Record *DestTy = Action->getValueAsDef("DestTy");
+ MVT::SimpleValueType DestVT = getValueType(DestTy);
+ O << IndentStr << "LocVT = " << getEnumName(DestVT) <<";\n";
+ if (MVT(DestVT).isFloatingPoint()) {
+ O << IndentStr << "LocInfo = CCValAssign::FPExt;\n";
+ } else {
+ O << IndentStr << "if (ArgFlags.isSExt())\n"
+ << IndentStr << " LocInfo = CCValAssign::SExt;\n"
+ << IndentStr << "else if (ArgFlags.isZExt())\n"
+ << IndentStr << " LocInfo = CCValAssign::ZExt;\n"
+ << IndentStr << "else\n"
+ << IndentStr << " LocInfo = CCValAssign::AExt;\n";
+ }
+ } else if (Action->isSubClassOf("CCPromoteToUpperBitsInType")) {
+ Record *DestTy = Action->getValueAsDef("DestTy");
+ MVT::SimpleValueType DestVT = getValueType(DestTy);
+ O << IndentStr << "LocVT = " << getEnumName(DestVT) << ";\n";
+ if (MVT(DestVT).isFloatingPoint()) {
+ PrintFatalError(Action->getLoc(),
+ "CCPromoteToUpperBitsInType does not handle floating "
+ "point");
+ } else {
+ O << IndentStr << "if (ArgFlags.isSExt())\n"
+ << IndentStr << " LocInfo = CCValAssign::SExtUpper;\n"
+ << IndentStr << "else if (ArgFlags.isZExt())\n"
+ << IndentStr << " LocInfo = CCValAssign::ZExtUpper;\n"
+ << IndentStr << "else\n"
+ << IndentStr << " LocInfo = CCValAssign::AExtUpper;\n";
+ }
+ } else if (Action->isSubClassOf("CCBitConvertToType")) {
+ Record *DestTy = Action->getValueAsDef("DestTy");
+ O << IndentStr << "LocVT = " << getEnumName(getValueType(DestTy)) <<";\n";
+ O << IndentStr << "LocInfo = CCValAssign::BCvt;\n";
+ } else if (Action->isSubClassOf("CCTruncToType")) {
+ Record *DestTy = Action->getValueAsDef("DestTy");
+ O << IndentStr << "LocVT = " << getEnumName(getValueType(DestTy)) <<";\n";
+ O << IndentStr << "LocInfo = CCValAssign::Trunc;\n";
+ } else if (Action->isSubClassOf("CCPassIndirect")) {
+ Record *DestTy = Action->getValueAsDef("DestTy");
+ O << IndentStr << "LocVT = " << getEnumName(getValueType(DestTy)) <<";\n";
+ O << IndentStr << "LocInfo = CCValAssign::Indirect;\n";
+ } else if (Action->isSubClassOf("CCPassByVal")) {
+ int Size = Action->getValueAsInt("Size");
+ int Align = Action->getValueAsInt("Align");
+ O << IndentStr << "State.HandleByVal(ValNo, ValVT, LocVT, LocInfo, "
+ << Size << ", Align(" << Align << "), ArgFlags);\n";
+ O << IndentStr << "return false;\n";
+ } else if (Action->isSubClassOf("CCCustom")) {
+ O << IndentStr
+ << "if (" << Action->getValueAsString("FuncName") << "(ValNo, ValVT, "
+ << "LocVT, LocInfo, ArgFlags, State))\n";
+ O << IndentStr << " return false;\n";
+ } else {
+ errs() << *Action;
+ PrintFatalError(Action->getLoc(), "Unknown CCAction!");
+ }
+ }
+}
+
+// Emit flat "<Name>_ArgRegs" arrays listing every register each calling
+// convention may assign, then "<Name>_Swift_ArgRegs" arrays for the Swift
+// variants. This output lands in the "#else" half of the generated fragment.
+void CallingConvEmitter::EmitArgRegisterLists(raw_ostream &O) {
+  // Transitively merge all delegated CCs into AssignedRegsMap.
+  // NOTE(review): DelegateToMap appears to map a CC name to the set of CC
+  // names it delegates to -- confirm against the member declarations, which
+  // are outside this view.
+  using EntryTy = std::pair<std::string, std::set<std::string>>;
+  bool Redo;
+  do {
+    Redo = false;
+    std::deque<EntryTy> Worklist(DelegateToMap.begin(), DelegateToMap.end());
+
+    while (!Worklist.empty()) {
+      EntryTy Entry = Worklist.front();
+      Worklist.pop_front();
+
+      const std::string &CCName = Entry.first;
+      std::set<std::string> &Registers = Entry.second;
+      // Only CCs whose own delegation set is fully resolved (empty) can be
+      // folded into their users this round; the rest are retried on a later
+      // do/while iteration once their dependencies resolve.
+      if (!Registers.empty())
+        continue;
+
+      for (auto &InnerEntry : Worklist) {
+        const std::string &InnerCCName = InnerEntry.first;
+        std::set<std::string> &InnerRegisters = InnerEntry.second;
+
+        // If InnerCC delegates to CCName, absorb CCName's registers and drop
+        // the now-resolved dependency edge.
+        if (InnerRegisters.find(CCName) != InnerRegisters.end()) {
+          AssignedRegsMap[InnerCCName].insert(
+              AssignedRegsMap[CCName].begin(),
+              AssignedRegsMap[CCName].end());
+          InnerRegisters.erase(CCName);
+        }
+      }
+
+      DelegateToMap.erase(CCName);
+      Redo = true;
+    }
+  } while (Redo);
+
+  if (AssignedRegsMap.empty())
+    return;
+
+  O << "\n#else\n\n";
+
+  for (auto &Entry : AssignedRegsMap) {
+    // Despite the name, this is the calling-convention name keying the map.
+    const std::string &RegName = Entry.first;
+    std::set<std::string> &Registers = Entry.second;
+
+    if (RegName.empty())
+      continue;
+
+    O << "const MCRegister " << Entry.first << "_ArgRegs[] = { ";
+
+    // Emit a lone 0 so the array is never empty (a zero-length array is not
+    // valid C++).
+    if (Registers.empty()) {
+      O << "0";
+    } else {
+      ListSeparator LS;
+      for (const std::string &Reg : Registers)
+        O << LS << Reg;
+    }
+
+    O << " };\n";
+  }
+
+  if (AssignedSwiftRegsMap.empty())
+    return;
+
+  O << "\n// Registers used by Swift.\n";
+  for (auto &Entry : AssignedSwiftRegsMap) {
+    const std::string &RegName = Entry.first;
+    std::set<std::string> &Registers = Entry.second;
+
+    O << "const MCRegister " << RegName << "_Swift_ArgRegs[] = { ";
+
+    ListSeparator LS;
+    for (const std::string &Reg : Registers)
+      O << LS << Reg;
+
+    O << " };\n";
+  }
+}
+
+namespace llvm {
+
+// TableGen entry point: write the calling-convention implementation fragment
+// for all calling-convention records found in RK.
+void EmitCallingConv(RecordKeeper &RK, raw_ostream &OS) {
+  emitSourceFileHeader("Calling Convention Implementation Fragment", OS);
+  CallingConvEmitter(RK).run(OS);
+}
+
+} // End llvm namespace
diff --git a/contrib/libs/llvm16/utils/TableGen/CodeEmitterGen.cpp b/contrib/libs/llvm16/utils/TableGen/CodeEmitterGen.cpp
new file mode 100644
index 0000000000..dc4fd589ea
--- /dev/null
+++ b/contrib/libs/llvm16/utils/TableGen/CodeEmitterGen.cpp
@@ -0,0 +1,574 @@
+//===- CodeEmitterGen.cpp - Code Emitter Generator ------------------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// CodeEmitterGen uses the descriptions of instructions and their fields to
+// construct an automated code emitter: a function that, given a MachineInstr,
+// returns the (currently, 32-bit unsigned) value of the instruction.
+//
+//===----------------------------------------------------------------------===//
+
+#include "CodeGenInstruction.h"
+#include "CodeGenTarget.h"
+#include "SubtargetFeatureInfo.h"
+#include "Types.h"
+#include "VarLenCodeEmitterGen.h"
+#include "llvm/ADT/APInt.h"
+#include "llvm/ADT/ArrayRef.h"
+#include "llvm/ADT/StringExtras.h"
+#include "llvm/Support/Casting.h"
+#include "llvm/Support/raw_ostream.h"
+#include "llvm/TableGen/Error.h"
+#include "llvm/TableGen/Record.h"
+#include "llvm/TableGen/TableGenBackend.h"
+#include <cstdint>
+#include <map>
+#include <set>
+#include <string>
+#include <utility>
+#include <vector>
+
+using namespace llvm;
+
+namespace {
+
+// Generates getBinaryCodeForInstr(): a function that maps an MCInst to its
+// fixed-width binary encoding, driven by the "Inst" bits fields of the
+// target's instruction records.
+class CodeEmitterGen {
+  RecordKeeper &Records;
+
+public:
+  CodeEmitterGen(RecordKeeper &R) : Records(R) {}
+
+  void run(raw_ostream &o);
+
+private:
+  // Bit position of VarName within the encoding at instruction bit 'bit',
+  // or -1 if that bit does not come from VarName.
+  int getVariableBit(const std::string &VarName, BitsInit *BI, int bit);
+  // Body of the switch case for one instruction (possibly a nested HwMode
+  // switch).
+  std::string getInstructionCase(Record *R, CodeGenTarget &Target);
+  std::string getInstructionCaseForEncoding(Record *R, Record *EncodingDef,
+                                            CodeGenTarget &Target);
+  // Appends code merging one operand's bits into the encoding; returns false
+  // on error (diagnostic already printed).
+  bool addCodeToMergeInOperand(Record *R, BitsInit *BI,
+                               const std::string &VarName, unsigned &NumberedOp,
+                               std::set<unsigned> &NamedOpIndices,
+                               std::string &Case, CodeGenTarget &Target);
+
+  // Emits the InstBits table of fixed encoding bits (one table per HwMode,
+  // or a single unsuffixed table when HwMode == -1).
+  void emitInstructionBaseValues(
+      raw_ostream &o, ArrayRef<const CodeGenInstruction *> NumberedInstructions,
+      CodeGenTarget &Target, int HwMode = -1);
+  // Width in bits of the widest "Inst" field seen; computed in run().
+  unsigned BitWidth;
+  // True when BitWidth > 64; switches the generated code to APInt arithmetic.
+  bool UseAPInt;
+};
+
+// If the VarBitInit at position 'bit' matches the specified variable then
+// return the variable bit position. Otherwise return -1.
+// A plain VarInit (an un-sliced single-bit variable) matches as bit 0.
+int CodeEmitterGen::getVariableBit(const std::string &VarName,
+                                   BitsInit *BI, int bit) {
+  if (VarBitInit *VBI = dyn_cast<VarBitInit>(BI->getBit(bit))) {
+    if (VarInit *VI = dyn_cast<VarInit>(VBI->getBitVar()))
+      if (VI->getName() == VarName)
+        return VBI->getBitNum();
+  } else if (VarInit *VI = dyn_cast<VarInit>(BI->getBit(bit))) {
+    if (VI->getName() == VarName)
+      return 0;
+  }
+
+  return -1;
+}
+
+// Append to 'Case' the C++ statements that read the MCInst operand bound to
+// encoding variable 'VarName' and merge its bits into 'Value' at the
+// positions the "Inst" field assigns them.
+// Returns true if it succeeds, false if an error.
+bool CodeEmitterGen::addCodeToMergeInOperand(Record *R, BitsInit *BI,
+                                             const std::string &VarName,
+                                             unsigned &NumberedOp,
+                                             std::set<unsigned> &NamedOpIndices,
+                                             std::string &Case,
+                                             CodeGenTarget &Target) {
+  CodeGenInstruction &CGI = Target.getInstruction(R);
+
+  // Determine if VarName actually contributes to the Inst encoding.
+  int bit = BI->getNumBits()-1;
+
+  // Scan for a bit that this contributed to.
+  for (; bit >= 0; ) {
+    if (getVariableBit(VarName, BI, bit) != -1)
+      break;
+
+    --bit;
+  }
+
+  // If we found no bits, ignore this value, otherwise emit the call to get the
+  // operand encoding.
+  if (bit < 0)
+    return true;
+
+  // If the operand matches by name, reference according to that
+  // operand number. Non-matching operands are assumed to be in
+  // order.
+  unsigned OpIdx;
+  std::pair<unsigned, unsigned> SubOp;
+  if (CGI.Operands.hasSubOperandAlias(VarName, SubOp)) {
+    OpIdx = CGI.Operands[SubOp.first].MIOperandNo + SubOp.second;
+  } else if (CGI.Operands.hasOperandNamed(VarName, OpIdx)) {
+    // Get the machine operand number for the indicated operand.
+    OpIdx = CGI.Operands[OpIdx].MIOperandNo;
+  } else {
+    // Fall back to positional lookup. By default, we now disable positional
+    // lookup (and print an error, below), but even so, we'll do the lookup to
+    // help print a helpful diagnostic message.
+    //
+    // TODO: When we remove useDeprecatedPositionallyEncodedOperands, delete all
+    // this code, just leaving a "no operand named X in record Y" error.
+
+    unsigned NumberOps = CGI.Operands.size();
+    /// If this operand is not supposed to be emitted by the
+    /// generated emitter, skip it.
+    while (NumberedOp < NumberOps &&
+           (CGI.Operands.isFlatOperandNotEmitted(NumberedOp) ||
+            (!NamedOpIndices.empty() && NamedOpIndices.count(
+              CGI.Operands.getSubOperandNumber(NumberedOp).first)))) {
+      ++NumberedOp;
+    }
+
+    // Ran past the last flat operand index: positional lookup failed.
+    if (NumberedOp >=
+        CGI.Operands.back().MIOperandNo + CGI.Operands.back().MINumOperands) {
+      if (!Target.getInstructionSet()->getValueAsBit(
+              "useDeprecatedPositionallyEncodedOperands")) {
+        PrintError(R, Twine("No operand named ") + VarName + " in record " +
+                          R->getName() +
+                          " (would've given 'too few operands' error with "
+                          "useDeprecatedPositionallyEncodedOperands=true)");
+      } else {
+        PrintError(R, "Too few operands in record " + R->getName() +
+                          " (no match for variable " + VarName + ")");
+      }
+      return false;
+    }
+
+    OpIdx = NumberedOp++;
+
+    // Positional encoding is deprecated: even a successful lookup is an
+    // error unless the target explicitly opts in.
+    if (!Target.getInstructionSet()->getValueAsBit(
+            "useDeprecatedPositionallyEncodedOperands")) {
+      std::pair<unsigned, unsigned> SO =
+          CGI.Operands.getSubOperandNumber(OpIdx);
+      std::string OpName = CGI.Operands[SO.first].Name;
+      PrintError(R, Twine("No operand named ") + VarName + " in record " +
+                        R->getName() + " (would've used positional operand #" +
+                        Twine(SO.first) + " ('" + OpName + "') sub-op #" +
+                        Twine(SO.second) +
+                        " with useDeprecatedPositionallyEncodedOperands=true)");
+      return false;
+    }
+  }
+
+  if (CGI.Operands.isFlatOperandNotEmitted(OpIdx)) {
+    PrintError(R, "Operand " + VarName + " used but also marked as not emitted!");
+    return false;
+  }
+
+  std::pair<unsigned, unsigned> SO = CGI.Operands.getSubOperandNumber(OpIdx);
+  std::string &EncoderMethodName =
+      CGI.Operands[SO.first].EncoderMethodNames[SO.second];
+
+  // In APInt mode 'op' is a reused scratch APInt; clear stale bits first.
+  if (UseAPInt)
+    Case += "      op.clearAllBits();\n";
+
+  Case += "      // op: " + VarName + "\n";
+
+  // If the source operand has a custom encoder, use it.
+  if (!EncoderMethodName.empty()) {
+    if (UseAPInt) {
+      Case += "      " + EncoderMethodName + "(MI, " + utostr(OpIdx);
+      Case += ", op";
+    } else {
+      Case += "      op = " + EncoderMethodName + "(MI, " + utostr(OpIdx);
+    }
+    Case += ", Fixups, STI);\n";
+  } else {
+    if (UseAPInt) {
+      Case += "      getMachineOpValue(MI, MI.getOperand(" + utostr(OpIdx) + ")";
+      Case += ", op, Fixups, STI";
+    } else {
+      Case += "      op = getMachineOpValue(MI, MI.getOperand(" + utostr(OpIdx) + ")";
+      Case += ", Fixups, STI";
+    }
+    Case += ");\n";
+  }
+
+  // Precalculate the number of lits this variable contributes to in the
+  // operand. If there is a single lit (consecutive range of bits) we can use a
+  // destructive sequence on APInt that reduces memory allocations.
+  // ("lit" here = one consecutive run of bits taken from this operand.)
+  int numOperandLits = 0;
+  for (int tmpBit = bit; tmpBit >= 0;) {
+    int varBit = getVariableBit(VarName, BI, tmpBit);
+
+    // If this bit isn't from a variable, skip it.
+    if (varBit == -1) {
+      --tmpBit;
+      continue;
+    }
+
+    // Figure out the consecutive range of bits covered by this operand, in
+    // order to generate better encoding code.
+    int beginVarBit = varBit;
+    int N = 1;
+    for (--tmpBit; tmpBit >= 0;) {
+      varBit = getVariableBit(VarName, BI, tmpBit);
+      if (varBit == -1 || varBit != (beginVarBit - N))
+        break;
+      ++N;
+      --tmpBit;
+    }
+    ++numOperandLits;
+  }
+
+  // Second pass: emit mask/shift (or APInt insertBits) code for each run.
+  for (; bit >= 0; ) {
+    int varBit = getVariableBit(VarName, BI, bit);
+
+    // If this bit isn't from a variable, skip it.
+    if (varBit == -1) {
+      --bit;
+      continue;
+    }
+
+    // Figure out the consecutive range of bits covered by this operand, in
+    // order to generate better encoding code.
+    int beginInstBit = bit;
+    int beginVarBit = varBit;
+    int N = 1;
+    for (--bit; bit >= 0;) {
+      varBit = getVariableBit(VarName, BI, bit);
+      if (varBit == -1 || varBit != (beginVarBit - N)) break;
+      ++N;
+      --bit;
+    }
+
+    std::string maskStr;
+    int opShift;
+
+    unsigned loBit = beginVarBit - N + 1;
+    unsigned hiBit = loBit + N;
+    unsigned loInstBit = beginInstBit - N + 1;
+    if (UseAPInt) {
+      std::string extractStr;
+      // extractBitsAsZExtValue fits in uint64_t only below 64 bits; use the
+      // full APInt extraction for wider runs.
+      if (N >= 64) {
+        extractStr = "op.extractBits(" + itostr(hiBit - loBit) + ", " +
+                     itostr(loBit) + ")";
+        Case += "      Value.insertBits(" + extractStr + ", " +
+                itostr(loInstBit) + ");\n";
+      } else {
+        extractStr = "op.extractBitsAsZExtValue(" + itostr(hiBit - loBit) +
+                     ", " + itostr(loBit) + ")";
+        Case += "      Value.insertBits(" + extractStr + ", " +
+                itostr(loInstBit) + ", " + itostr(hiBit - loBit) + ");\n";
+      }
+    } else {
+      uint64_t opMask = ~(uint64_t)0 >> (64 - N);
+      opShift = beginVarBit - N + 1;
+      opMask <<= opShift;
+      maskStr = "UINT64_C(" + utostr(opMask) + ")";
+      opShift = beginInstBit - beginVarBit;
+
+      // With a single run we can mutate 'op' in place; with several runs 'op'
+      // must stay intact, so mask/shift within the expression instead.
+      if (numOperandLits == 1) {
+        Case += "      op &= " + maskStr + ";\n";
+        if (opShift > 0) {
+          Case += "      op <<= " + itostr(opShift) + ";\n";
+        } else if (opShift < 0) {
+          Case += "      op >>= " + itostr(-opShift) + ";\n";
+        }
+        Case += "      Value |= op;\n";
+      } else {
+        if (opShift > 0) {
+          Case += "      Value |= (op & " + maskStr + ") << " +
+                  itostr(opShift) + ";\n";
+        } else if (opShift < 0) {
+          Case += "      Value |= (op & " + maskStr + ") >> " +
+                  itostr(-opShift) + ";\n";
+        } else {
+          Case += "      Value |= (op & " + maskStr + ");\n";
+        }
+      }
+    }
+  }
+  return true;
+}
+
+// Build the switch-case body for instruction R. If R carries per-HwMode
+// EncodingInfos, wrap one encoding body per mode in a nested HwMode switch;
+// otherwise R itself provides the single encoding.
+std::string CodeEmitterGen::getInstructionCase(Record *R,
+                                               CodeGenTarget &Target) {
+  std::string Case;
+  if (const RecordVal *RV = R->getValue("EncodingInfos")) {
+    if (auto *DI = dyn_cast_or_null<DefInit>(RV->getValue())) {
+      const CodeGenHwModes &HWM = Target.getHwModes();
+      EncodingInfoByHwMode EBM(DI->getDef(), HWM);
+      Case += "      switch (HwMode) {\n";
+      Case += "      default: llvm_unreachable(\"Unhandled HwMode\");\n";
+      for (auto &KV : EBM) {
+        Case += "      case " + itostr(KV.first) + ": {\n";
+        Case += getInstructionCaseForEncoding(R, KV.second, Target);
+        Case += "      break;\n";
+        Case += "      }\n";
+      }
+      Case += "      }\n";
+      return Case;
+    }
+  }
+  return getInstructionCaseForEncoding(R, R, Target);
+}
+
+// Build the case body for one concrete encoding: merge every encoding
+// variable of EncodingDef's "Inst" field into Value, then apply the
+// instruction's PostEncoderMethod, if any. R is the instruction record used
+// for operand lookup and diagnostics; EncodingDef may be a per-HwMode record.
+std::string CodeEmitterGen::getInstructionCaseForEncoding(Record *R, Record *EncodingDef,
+                                                          CodeGenTarget &Target) {
+  std::string Case;
+  BitsInit *BI = EncodingDef->getValueAsBitsInit("Inst");
+  unsigned NumberedOp = 0;
+  std::set<unsigned> NamedOpIndices;
+
+  // Collect the set of operand indices that might correspond to named
+  // operand, and skip these when assigning operands based on position.
+  if (Target.getInstructionSet()->
+       getValueAsBit("noNamedPositionallyEncodedOperands")) {
+    CodeGenInstruction &CGI = Target.getInstruction(R);
+    for (const RecordVal &RV : R->getValues()) {
+      unsigned OpIdx;
+      if (!CGI.Operands.hasOperandNamed(RV.getName(), OpIdx))
+        continue;
+
+      NamedOpIndices.insert(OpIdx);
+    }
+  }
+
+  // Loop over all of the fields in the instruction, determining which are the
+  // operands to the instruction.
+  bool Success = true;
+  for (const RecordVal &RV : EncodingDef->getValues()) {
+    // Ignore fixed fields in the record, we're looking for values like:
+    //    bits<5> RST = { ?, ?, ?, ?, ? };
+    if (RV.isNonconcreteOK() || RV.getValue()->isComplete())
+      continue;
+
+    Success &=
+        addCodeToMergeInOperand(R, BI, std::string(RV.getName()), NumberedOp,
+                                NamedOpIndices, Case, Target);
+  }
+
+  if (!Success) {
+    // Dump the record, so we can see what's going on...
+    std::string E;
+    raw_string_ostream S(E);
+    S << "Dumping record for previous error:\n";
+    S << *R;
+    PrintNote(E);
+  }
+
+  StringRef PostEmitter = R->getValueAsString("PostEncoderMethod");
+  if (!PostEmitter.empty()) {
+    Case += "      Value = ";
+    Case += PostEmitter;
+    Case += "(MI, Value";
+    Case += ", STI";
+    Case += ");\n";
+  }
+
+  return Case;
+}
+
+// Print the words of 'Bits' as a comma-separated list of UINT64_C literals,
+// least-significant word first.
+static void emitInstBits(raw_ostream &OS, const APInt &Bits) {
+  for (unsigned I = 0; I < Bits.getNumWords(); ++I)
+    OS << ((I > 0) ? ", " : "") << "UINT64_C(" << utostr(Bits.getRawData()[I])
+       << ")";
+}
+
+// Emit the table of fixed ("base") encoding bits, indexed by opcode.
+// HwMode == -1 produces the single unsuffixed InstBits table; otherwise the
+// table is suffixed with the mode's name. Pseudo and TargetOpcode
+// instructions get an all-zero entry so opcode indexing stays dense.
+void CodeEmitterGen::emitInstructionBaseValues(
+    raw_ostream &o, ArrayRef<const CodeGenInstruction *> NumberedInstructions,
+    CodeGenTarget &Target, int HwMode) {
+  const CodeGenHwModes &HWM = Target.getHwModes();
+  if (HwMode == -1)
+    o << "  static const uint64_t InstBits[] = {\n";
+  else
+    o << "  static const uint64_t InstBits_" << HWM.getMode(HwMode).Name
+      << "[] = {\n";
+
+  for (const CodeGenInstruction *CGI : NumberedInstructions) {
+    Record *R = CGI->TheDef;
+
+    if (R->getValueAsString("Namespace") == "TargetOpcode" ||
+        R->getValueAsBit("isPseudo")) {
+      o << "    "; emitInstBits(o, APInt(BitWidth, 0)); o << ",\n";
+      continue;
+    }
+
+    // Prefer the mode-specific encoding record when one exists for HwMode.
+    Record *EncodingDef = R;
+    if (const RecordVal *RV = R->getValue("EncodingInfos")) {
+      if (auto *DI = dyn_cast_or_null<DefInit>(RV->getValue())) {
+        EncodingInfoByHwMode EBM(DI->getDef(), HWM);
+        if (EBM.hasMode(HwMode))
+          EncodingDef = EBM.get(HwMode);
+      }
+    }
+    BitsInit *BI = EncodingDef->getValueAsBitsInit("Inst");
+
+    // Start by filling in fixed values.
+    APInt Value(BitWidth, 0);
+    for (unsigned i = 0, e = BI->getNumBits(); i != e; ++i) {
+      if (auto *B = dyn_cast<BitInit>(BI->getBit(i)); B && B->getValue())
+        Value.setBit(i);
+    }
+    o << "    ";
+    emitInstBits(o, Value);
+    o << "," << '\t' << "// " << R->getName() << "\n";
+  }
+  // Trailing sentinel entry keeps the initializer list well-formed.
+  o << "    UINT64_C(0)\n  };\n";
+}
+
+// Main driver: decide fixed- vs variable-length emission, compute the
+// encoding width, and emit getBinaryCodeForInstr() with one switch case per
+// distinct encoding body.
+void CodeEmitterGen::run(raw_ostream &o) {
+  CodeGenTarget Target(Records);
+  std::vector<Record*> Insts = Records.getAllDerivedDefinitions("Instruction");
+
+  // For little-endian instruction bit encodings, reverse the bit order
+  Target.reverseBitsForLittleEndianEncoding();
+
+  ArrayRef<const CodeGenInstruction*> NumberedInstructions =
+    Target.getInstructionsByEnumValue();
+
+  // A dag-typed "Inst" field means variable-length encodings, which are
+  // handled by a dedicated backend.
+  if (any_of(NumberedInstructions, [](const CodeGenInstruction *CGI) {
+        Record *R = CGI->TheDef;
+        return R->getValue("Inst") && isa<DagInit>(R->getValueInit("Inst"));
+      })) {
+    emitVarLenCodeEmitter(Records, o);
+  } else {
+    const CodeGenHwModes &HWM = Target.getHwModes();
+    // The set of HwModes used by instruction encodings.
+    std::set<unsigned> HwModes;
+    // Compute the maximum encoding width over all (per-HwMode) encodings.
+    BitWidth = 0;
+    for (const CodeGenInstruction *CGI : NumberedInstructions) {
+      Record *R = CGI->TheDef;
+      if (R->getValueAsString("Namespace") == "TargetOpcode" ||
+          R->getValueAsBit("isPseudo"))
+        continue;
+
+      if (const RecordVal *RV = R->getValue("EncodingInfos")) {
+        if (DefInit *DI = dyn_cast_or_null<DefInit>(RV->getValue())) {
+          EncodingInfoByHwMode EBM(DI->getDef(), HWM);
+          for (auto &KV : EBM) {
+            BitsInit *BI = KV.second->getValueAsBitsInit("Inst");
+            BitWidth = std::max(BitWidth, BI->getNumBits());
+            HwModes.insert(KV.first);
+          }
+          continue;
+        }
+      }
+      BitsInit *BI = R->getValueAsBitsInit("Inst");
+      BitWidth = std::max(BitWidth, BI->getNumBits());
+    }
+    UseAPInt = BitWidth > 64;
+
+    // Emit function declaration
+    if (UseAPInt) {
+      o << "void " << Target.getName()
+        << "MCCodeEmitter::getBinaryCodeForInstr(const MCInst &MI,\n"
+        << "    SmallVectorImpl<MCFixup> &Fixups,\n"
+        << "    APInt &Inst,\n"
+        << "    APInt &Scratch,\n"
+        << "    const MCSubtargetInfo &STI) const {\n";
+    } else {
+      o << "uint64_t " << Target.getName();
+      o << "MCCodeEmitter::getBinaryCodeForInstr(const MCInst &MI,\n"
+        << "    SmallVectorImpl<MCFixup> &Fixups,\n"
+        << "    const MCSubtargetInfo &STI) const {\n";
+    }
+
+    // Emit instruction base values
+    if (HwModes.empty()) {
+      emitInstructionBaseValues(o, NumberedInstructions, Target, -1);
+    } else {
+      for (unsigned HwMode : HwModes)
+        emitInstructionBaseValues(o, NumberedInstructions, Target, (int)HwMode);
+    }
+
+    // Select the base-bits table for the current subtarget's HwMode.
+    if (!HwModes.empty()) {
+      o << "  const uint64_t *InstBits;\n";
+      o << "  unsigned HwMode = STI.getHwMode();\n";
+      o << "  switch (HwMode) {\n";
+      o << "  default: llvm_unreachable(\"Unknown hardware mode!\"); break;\n";
+      for (unsigned I : HwModes) {
+        o << "  case " << I << ": InstBits = InstBits_" << HWM.getMode(I).Name
+          << "; break;\n";
+      }
+      o << "  };\n";
+    }
+
+    // Map to accumulate all the cases.
+    // Keyed by case body so instructions sharing an encoding recipe share one
+    // switch case.
+    std::map<std::string, std::vector<std::string>> CaseMap;
+
+    // Construct all cases statement for each opcode
+    for (Record *R : Insts) {
+      if (R->getValueAsString("Namespace") == "TargetOpcode" ||
+          R->getValueAsBit("isPseudo"))
+        continue;
+      std::string InstName =
+          (R->getValueAsString("Namespace") + "::" + R->getName()).str();
+      std::string Case = getInstructionCase(R, Target);
+
+      CaseMap[Case].push_back(std::move(InstName));
+    }
+
+    // Emit initial function code
+    if (UseAPInt) {
+      int NumWords = APInt::getNumWords(BitWidth);
+      o << "  const unsigned opcode = MI.getOpcode();\n"
+        << "  if (Scratch.getBitWidth() != " << BitWidth << ")\n"
+        << "    Scratch = Scratch.zext(" << BitWidth << ");\n"
+        << "  Inst = APInt(" << BitWidth << ", ArrayRef(InstBits + opcode * "
+        << NumWords << ", " << NumWords << "));\n"
+        << "  APInt &Value = Inst;\n"
+        << "  APInt &op = Scratch;\n"
+        << "  switch (opcode) {\n";
+    } else {
+      o << "  const unsigned opcode = MI.getOpcode();\n"
+        << "  uint64_t Value = InstBits[opcode];\n"
+        << "  uint64_t op = 0;\n"
+        << "  (void)op;  // suppress warning\n"
+        << "  switch (opcode) {\n";
+    }
+
+    // Emit each case statement
+    std::map<std::string, std::vector<std::string>>::iterator IE, EE;
+    for (IE = CaseMap.begin(), EE = CaseMap.end(); IE != EE; ++IE) {
+      const std::string &Case = IE->first;
+      std::vector<std::string> &InstList = IE->second;
+
+      for (int i = 0, N = InstList.size(); i < N; i++) {
+        if (i)
+          o << "\n";
+        o << "    case " << InstList[i]  << ":";
+      }
+      o << " {\n";
+      o << Case;
+      o << "      break;\n"
+        << "    }\n";
+    }
+
+    // Default case: unhandled opcode
+    o << "  default:\n"
+      << "    std::string msg;\n"
+      << "    raw_string_ostream Msg(msg);\n"
+      << "    Msg << \"Not supported instr: \" << MI;\n"
+      << "    report_fatal_error(Msg.str().c_str());\n"
+      << "  }\n";
+    if (UseAPInt)
+      o << "  Inst = Value;\n";
+    else
+      o << "  return Value;\n";
+    o << "}\n\n";
+  }
+}
+
+} // end anonymous namespace
+
+namespace llvm {
+
+// TableGen entry point for the fixed-width machine code emitter backend.
+void EmitCodeEmitter(RecordKeeper &RK, raw_ostream &OS) {
+  emitSourceFileHeader("Machine Code Emitter", OS);
+  CodeEmitterGen(RK).run(OS);
+}
+
+} // end namespace llvm
diff --git a/contrib/libs/llvm16/utils/TableGen/CodeGenDAGPatterns.cpp b/contrib/libs/llvm16/utils/TableGen/CodeGenDAGPatterns.cpp
new file mode 100644
index 0000000000..dd04778e2d
--- /dev/null
+++ b/contrib/libs/llvm16/utils/TableGen/CodeGenDAGPatterns.cpp
@@ -0,0 +1,4797 @@
+//===- CodeGenDAGPatterns.cpp - Read DAG patterns from .td file -----------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// This file implements the CodeGenDAGPatterns class, which is used to read and
+// represent the patterns present in a .td file for instructions.
+//
+//===----------------------------------------------------------------------===//
+
+#include "CodeGenDAGPatterns.h"
+#include "CodeGenInstruction.h"
+#include "llvm/ADT/DenseSet.h"
+#include "llvm/ADT/MapVector.h"
+#include "llvm/ADT/STLExtras.h"
+#include "llvm/ADT/SmallSet.h"
+#include "llvm/ADT/SmallString.h"
+#include "llvm/ADT/StringExtras.h"
+#include "llvm/ADT/StringMap.h"
+#include "llvm/ADT/Twine.h"
+#include "llvm/Support/Debug.h"
+#include "llvm/Support/ErrorHandling.h"
+#include "llvm/Support/TypeSize.h"
+#include "llvm/TableGen/Error.h"
+#include "llvm/TableGen/Record.h"
+#include <algorithm>
+#include <cstdio>
+#include <iterator>
+#include <set>
+using namespace llvm;
+
+#define DEBUG_TYPE "dag-patterns"
+
+// Small MVT classification predicates used throughout the type-inference
+// code below (iPTR is the target-pointer placeholder type).
+static inline bool isIntegerOrPtr(MVT VT) {
+  return VT.isInteger() || VT == MVT::iPTR;
+}
+static inline bool isFloatingPoint(MVT VT) {
+  return VT.isFloatingPoint();
+}
+static inline bool isVector(MVT VT) {
+  return VT.isVector();
+}
+static inline bool isScalar(MVT VT) {
+  return !VT.isVector();
+}
+static inline bool isScalarInteger(MVT VT) {
+  return VT.isScalarInteger();
+}
+
+// Erase from S every element satisfying P; returns true iff anything was
+// erased.
+template <typename Predicate>
+static bool berase_if(MachineValueTypeSet &S, Predicate P) {
+  bool Erased = false;
+  // It is ok to iterate over MachineValueTypeSet and remove elements from it
+  // at the same time.
+  for (MVT T : S) {
+    if (!P(T))
+      continue;
+    Erased = true;
+    S.erase(T);
+  }
+  return Erased;
+}
+
+// Print the set as "[t1 t2 ...]" with type names in sorted order.
+void MachineValueTypeSet::writeToStream(raw_ostream &OS) const {
+  SmallVector<MVT, 4> Types(begin(), end());
+  array_pod_sort(Types.begin(), Types.end());
+
+  OS << '[';
+  ListSeparator LS(" ");
+  for (const MVT &T : Types)
+    OS << LS << ValueTypeByHwMode::getMVTName(T);
+  OS << ']';
+}
+
+// --- TypeSetByHwMode
+
+// This is a parameterized type-set class. For each mode there is a list
+// of types that are currently possible for a given tree node. Type
+// inference will apply to each mode separately.
+
+// Build the set from a list of per-mode types, recording each entry's
+// pointer address space in a parallel list (read back in
+// getValueTypeByHwMode).
+TypeSetByHwMode::TypeSetByHwMode(ArrayRef<ValueTypeByHwMode> VTList) {
+  for (const ValueTypeByHwMode &VVT : VTList) {
+    insert(VVT);
+    AddrSpaces.push_back(VVT.PtrAddrSpace);
+  }
+}
+
+// True if every mode holds at most one type (and, unless AllowEmpty, at
+// least one), i.e. the set collapses to a single ValueTypeByHwMode.
+bool TypeSetByHwMode::isValueTypeByHwMode(bool AllowEmpty) const {
+  for (const auto &I : *this) {
+    if (I.second.size() > 1)
+      return false;
+    if (!AllowEmpty && I.second.empty())
+      return false;
+  }
+  return true;
+}
+
+// Collapse a singleton-per-mode set into a ValueTypeByHwMode. Empty modes
+// become MVT::Other; recorded pointer address spaces are replayed in order.
+ValueTypeByHwMode TypeSetByHwMode::getValueTypeByHwMode() const {
+  assert(isValueTypeByHwMode(true) &&
+         "The type set has multiple types for at least one HW mode");
+  ValueTypeByHwMode VVT;
+  auto ASI = AddrSpaces.begin();
+
+  for (const auto &I : *this) {
+    MVT T = I.second.empty() ? MVT::Other : *I.second.begin();
+    VVT.getOrCreateTypeForMode(I.first, T);
+    if (ASI != AddrSpaces.end())
+      VVT.PtrAddrSpace = *ASI++;
+  }
+  return VVT;
+}
+
+// True if at least one mode still has a candidate type, i.e. type inference
+// has not run this node into a contradiction.
+bool TypeSetByHwMode::isPossible() const {
+  for (const auto &I : *this)
+    if (!I.second.empty())
+      return true;
+  return false;
+}
+
+// Insert VVT's per-mode types into this set; returns true iff the set
+// changed. VVT's default-mode type is also propagated to every mode present
+// here but absent from VVT.
+bool TypeSetByHwMode::insert(const ValueTypeByHwMode &VVT) {
+  bool Changed = false;
+  bool ContainsDefault = false;
+  MVT DT = MVT::Other;
+
+  for (const auto &P : VVT) {
+    unsigned M = P.first;
+    // Make sure there exists a set for each specific mode from VVT.
+    Changed |= getOrCreate(M).insert(P.second).second;
+    // Cache VVT's default mode.
+    if (DefaultMode == M) {
+      ContainsDefault = true;
+      DT = P.second;
+    }
+  }
+
+  // If VVT has a default mode, add the corresponding type to all
+  // modes in "this" that do not exist in VVT.
+  if (ContainsDefault)
+    for (auto &I : *this)
+      if (!VVT.hasMode(I.first))
+        Changed |= I.second.insert(DT).second;
+
+  return Changed;
+}
+
+// Constrain the type set to be the intersection with VTS.
+// Returns true iff this set changed.
+bool TypeSetByHwMode::constrain(const TypeSetByHwMode &VTS) {
+  bool Changed = false;
+  if (hasDefault()) {
+    // Materialize an explicit per-mode copy of our default set for every
+    // mode that VTS mentions but we do not, so intersection is per-mode.
+    for (const auto &I : VTS) {
+      unsigned M = I.first;
+      if (M == DefaultMode || hasMode(M))
+        continue;
+      Map.insert({M, Map.at(DefaultMode)});
+      Changed = true;
+    }
+  }
+
+  for (auto &I : *this) {
+    unsigned M = I.first;
+    SetType &S = I.second;
+    if (VTS.hasMode(M) || VTS.hasDefault()) {
+      Changed |= intersect(I.second, VTS.get(M));
+    } else if (!S.empty()) {
+      // VTS has nothing for this mode at all: the intersection is empty.
+      S.clear();
+      Changed = true;
+    }
+  }
+  return Changed;
+}
+
+// Keep only the types satisfying P, in every mode; returns true iff the set
+// changed.
+template <typename Predicate>
+bool TypeSetByHwMode::constrain(Predicate P) {
+  bool Changed = false;
+  for (auto &I : *this)
+    Changed |= berase_if(I.second, [&P](MVT VT) { return !P(VT); });
+  return Changed;
+}
+
+// Populate this (required-empty) set with the elements of VTS satisfying P;
+// returns true iff the result is non-empty.
+template <typename Predicate>
+bool TypeSetByHwMode::assign_if(const TypeSetByHwMode &VTS, Predicate P) {
+  assert(empty());
+  for (const auto &I : VTS) {
+    SetType &S = getOrCreate(I.first);
+    for (auto J : I.second)
+      if (P(J))
+        S.insert(J);
+  }
+  return !empty();
+}
+
+// Print as "{ mode:[types] ... }" with mode ids in sorted order, or "{}"
+// when no modes are present.
+void TypeSetByHwMode::writeToStream(raw_ostream &OS) const {
+  SmallVector<unsigned, 4> Modes;
+  Modes.reserve(Map.size());
+
+  for (const auto &I : *this)
+    Modes.push_back(I.first);
+  if (Modes.empty()) {
+    OS << "{}";
+    return;
+  }
+  array_pod_sort(Modes.begin(), Modes.end());
+
+  OS << '{';
+  for (unsigned M : Modes) {
+    OS << ' ' << getModeName(M) << ':';
+    get(M).writeToStream(OS);
+  }
+  OS << " }";
+}
+
+// Mode-wise equality: both sets must agree on the presence of a default
+// mode, and list identical types for every mode mentioned by either side.
+bool TypeSetByHwMode::operator==(const TypeSetByHwMode &VTS) const {
+  // The isSimple call is much quicker than hasDefault - check this first.
+  bool IsSimple = isSimple();
+  bool VTSIsSimple = VTS.isSimple();
+  if (IsSimple && VTSIsSimple)
+    return *begin() == *VTS.begin();
+
+  // Speedup: We have a default if the set is simple.
+  bool HaveDefault = IsSimple || hasDefault();
+  bool VTSHaveDefault = VTSIsSimple || VTS.hasDefault();
+  if (HaveDefault != VTSHaveDefault)
+    return false;
+
+  // Union of the modes present on either side.
+  SmallSet<unsigned, 4> Modes;
+  for (auto &I : *this)
+    Modes.insert(I.first);
+  for (const auto &I : VTS)
+    Modes.insert(I.first);
+
+  if (HaveDefault) {
+    // Both sets have default mode.
+    for (unsigned M : Modes) {
+      if (get(M) != VTS.get(M))
+        return false;
+    }
+  } else {
+    // Neither set has default mode.
+    for (unsigned M : Modes) {
+      // If there is no default mode, an empty set is equivalent to not having
+      // the corresponding mode.
+      bool NoModeThis = !hasMode(M) || get(M).empty();
+      bool NoModeVTS = !VTS.hasMode(M) || VTS.get(M).empty();
+      if (NoModeThis != NoModeVTS)
+        return false;
+      if (!NoModeThis)
+        if (get(M) != VTS.get(M))
+          return false;
+    }
+  }
+
+  return true;
+}
+
+namespace llvm {
+  // Stream-printing adapters that forward to the writeToStream members.
+  raw_ostream &operator<<(raw_ostream &OS, const MachineValueTypeSet &T) {
+    T.writeToStream(OS);
+    return OS;
+  }
+  raw_ostream &operator<<(raw_ostream &OS, const TypeSetByHwMode &T) {
+    T.writeToStream(OS);
+    return OS;
+  }
+}
+
+// Debugger aid: print this set to dbgs() followed by a newline.
+LLVM_DUMP_METHOD
+void TypeSetByHwMode::dump() const {
+  dbgs() << *this << '\n';
+}
+
+// Out := Out intersected with In, treating iPTR as a "wildcard" over scalar
+// integer types (see the worked examples below). Returns true iff Out
+// changed.
+bool TypeSetByHwMode::intersect(SetType &Out, const SetType &In) {
+  bool OutP = Out.count(MVT::iPTR), InP = In.count(MVT::iPTR);
+  // Complement of In.
+  auto CompIn = [&In](MVT T) -> bool { return !In.count(T); };
+
+  // Either both or neither set contains iPTR: plain set intersection.
+  if (OutP == InP)
+    return berase_if(Out, CompIn);
+
+  // Compute the intersection of scalars separately to account for only
+  // one set containing iPTR.
+  // The intersection of iPTR with a set of integer scalar types that does not
+  // include iPTR will result in the most specific scalar type:
+  // - iPTR is more specific than any set with two elements or more
+  // - iPTR is less specific than any single integer scalar type.
+  // For example
+  // { iPTR } * { i32 }     -> { i32 }
+  // { iPTR } * { i32 i64 } -> { iPTR }
+  // and
+  // { iPTR i32 } * { i32 }          -> { i32 }
+  // { iPTR i32 } * { i32 i64 }      -> { i32 i64 }
+  // { iPTR i32 } * { i32 i64 i128 } -> { iPTR i32 }
+
+  // Let In' = elements only in In, Out' = elements only in Out, and
+  // IO = elements common to both. Normally IO would be returned as the result
+  // of the intersection, but we need to account for iPTR being a "wildcard" of
+  // sorts. Since elements in IO are those that match both sets exactly, they
+  // will all belong to the output. If any of the "leftovers" (i.e. In' or
+  // Out') contain iPTR, it means that the other set doesn't have it, but it
+  // could have (1) a more specific type, or (2) a set of types that is less
+  // specific. The "leftovers" from the other set is what we want to examine
+  // more closely.
+
+  auto subtract = [](const SetType &A, const SetType &B) {
+    SetType Diff = A;
+    berase_if(Diff, [&B](MVT T) { return B.count(T); });
+    return Diff;
+  };
+
+  if (InP) {
+    SetType OutOnly = subtract(Out, In);
+    if (OutOnly.empty()) {
+      // This means that Out \subset In, so no change to Out.
+      return false;
+    }
+    unsigned NumI = llvm::count_if(OutOnly, isScalarInteger);
+    if (NumI == 1 && OutOnly.size() == 1) {
+      // There is only one element in Out', and it happens to be a scalar
+      // integer that should be kept as a match for iPTR in In.
+      return false;
+    }
+    berase_if(Out, CompIn);
+    if (NumI == 1) {
+      // Replace the iPTR with the leftover scalar integer.
+      Out.insert(*llvm::find_if(OutOnly, isScalarInteger));
+    } else if (NumI > 1) {
+      Out.insert(MVT::iPTR);
+    }
+    return true;
+  }
+
+  // OutP == true
+  SetType InOnly = subtract(In, Out);
+  unsigned SizeOut = Out.size();
+  berase_if(Out, CompIn);   // This will remove at least the iPTR.
+  unsigned NumI = llvm::count_if(InOnly, isScalarInteger);
+  if (NumI == 0) {
+    // iPTR deleted from Out.
+    return true;
+  }
+  if (NumI == 1) {
+    // Replace the iPTR with the leftover scalar integer.
+    Out.insert(*llvm::find_if(InOnly, isScalarInteger));
+    return true;
+  }
+
+  // NumI > 1: Keep the iPTR in Out.
+  Out.insert(MVT::iPTR);
+  // If iPTR was the only element initially removed from Out, then Out
+  // has not changed.
+  return SizeOut != Out.size();
+}
+
+bool TypeSetByHwMode::validate() const {
+#ifndef NDEBUG
+  // An empty map is trivially valid; otherwise at least one HW mode must
+  // carry a non-empty type set.
+  if (empty())
+    return true;
+  for (const auto &E : *this)
+    if (!E.second.empty())
+      return true;
+  return false;
+#endif
+  return true;
+}
+
+// --- TypeInfer
+
+// Merge the type information from In into Out. Returns true iff Out changed.
+// If constraining Out against In leaves it empty, a type contradiction is
+// reported on the owning TreePattern.
+bool TypeInfer::MergeInTypeInfo(TypeSetByHwMode &Out,
+                                const TypeSetByHwMode &In) {
+  ValidateOnExit _1(Out, *this);
+  In.validate();
+  // Nothing to do if In adds no information, or an error was already raised.
+  if (In.empty() || Out == In || TP.hasError())
+    return false;
+  // An empty Out is completely unconstrained: adopt In wholesale.
+  if (Out.empty()) {
+    Out = In;
+    return true;
+  }
+
+  bool Constrained = Out.constrain(In);
+  if (Constrained && Out.empty())
+    TP.error("Type contradiction");
+
+  return Constrained;
+}
+
+// Collapse every multi-element type set in Out to a single, arbitrarily
+// chosen member (the first element). Returns true iff anything changed.
+bool TypeInfer::forceArbitrary(TypeSetByHwMode &Out) {
+  ValidateOnExit _1(Out, *this);
+  if (TP.hasError())
+    return false;
+  assert(!Out.empty() && "cannot pick from an empty set");
+
+  bool Modified = false;
+  for (auto &Entry : Out) {
+    TypeSetByHwMode::SetType &Set = Entry.second;
+    if (Set.size() > 1) {
+      // Keep only the first element of this mode's set.
+      MVT Keep = *Set.begin();
+      Set.clear();
+      Set.insert(Keep);
+      Modified = true;
+    }
+  }
+  return Modified;
+}
+
+// Restrict Out to integer (or pointer) types; if Out is still unconstrained,
+// seed it with the integer subset of the legal types.
+bool TypeInfer::EnforceInteger(TypeSetByHwMode &Out) {
+  ValidateOnExit _1(Out, *this);
+  if (TP.hasError())
+    return false;
+  return Out.empty() ? Out.assign_if(getLegalTypes(), isIntegerOrPtr)
+                     : Out.constrain(isIntegerOrPtr);
+}
+
+// Restrict Out to floating-point types; seed from the legal types if empty.
+bool TypeInfer::EnforceFloatingPoint(TypeSetByHwMode &Out) {
+  ValidateOnExit _1(Out, *this);
+  if (TP.hasError())
+    return false;
+  return Out.empty() ? Out.assign_if(getLegalTypes(), isFloatingPoint)
+                     : Out.constrain(isFloatingPoint);
+}
+
+// Restrict Out to scalar types; seed from the legal types if empty.
+bool TypeInfer::EnforceScalar(TypeSetByHwMode &Out) {
+  ValidateOnExit _1(Out, *this);
+  if (TP.hasError())
+    return false;
+  return Out.empty() ? Out.assign_if(getLegalTypes(), isScalar)
+                     : Out.constrain(isScalar);
+}
+
+// Restrict Out to vector types; seed from the legal types if empty.
+bool TypeInfer::EnforceVector(TypeSetByHwMode &Out) {
+  ValidateOnExit _1(Out, *this);
+  if (TP.hasError())
+    return false;
+  return Out.empty() ? Out.assign_if(getLegalTypes(), isVector)
+                     : Out.constrain(isVector);
+}
+
+// If Out is still unconstrained, initialize it with all legal types.
+bool TypeInfer::EnforceAny(TypeSetByHwMode &Out) {
+  ValidateOnExit _1(Out, *this);
+  if (TP.hasError() || !Out.empty())
+    return false;
+
+  Out = getLegalTypes();
+  return true;
+}
+
+/// Return an iterator to the smallest element of [B,E) that satisfies P,
+/// using L as the ordering; return E when no element satisfies P.
+template <typename Iter, typename Pred, typename Less>
+static Iter min_if(Iter B, Iter E, Pred P, Less L) {
+  Iter Best = E;
+  for (; B != E; ++B) {
+    if (!P(*B))
+      continue;
+    if (Best == E || L(*B, *Best))
+      Best = B;
+  }
+  return Best;
+}
+
+/// Return an iterator to the largest element of [B,E) that satisfies P,
+/// using L as the ordering; return E when no element satisfies P.
+template <typename Iter, typename Pred, typename Less>
+static Iter max_if(Iter B, Iter E, Pred P, Less L) {
+  Iter Best = E;
+  for (; B != E; ++B) {
+    if (!P(*B))
+      continue;
+    if (Best == E || L(*Best, *B))
+      Best = B;
+  }
+  return Best;
+}
+
+/// Make sure that for each type in Small, there exists a larger type in Big.
+/// If SmallIsVT is true, Small came from an SDTCisVTSmallerThanOp constraint
+/// and names a specific VT, so it must never be empty.
+/// Returns true iff either set was changed.
+bool TypeInfer::EnforceSmallerThan(TypeSetByHwMode &Small, TypeSetByHwMode &Big,
+                                   bool SmallIsVT) {
+  ValidateOnExit _1(Small, *this), _2(Big, *this);
+  if (TP.hasError())
+    return false;
+  bool Changed = false;
+
+  assert((!SmallIsVT || !Small.empty()) &&
+         "Small should not be empty for SDTCisVTSmallerThanOp");
+
+  if (Small.empty())
+    Changed |= EnforceAny(Small);
+  if (Big.empty())
+    Changed |= EnforceAny(Big);
+
+  assert(Small.hasDefault() && Big.hasDefault());
+
+  SmallVector<unsigned, 4> Modes;
+  union_modes(Small, Big, Modes);
+
+  // 1. Only allow integer or floating point types and make sure that
+  //    both sides are both integer or both floating point.
+  // 2. Make sure that either both sides have vector types, or neither
+  //    of them does.
+  for (unsigned M : Modes) {
+    TypeSetByHwMode::SetType &S = Small.get(M);
+    TypeSetByHwMode::SetType &B = Big.get(M);
+
+    assert((!SmallIsVT || !S.empty()) && "Expected non-empty type");
+
+    if (any_of(S, isIntegerOrPtr) && any_of(B, isIntegerOrPtr)) {
+      auto NotInt = [](MVT VT) { return !isIntegerOrPtr(VT); };
+      Changed |= berase_if(S, NotInt);
+      Changed |= berase_if(B, NotInt);
+    } else if (any_of(S, isFloatingPoint) && any_of(B, isFloatingPoint)) {
+      auto NotFP = [](MVT VT) { return !isFloatingPoint(VT); };
+      Changed |= berase_if(S, NotFP);
+      Changed |= berase_if(B, NotFP);
+    } else if (SmallIsVT && B.empty()) {
+      // B is empty and since S is a specific VT, it will never be empty. Don't
+      // report this as a change, just clear S and continue. This prevents an
+      // infinite loop.
+      S.clear();
+    } else if (S.empty() || B.empty()) {
+      Changed = !S.empty() || !B.empty();
+      S.clear();
+      B.clear();
+    } else {
+      TP.error("Incompatible types");
+      return Changed;
+    }
+
+    if (none_of(S, isVector) || none_of(B, isVector)) {
+      Changed |= berase_if(S, isVector);
+      Changed |= berase_if(B, isVector);
+    }
+  }
+
+  // Strict "smaller" ordering used to locate the min/max candidates below.
+  auto LT = [](MVT A, MVT B) -> bool {
+    // Always treat non-scalable MVTs as smaller than scalable MVTs for the
+    // purposes of ordering.
+    auto ASize = std::make_tuple(A.isScalableVector(), A.getScalarSizeInBits(),
+                                 A.getSizeInBits().getKnownMinValue());
+    auto BSize = std::make_tuple(B.isScalableVector(), B.getScalarSizeInBits(),
+                                 B.getSizeInBits().getKnownMinValue());
+    return ASize < BSize;
+  };
+  auto SameKindLE = [](MVT A, MVT B) -> bool {
+    // This function is used when removing elements: when a vector is compared
+    // to a non-vector or a scalable vector to any non-scalable MVT, it should
+    // return false (to avoid removal).
+    if (std::make_tuple(A.isVector(), A.isScalableVector()) !=
+        std::make_tuple(B.isVector(), B.isScalableVector()))
+      return false;
+
+    return std::make_tuple(A.getScalarSizeInBits(),
+                           A.getSizeInBits().getKnownMinValue()) <=
+           std::make_tuple(B.getScalarSizeInBits(),
+                           B.getSizeInBits().getKnownMinValue());
+  };
+
+  // Prune both sets so that every element of Small has a strictly larger
+  // same-kind counterpart in Big, and vice versa.
+  for (unsigned M : Modes) {
+    TypeSetByHwMode::SetType &S = Small.get(M);
+    TypeSetByHwMode::SetType &B = Big.get(M);
+    // MinS = min scalar in Small, remove all scalars from Big that are
+    // smaller-or-equal than MinS.
+    auto MinS = min_if(S.begin(), S.end(), isScalar, LT);
+    if (MinS != S.end())
+      Changed |= berase_if(B, std::bind(SameKindLE,
+                                        std::placeholders::_1, *MinS));
+
+    // MaxS = max scalar in Big, remove all scalars from Small that are
+    // larger than MaxS.
+    auto MaxS = max_if(B.begin(), B.end(), isScalar, LT);
+    if (MaxS != B.end())
+      Changed |= berase_if(S, std::bind(SameKindLE,
+                                        *MaxS, std::placeholders::_1));
+
+    // MinV = min vector in Small, remove all vectors from Big that are
+    // smaller-or-equal than MinV.
+    auto MinV = min_if(S.begin(), S.end(), isVector, LT);
+    if (MinV != S.end())
+      Changed |= berase_if(B, std::bind(SameKindLE,
+                                        std::placeholders::_1, *MinV));
+
+    // MaxV = max vector in Big, remove all vectors from Small that are
+    // larger than MaxV.
+    auto MaxV = max_if(B.begin(), B.end(), isVector, LT);
+    if (MaxV != B.end())
+      Changed |= berase_if(S, std::bind(SameKindLE,
+                                        *MaxV, std::placeholders::_1));
+  }
+
+  return Changed;
+}
+
+/// 1. Ensure that for each type T in Vec, T is a vector type, and that
+///    for each type U in Elem, U is a scalar type.
+/// 2. Ensure that for each (scalar) type U in Elem, there exists a (vector)
+///    type T in Vec, such that U is the element type of T.
+/// Returns true iff either set was changed.
+bool TypeInfer::EnforceVectorEltTypeIs(TypeSetByHwMode &Vec,
+                                       TypeSetByHwMode &Elem) {
+  ValidateOnExit _1(Vec, *this), _2(Elem, *this);
+  if (TP.hasError())
+    return false;
+  bool Changed = false;
+
+  if (Vec.empty())
+    Changed |= EnforceVector(Vec);
+  if (Elem.empty())
+    Changed |= EnforceScalar(Elem);
+
+  SmallVector<unsigned, 4> Modes;
+  union_modes(Vec, Elem, Modes);
+  for (unsigned M : Modes) {
+    TypeSetByHwMode::SetType &V = Vec.get(M);
+    TypeSetByHwMode::SetType &E = Elem.get(M);
+
+    Changed |= berase_if(V, isScalar);  // Scalar = !vector
+    Changed |= berase_if(E, isVector);  // Vector = !scalar
+    assert(!V.empty() && !E.empty());
+
+    MachineValueTypeSet VT, ST;
+    // Collect element types from the "vector" set.
+    for (MVT T : V)
+      VT.insert(T.getVectorElementType());
+    // Collect scalar types from the "element" set.
+    for (MVT T : E)
+      ST.insert(T);
+
+    // Remove from V all (vector) types whose element type is not in ST.
+    Changed |= berase_if(V, [&ST](MVT T) -> bool {
+      return !ST.count(T.getVectorElementType());
+    });
+    // Remove from E all (scalar) types, for which there is no corresponding
+    // element type in VT (i.e. no vector in V uses them).
+    Changed |= berase_if(E, [&VT](MVT T) -> bool { return !VT.count(T); });
+  }
+
+  return Changed;
+}
+
+// Convenience overload: wrap the single value type in a temporary set and
+// defer to the set-based variant above.
+bool TypeInfer::EnforceVectorEltTypeIs(TypeSetByHwMode &Vec,
+                                       const ValueTypeByHwMode &VVT) {
+  TypeSetByHwMode EltSet(VVT);
+  ValidateOnExit _1(Vec, *this), _2(EltSet, *this);
+  return EnforceVectorEltTypeIs(Vec, EltSet);
+}
+
+/// Ensure that for each type T in Sub, T is a vector type, and there
+/// exists a type U in Vec such that U is a vector type with the same
+/// element type as T and at least as many elements as T.
+/// Returns true iff either set was changed.
+bool TypeInfer::EnforceVectorSubVectorTypeIs(TypeSetByHwMode &Vec,
+                                             TypeSetByHwMode &Sub) {
+  ValidateOnExit _1(Vec, *this), _2(Sub, *this);
+  if (TP.hasError())
+    return false;
+
+  /// Return true if B is a suB-vector of P, i.e. P is a suPer-vector of B.
+  auto IsSubVec = [](MVT B, MVT P) -> bool {
+    if (!B.isVector() || !P.isVector())
+      return false;
+    // Logically a <4 x i32> is a valid subvector of <n x 4 x i32>
+    // but until there are obvious use-cases for this, keep the
+    // types separate.
+    if (B.isScalableVector() != P.isScalableVector())
+      return false;
+    if (B.getVectorElementType() != P.getVectorElementType())
+      return false;
+    return B.getVectorMinNumElements() < P.getVectorMinNumElements();
+  };
+
+  /// Return true if S has no element (vector type) that T is a sub-vector of,
+  /// i.e. has the same element type as T and more elements.
+  auto NoSubV = [&IsSubVec](const TypeSetByHwMode::SetType &S, MVT T) -> bool {
+    for (auto I : S)
+      if (IsSubVec(T, I))
+        return false;
+    return true;
+  };
+
+  /// Return true if S has no element (vector type) that T is a super-vector
+  /// of, i.e. has the same element type as T and fewer elements.
+  auto NoSupV = [&IsSubVec](const TypeSetByHwMode::SetType &S, MVT T) -> bool {
+    for (auto I : S)
+      if (IsSubVec(I, T))
+        return false;
+    return true;
+  };
+
+  bool Changed = false;
+
+  if (Vec.empty())
+    Changed |= EnforceVector(Vec);
+  if (Sub.empty())
+    Changed |= EnforceVector(Sub);
+
+  SmallVector<unsigned, 4> Modes;
+  union_modes(Vec, Sub, Modes);
+  for (unsigned M : Modes) {
+    TypeSetByHwMode::SetType &S = Sub.get(M);
+    TypeSetByHwMode::SetType &V = Vec.get(M);
+
+    // Sub may only contain vector types.
+    Changed |= berase_if(S, isScalar);
+
+    // Erase all types from S that are not sub-vectors of a type in V.
+    Changed |= berase_if(S, std::bind(NoSubV, V, std::placeholders::_1));
+
+    // Erase all types from V that are not super-vectors of a type in S.
+    Changed |= berase_if(V, std::bind(NoSupV, S, std::placeholders::_1));
+  }
+
+  return Changed;
+}
+
+/// 1. Ensure that V has a scalar type iff W has a scalar type.
+/// 2. Ensure that for each vector type T in V, there exists a vector
+///    type U in W, such that T and U have the same number of elements.
+/// 3. Ensure that for each vector type U in W, there exists a vector
+///    type T in V, such that T and U have the same number of elements
+///    (reverse of 2).
+bool TypeInfer::EnforceSameNumElts(TypeSetByHwMode &V, TypeSetByHwMode &W) {
+  ValidateOnExit _1(V, *this), _2(W, *this);
+  if (TP.hasError())
+    return false;
+
+  bool Changed = false;
+  if (V.empty())
+    Changed |= EnforceAny(V);
+  if (W.empty())
+    Changed |= EnforceAny(W);
+
+  // An actual vector type cannot have 0 elements, so a scalar can be
+  // represented as a zero-length vector (ElementCount()). That lets vectors
+  // and scalars be keyed uniformly by their element count.
+  auto EltCountOf = [](MVT T) {
+    return T.isVector() ? T.getVectorElementCount() : ElementCount();
+  };
+
+  SmallVector<unsigned, 4> Modes;
+  union_modes(V, W, Modes);
+  for (unsigned M : Modes) {
+    TypeSetByHwMode::SetType &VS = V.get(M);
+    TypeSetByHwMode::SetType &WS = W.get(M);
+
+    // Gather the element counts present on each side.
+    SmallDenseSet<ElementCount> VN, WN;
+    for (MVT T : VS)
+      VN.insert(EltCountOf(T));
+    for (MVT T : WS)
+      WN.insert(EltCountOf(T));
+
+    // Drop every type whose element count has no counterpart on the other
+    // side.
+    Changed |= berase_if(VS, [&](MVT T) { return !WN.count(EltCountOf(T)); });
+    Changed |= berase_if(WS, [&](MVT T) { return !VN.count(EltCountOf(T)); });
+  }
+  return Changed;
+}
+
+namespace {
+// Strict weak ordering for TypeSize: fixed-size types sort before scalable
+// ones; within the same kind, order by the known minimum bit width.
+struct TypeSizeComparator {
+  bool operator()(const TypeSize &LHS, const TypeSize &RHS) const {
+    if (LHS.isScalable() != RHS.isScalable())
+      return RHS.isScalable();
+    return LHS.getKnownMinValue() < RHS.getKnownMinValue();
+  }
+};
+} // end anonymous namespace
+
+/// 1. Ensure that for each type T in A, there exists a type U in B,
+///    such that T and U have equal size in bits.
+/// 2. Ensure that for each type U in B, there exists a type T in A
+///    such that T and U have equal size in bits (reverse of 1).
+bool TypeInfer::EnforceSameSize(TypeSetByHwMode &A, TypeSetByHwMode &B) {
+  ValidateOnExit _1(A, *this), _2(B, *this);
+  if (TP.hasError())
+    return false;
+  bool Changed = false;
+  if (A.empty())
+    Changed |= EnforceAny(A);
+  if (B.empty())
+    Changed |= EnforceAny(B);
+
+  typedef SmallSet<TypeSize, 2, TypeSizeComparator> TypeSizeSet;
+
+  SmallVector<unsigned, 4> Modes;
+  union_modes(A, B, Modes);
+  for (unsigned M : Modes) {
+    TypeSetByHwMode::SetType &AS = A.get(M);
+    TypeSetByHwMode::SetType &BS = B.get(M);
+
+    // Record the bit widths available on each side.
+    TypeSizeSet ASizes, BSizes;
+    for (MVT T : AS)
+      ASizes.insert(T.getSizeInBits());
+    for (MVT T : BS)
+      BSizes.insert(T.getSizeInBits());
+
+    // Remove any type whose size has no match on the opposite side.
+    Changed |=
+        berase_if(AS, [&](MVT T) { return !BSizes.count(T.getSizeInBits()); });
+    Changed |=
+        berase_if(BS, [&](MVT T) { return !ASizes.count(T.getSizeInBits()); });
+  }
+
+  return Changed;
+}
+
+// Expand overloaded types (iAny, fAny, vAny, Any, iPTRAny) in every HW mode
+// of VTS, using the default-mode legal type set as the expansion universe.
+void TypeInfer::expandOverloads(TypeSetByHwMode &VTS) {
+  ValidateOnExit _1(VTS, *this);
+  const TypeSetByHwMode &Legal = getLegalTypes();
+  assert(Legal.isDefaultOnly() && "Default-mode only expected");
+  const TypeSetByHwMode::SetType &LegalTypes = Legal.get(DefaultMode);
+
+  for (auto &Entry : VTS)
+    expandOverloads(Entry.second, LegalTypes);
+}
+
+// Replace overloaded/wildcard MVTs in Out with the concrete legal types they
+// stand for. NOTE: every switch case returns immediately, so only the first
+// overloaded kind found in Ovs is expanded per call.
+void TypeInfer::expandOverloads(TypeSetByHwMode::SetType &Out,
+                                const TypeSetByHwMode::SetType &Legal) {
+  // First collect and remove the overloaded types, then expand them, since
+  // expansion inserts into the set being iterated otherwise.
+  std::set<MVT> Ovs;
+  for (MVT T : Out) {
+    if (!T.isOverloaded())
+      continue;
+
+    Ovs.insert(T);
+    // MachineValueTypeSet allows iteration and erasing.
+    Out.erase(T);
+  }
+
+  for (MVT Ov : Ovs) {
+    switch (Ov.SimpleTy) {
+      case MVT::iPTRAny:
+        Out.insert(MVT::iPTR);
+        return;
+      case MVT::iAny:
+        // Any legal integer scalar or (fixed/scalable) integer vector.
+        for (MVT T : MVT::integer_valuetypes())
+          if (Legal.count(T))
+            Out.insert(T);
+        for (MVT T : MVT::integer_fixedlen_vector_valuetypes())
+          if (Legal.count(T))
+            Out.insert(T);
+        for (MVT T : MVT::integer_scalable_vector_valuetypes())
+          if (Legal.count(T))
+            Out.insert(T);
+        return;
+      case MVT::fAny:
+        // Any legal FP scalar or (fixed/scalable) FP vector.
+        for (MVT T : MVT::fp_valuetypes())
+          if (Legal.count(T))
+            Out.insert(T);
+        for (MVT T : MVT::fp_fixedlen_vector_valuetypes())
+          if (Legal.count(T))
+            Out.insert(T);
+        for (MVT T : MVT::fp_scalable_vector_valuetypes())
+          if (Legal.count(T))
+            Out.insert(T);
+        return;
+      case MVT::vAny:
+        for (MVT T : MVT::vector_valuetypes())
+          if (Legal.count(T))
+            Out.insert(T);
+        return;
+      case MVT::Any:
+        for (MVT T : MVT::all_valuetypes())
+          if (Legal.count(T))
+            Out.insert(T);
+        return;
+      default:
+        break;
+    }
+  }
+}
+
+// Lazily build (and cache) the set of all legal types, with every HW mode's
+// legal types collapsed into the default mode.
+const TypeSetByHwMode &TypeInfer::getLegalTypes() {
+  if (!LegalTypesCached) {
+    TypeSetByHwMode::SetType &DefaultSet = LegalCache.getOrCreate(DefaultMode);
+    // Stuff all types from all modes into the default mode.
+    const TypeSetByHwMode &LTS = TP.getDAGPatterns().getLegalTypes();
+    for (const auto &Entry : LTS)
+      DefaultSet.insert(Entry.second);
+    LegalTypesCached = true;
+  }
+  assert(LegalCache.isDefaultOnly() && "Default-mode only expected");
+  return LegalCache;
+}
+
+#ifndef NDEBUG
+// Debug-only destructor: after an inference step finishes, an empty type set
+// in every HW mode means the pattern's type constraints contradict each
+// other — dump the pattern and its record, then abort with a fatal error.
+TypeInfer::ValidateOnExit::~ValidateOnExit() {
+  if (Infer.Validate && !VTS.validate()) {
+    dbgs() << "Type set is empty for each HW mode:\n"
+              "possible type contradiction in the pattern below "
+              "(use -print-records with llvm-tblgen to see all "
+              "expanded records).\n";
+    Infer.TP.dump();
+    dbgs() << "Generated from record:\n";
+    Infer.TP.getRecord()->dump();
+    PrintFatalError(Infer.TP.getRecord()->getLoc(),
+                    "Type set is empty for each HW mode in '" +
+                        Infer.TP.getRecord()->getName() + "'");
+  }
+}
+#endif
+
+
+//===----------------------------------------------------------------------===//
+// ScopedName Implementation
+//===----------------------------------------------------------------------===//
+
+// Two scoped names are equal when both the scope and the identifier match.
+bool ScopedName::operator==(const ScopedName &o) const {
+  if (Scope != o.Scope)
+    return false;
+  return Identifier == o.Identifier;
+}
+
+bool ScopedName::operator!=(const ScopedName &o) const {
+  return !operator==(o);
+}
+
+
+//===----------------------------------------------------------------------===//
+// TreePredicateFn Implementation
+//===----------------------------------------------------------------------===//
+
+/// TreePredicateFn constructor. Here 'N' is a subclass of PatFrag.
+TreePredicateFn::TreePredicateFn(TreePattern *N) : PatFragRec(N) {
+  // A PatFrag may supply node-predicate code or immediate-predicate code,
+  // but never both at the same time.
+  assert(
+      (!hasPredCode() || !hasImmCode()) &&
+      ".td file corrupt: can't have a node predicate *and* an imm predicate");
+}
+
+// A node predicate exists if any memory-operation flag implies generated
+// checks, or if explicit PredicateCode was written in the .td file.
+bool TreePredicateFn::hasPredCode() const {
+  if (isLoad() || isStore() || isAtomic() || hasNoUse())
+    return true;
+  return !PatFragRec->getRecord()->getValueAsString("PredicateCode").empty();
+}
+
+// Build the C++ body of the node predicate for this PatFrag. First validates
+// that the Is* property flags on the record form a consistent combination
+// (fatal error otherwise), then emits checks for address space, alignment,
+// memory VT, atomic ordering, addressing mode and extension/truncation kind,
+// and finally appends any explicit PredicateCode from the record.
+std::string TreePredicateFn::getPredCode() const {
+  std::string Code;
+
+  // --- Validation: reject flag combinations that make no sense. ---
+  if (!isLoad() && !isStore() && !isAtomic()) {
+    Record *MemoryVT = getMemoryVT();
+
+    if (MemoryVT)
+      PrintFatalError(getOrigPatFragRecord()->getRecord()->getLoc(),
+                      "MemoryVT requires IsLoad or IsStore");
+  }
+
+  if (!isLoad() && !isStore()) {
+    if (isUnindexed())
+      PrintFatalError(getOrigPatFragRecord()->getRecord()->getLoc(),
+                      "IsUnindexed requires IsLoad or IsStore");
+
+    Record *ScalarMemoryVT = getScalarMemoryVT();
+
+    if (ScalarMemoryVT)
+      PrintFatalError(getOrigPatFragRecord()->getRecord()->getLoc(),
+                      "ScalarMemoryVT requires IsLoad or IsStore");
+  }
+
+  if (isLoad() + isStore() + isAtomic() > 1)
+    PrintFatalError(getOrigPatFragRecord()->getRecord()->getLoc(),
+                    "IsLoad, IsStore, and IsAtomic are mutually exclusive");
+
+  if (isLoad()) {
+    // A bare IsLoad with no qualifying property generates no check at all.
+    if (!isUnindexed() && !isNonExtLoad() && !isAnyExtLoad() &&
+        !isSignExtLoad() && !isZeroExtLoad() && getMemoryVT() == nullptr &&
+        getScalarMemoryVT() == nullptr && getAddressSpaces() == nullptr &&
+        getMinAlignment() < 1)
+      PrintFatalError(getOrigPatFragRecord()->getRecord()->getLoc(),
+                      "IsLoad cannot be used by itself");
+  } else {
+    if (isNonExtLoad())
+      PrintFatalError(getOrigPatFragRecord()->getRecord()->getLoc(),
+                      "IsNonExtLoad requires IsLoad");
+    if (isAnyExtLoad())
+      PrintFatalError(getOrigPatFragRecord()->getRecord()->getLoc(),
+                      "IsAnyExtLoad requires IsLoad");
+
+    if (!isAtomic()) {
+      if (isSignExtLoad())
+        PrintFatalError(getOrigPatFragRecord()->getRecord()->getLoc(),
+                        "IsSignExtLoad requires IsLoad or IsAtomic");
+      if (isZeroExtLoad())
+        PrintFatalError(getOrigPatFragRecord()->getRecord()->getLoc(),
+                        "IsZeroExtLoad requires IsLoad or IsAtomic");
+    }
+  }
+
+  if (isStore()) {
+    // A bare IsStore with no qualifying property generates no check at all.
+    if (!isUnindexed() && !isTruncStore() && !isNonTruncStore() &&
+        getMemoryVT() == nullptr && getScalarMemoryVT() == nullptr &&
+        getAddressSpaces() == nullptr && getMinAlignment() < 1)
+      PrintFatalError(getOrigPatFragRecord()->getRecord()->getLoc(),
+                      "IsStore cannot be used by itself");
+  } else {
+    if (isNonTruncStore())
+      PrintFatalError(getOrigPatFragRecord()->getRecord()->getLoc(),
+                      "IsNonTruncStore requires IsStore");
+    if (isTruncStore())
+      PrintFatalError(getOrigPatFragRecord()->getRecord()->getLoc(),
+                      "IsTruncStore requires IsStore");
+  }
+
+  if (isAtomic()) {
+    // A bare IsAtomic with no ordering/VT/address-space property generates
+    // no check at all.
+    if (getMemoryVT() == nullptr && !isAtomicOrderingMonotonic() &&
+        getAddressSpaces() == nullptr &&
+        // FIXME: Should atomic loads be IsLoad, IsAtomic, or both?
+        !isZeroExtLoad() && !isSignExtLoad() && !isAtomicOrderingAcquire() &&
+        !isAtomicOrderingRelease() && !isAtomicOrderingAcquireRelease() &&
+        !isAtomicOrderingSequentiallyConsistent() &&
+        !isAtomicOrderingAcquireOrStronger() &&
+        !isAtomicOrderingReleaseOrStronger() &&
+        !isAtomicOrderingWeakerThanAcquire() &&
+        !isAtomicOrderingWeakerThanRelease())
+      PrintFatalError(getOrigPatFragRecord()->getRecord()->getLoc(),
+                      "IsAtomic cannot be used by itself");
+  } else {
+    if (isAtomicOrderingMonotonic())
+      PrintFatalError(getOrigPatFragRecord()->getRecord()->getLoc(),
+                      "IsAtomicOrderingMonotonic requires IsAtomic");
+    if (isAtomicOrderingAcquire())
+      PrintFatalError(getOrigPatFragRecord()->getRecord()->getLoc(),
+                      "IsAtomicOrderingAcquire requires IsAtomic");
+    if (isAtomicOrderingRelease())
+      PrintFatalError(getOrigPatFragRecord()->getRecord()->getLoc(),
+                      "IsAtomicOrderingRelease requires IsAtomic");
+    if (isAtomicOrderingAcquireRelease())
+      PrintFatalError(getOrigPatFragRecord()->getRecord()->getLoc(),
+                      "IsAtomicOrderingAcquireRelease requires IsAtomic");
+    if (isAtomicOrderingSequentiallyConsistent())
+      PrintFatalError(getOrigPatFragRecord()->getRecord()->getLoc(),
+                      "IsAtomicOrderingSequentiallyConsistent requires IsAtomic");
+    if (isAtomicOrderingAcquireOrStronger())
+      PrintFatalError(getOrigPatFragRecord()->getRecord()->getLoc(),
+                      "IsAtomicOrderingAcquireOrStronger requires IsAtomic");
+    if (isAtomicOrderingReleaseOrStronger())
+      PrintFatalError(getOrigPatFragRecord()->getRecord()->getLoc(),
+                      "IsAtomicOrderingReleaseOrStronger requires IsAtomic");
+    if (isAtomicOrderingWeakerThanAcquire())
+      PrintFatalError(getOrigPatFragRecord()->getRecord()->getLoc(),
+                      "IsAtomicOrderingWeakerThanAcquire requires IsAtomic");
+  }
+
+  // --- Code generation: address space, alignment and memory VT checks
+  //     shared by loads, stores and atomics. ---
+  if (isLoad() || isStore() || isAtomic()) {
+    if (ListInit *AddressSpaces = getAddressSpaces()) {
+      Code += "unsigned AddrSpace = cast<MemSDNode>(N)->getAddressSpace();\n"
+              " if (";
+
+      // Emit "AddrSpace != A && AddrSpace != B && ..." — i.e. fail if the
+      // node's address space matches none of the listed ones.
+      ListSeparator LS(" && ");
+      for (Init *Val : AddressSpaces->getValues()) {
+        Code += LS;
+
+        IntInit *IntVal = dyn_cast<IntInit>(Val);
+        if (!IntVal) {
+          PrintFatalError(getOrigPatFragRecord()->getRecord()->getLoc(),
+                          "AddressSpaces element must be integer");
+        }
+
+        Code += "AddrSpace != " + utostr(IntVal->getValue());
+      }
+
+      Code += ")\nreturn false;\n";
+    }
+
+    int64_t MinAlign = getMinAlignment();
+    if (MinAlign > 0) {
+      Code += "if (cast<MemSDNode>(N)->getAlign() < Align(";
+      Code += utostr(MinAlign);
+      Code += "))\nreturn false;\n";
+    }
+
+    Record *MemoryVT = getMemoryVT();
+
+    if (MemoryVT)
+      Code += ("if (cast<MemSDNode>(N)->getMemoryVT() != MVT::" +
+               MemoryVT->getName() + ") return false;\n")
+                  .str();
+  }
+
+  // --- Atomic ordering checks. ---
+  if (isAtomic() && isAtomicOrderingMonotonic())
+    Code += "if (cast<AtomicSDNode>(N)->getMergedOrdering() != "
+            "AtomicOrdering::Monotonic) return false;\n";
+  if (isAtomic() && isAtomicOrderingAcquire())
+    Code += "if (cast<AtomicSDNode>(N)->getMergedOrdering() != "
+            "AtomicOrdering::Acquire) return false;\n";
+  if (isAtomic() && isAtomicOrderingRelease())
+    Code += "if (cast<AtomicSDNode>(N)->getMergedOrdering() != "
+            "AtomicOrdering::Release) return false;\n";
+  if (isAtomic() && isAtomicOrderingAcquireRelease())
+    Code += "if (cast<AtomicSDNode>(N)->getMergedOrdering() != "
+            "AtomicOrdering::AcquireRelease) return false;\n";
+  if (isAtomic() && isAtomicOrderingSequentiallyConsistent())
+    Code += "if (cast<AtomicSDNode>(N)->getMergedOrdering() != "
+            "AtomicOrdering::SequentiallyConsistent) return false;\n";
+
+  if (isAtomic() && isAtomicOrderingAcquireOrStronger())
+    Code += "if (!isAcquireOrStronger(cast<AtomicSDNode>(N)->getMergedOrdering())) "
+            "return false;\n";
+  if (isAtomic() && isAtomicOrderingWeakerThanAcquire())
+    Code += "if (isAcquireOrStronger(cast<AtomicSDNode>(N)->getMergedOrdering())) "
+            "return false;\n";
+
+  if (isAtomic() && isAtomicOrderingReleaseOrStronger())
+    Code += "if (!isReleaseOrStronger(cast<AtomicSDNode>(N)->getMergedOrdering())) "
+            "return false;\n";
+  if (isAtomic() && isAtomicOrderingWeakerThanRelease())
+    Code += "if (isReleaseOrStronger(cast<AtomicSDNode>(N)->getMergedOrdering())) "
+            "return false;\n";
+
+  // TODO: Handle atomic sextload/zextload normally when ATOMIC_LOAD is removed.
+  if (isAtomic() && (isZeroExtLoad() || isSignExtLoad()))
+    Code += "return false;\n";
+
+  // --- Load/store-specific checks: addressing mode, extension kind,
+  //     truncation, scalar memory VT. ---
+  if (isLoad() || isStore()) {
+    StringRef SDNodeName = isLoad() ? "LoadSDNode" : "StoreSDNode";
+
+    if (isUnindexed())
+      Code += ("if (cast<" + SDNodeName +
+               ">(N)->getAddressingMode() != ISD::UNINDEXED) "
+               "return false;\n")
+                  .str();
+
+    if (isLoad()) {
+      if ((isNonExtLoad() + isAnyExtLoad() + isSignExtLoad() +
+           isZeroExtLoad()) > 1)
+        PrintFatalError(getOrigPatFragRecord()->getRecord()->getLoc(),
+                        "IsNonExtLoad, IsAnyExtLoad, IsSignExtLoad, and "
+                        "IsZeroExtLoad are mutually exclusive");
+      if (isNonExtLoad())
+        Code += "if (cast<LoadSDNode>(N)->getExtensionType() != "
+                "ISD::NON_EXTLOAD) return false;\n";
+      if (isAnyExtLoad())
+        Code += "if (cast<LoadSDNode>(N)->getExtensionType() != ISD::EXTLOAD) "
+                "return false;\n";
+      if (isSignExtLoad())
+        Code += "if (cast<LoadSDNode>(N)->getExtensionType() != ISD::SEXTLOAD) "
+                "return false;\n";
+      if (isZeroExtLoad())
+        Code += "if (cast<LoadSDNode>(N)->getExtensionType() != ISD::ZEXTLOAD) "
+                "return false;\n";
+    } else {
+      if ((isNonTruncStore() + isTruncStore()) > 1)
+        PrintFatalError(
+            getOrigPatFragRecord()->getRecord()->getLoc(),
+            "IsNonTruncStore, and IsTruncStore are mutually exclusive");
+      if (isNonTruncStore())
+        Code +=
+            " if (cast<StoreSDNode>(N)->isTruncatingStore()) return false;\n";
+      if (isTruncStore())
+        Code +=
+            " if (!cast<StoreSDNode>(N)->isTruncatingStore()) return false;\n";
+    }
+
+    Record *ScalarMemoryVT = getScalarMemoryVT();
+
+    if (ScalarMemoryVT)
+      Code += ("if (cast<" + SDNodeName +
+               ">(N)->getMemoryVT().getScalarType() != MVT::" +
+               ScalarMemoryVT->getName() + ") return false;\n")
+                  .str();
+  }
+
+  if (hasNoUse())
+    Code += "if (!SDValue(N, 0).use_empty()) return false;\n";
+
+  // Append the user-supplied predicate code, if any.
+  std::string PredicateCode =
+      std::string(PatFragRec->getRecord()->getValueAsString("PredicateCode"));
+
+  Code += PredicateCode;
+
+  // If only property-derived checks were emitted, they all fall through to
+  // success; terminate the generated function accordingly.
+  if (PredicateCode.empty() && !Code.empty())
+    Code += "return true;\n";
+
+  return Code;
+}
+
+// True if the .td record supplies an ImmediateCode body.
+bool TreePredicateFn::hasImmCode() const {
+  StringRef Code = PatFragRec->getRecord()->getValueAsString("ImmediateCode");
+  return !Code.empty();
+}
+
+std::string TreePredicateFn::getImmCode() const {
+  return PatFragRec->getRecord()->getValueAsString("ImmediateCode").str();
+}
+
+bool TreePredicateFn::immCodeUsesAPInt() const {
+  Record *R = getOrigPatFragRecord()->getRecord();
+  return R->getValueAsBit("IsAPInt");
+}
+
+bool TreePredicateFn::immCodeUsesAPFloat() const {
+  // An unset IsAPFloat bit reads as false.
+  bool Unset;
+  Record *R = getOrigPatFragRecord()->getRecord();
+  return R->getValueAsBitOrUnset("IsAPFloat", Unset);
+}
+
+// Compare the record's bit Field against Value. An unset bit never matches,
+// regardless of Value.
+bool TreePredicateFn::isPredefinedPredicateEqualTo(StringRef Field,
+                                                   bool Value) const {
+  bool Unset;
+  bool Bit =
+      getOrigPatFragRecord()->getRecord()->getValueAsBitOrUnset(Field, Unset);
+  return !Unset && Bit == Value;
+}
+// Thin accessors over the predefined predicate bits of the PatFrag record.
+// Each returns true only when the corresponding bit is explicitly set to the
+// queried value (unset bits never match, per isPredefinedPredicateEqualTo).
+bool TreePredicateFn::usesOperands() const {
+  return isPredefinedPredicateEqualTo("PredicateCodeUsesOperands", true);
+}
+bool TreePredicateFn::hasNoUse() const {
+  return isPredefinedPredicateEqualTo("HasNoUse", true);
+}
+bool TreePredicateFn::isLoad() const {
+  return isPredefinedPredicateEqualTo("IsLoad", true);
+}
+bool TreePredicateFn::isStore() const {
+  return isPredefinedPredicateEqualTo("IsStore", true);
+}
+bool TreePredicateFn::isAtomic() const {
+  return isPredefinedPredicateEqualTo("IsAtomic", true);
+}
+bool TreePredicateFn::isUnindexed() const {
+  return isPredefinedPredicateEqualTo("IsUnindexed", true);
+}
+bool TreePredicateFn::isNonExtLoad() const {
+  return isPredefinedPredicateEqualTo("IsNonExtLoad", true);
+}
+bool TreePredicateFn::isAnyExtLoad() const {
+  return isPredefinedPredicateEqualTo("IsAnyExtLoad", true);
+}
+bool TreePredicateFn::isSignExtLoad() const {
+  return isPredefinedPredicateEqualTo("IsSignExtLoad", true);
+}
+bool TreePredicateFn::isZeroExtLoad() const {
+  return isPredefinedPredicateEqualTo("IsZeroExtLoad", true);
+}
+// Note: requires IsTruncStore to be explicitly set to false; leaving the bit
+// unset is not the same as "non-truncating".
+bool TreePredicateFn::isNonTruncStore() const {
+  return isPredefinedPredicateEqualTo("IsTruncStore", false);
+}
+bool TreePredicateFn::isTruncStore() const {
+  return isPredefinedPredicateEqualTo("IsTruncStore", true);
+}
+bool TreePredicateFn::isAtomicOrderingMonotonic() const {
+  return isPredefinedPredicateEqualTo("IsAtomicOrderingMonotonic", true);
+}
+bool TreePredicateFn::isAtomicOrderingAcquire() const {
+  return isPredefinedPredicateEqualTo("IsAtomicOrderingAcquire", true);
+}
+bool TreePredicateFn::isAtomicOrderingRelease() const {
+  return isPredefinedPredicateEqualTo("IsAtomicOrderingRelease", true);
+}
+bool TreePredicateFn::isAtomicOrderingAcquireRelease() const {
+  return isPredefinedPredicateEqualTo("IsAtomicOrderingAcquireRelease", true);
+}
+bool TreePredicateFn::isAtomicOrderingSequentiallyConsistent() const {
+  return isPredefinedPredicateEqualTo("IsAtomicOrderingSequentiallyConsistent",
+                                      true);
+}
+bool TreePredicateFn::isAtomicOrderingAcquireOrStronger() const {
+  return isPredefinedPredicateEqualTo("IsAtomicOrderingAcquireOrStronger", true);
+}
+// "Weaker than acquire" is intentionally expressed as the same bit set to
+// false (not merely unset).
+bool TreePredicateFn::isAtomicOrderingWeakerThanAcquire() const {
+  return isPredefinedPredicateEqualTo("IsAtomicOrderingAcquireOrStronger", false);
+}
+bool TreePredicateFn::isAtomicOrderingReleaseOrStronger() const {
+  return isPredefinedPredicateEqualTo("IsAtomicOrderingReleaseOrStronger", true);
+}
+// "Weaker than release" is intentionally expressed as the same bit set to
+// false (not merely unset).
+bool TreePredicateFn::isAtomicOrderingWeakerThanRelease() const {
+  return isPredefinedPredicateEqualTo("IsAtomicOrderingReleaseOrStronger", false);
+}
+Record *TreePredicateFn::getMemoryVT() const {
+ Record *R = getOrigPatFragRecord()->getRecord();
+ if (R->isValueUnset("MemoryVT"))
+ return nullptr;
+ return R->getValueAsDef("MemoryVT");
+}
+
+ListInit *TreePredicateFn::getAddressSpaces() const {
+ Record *R = getOrigPatFragRecord()->getRecord();
+ if (R->isValueUnset("AddressSpaces"))
+ return nullptr;
+ return R->getValueAsListInit("AddressSpaces");
+}
+
+/// Return the fragment's minimum-alignment requirement; 0 means no
+/// constraint (the field is unset).
+int64_t TreePredicateFn::getMinAlignment() const {
+  Record *R = getOrigPatFragRecord()->getRecord();
+  return R->isValueUnset("MinAlignment") ? 0 : R->getValueAsInt("MinAlignment");
+}
+
+/// Return the ScalarMemoryVT record (element-type constraint for vector
+/// memory accesses), or null when the field is left unset.
+Record *TreePredicateFn::getScalarMemoryVT() const {
+  Record *R = getOrigPatFragRecord()->getRecord();
+  return R->isValueUnset("ScalarMemoryVT")
+             ? nullptr
+             : R->getValueAsDef("ScalarMemoryVT");
+}
+/// A fragment carries a GlobalISel predicate when its GISelPredicateCode
+/// string field is non-empty.
+bool TreePredicateFn::hasGISelPredicateCode() const {
+  StringRef Code =
+      PatFragRec->getRecord()->getValueAsString("GISelPredicateCode");
+  return !Code.empty();
+}
+/// Return the raw GlobalISel predicate code string.
+std::string TreePredicateFn::getGISelPredicateCode() const {
+  StringRef Code =
+      PatFragRec->getRecord()->getValueAsString("GISelPredicateCode");
+  return Code.str();
+}
+
+/// C++ type of the "Imm" variable bound for immediate predicate code:
+/// APInt/APFloat references when requested, otherwise a plain int64_t.
+StringRef TreePredicateFn::getImmType() const {
+  return immCodeUsesAPInt()     ? "const APInt &"
+         : immCodeUsesAPFloat() ? "const APFloat &"
+                                : "int64_t";
+}
+
+/// Short identifier for the immediate representation, used to build
+/// per-type function names in the generated matcher.
+StringRef TreePredicateFn::getImmTypeIdentifier() const {
+  return immCodeUsesAPInt()     ? "APInt"
+         : immCodeUsesAPFloat() ? "APFloat"
+                                : "I64";
+}
+
+/// isAlwaysTrue - A predicate is a no-op when it carries neither node
+/// predicate code nor immediate predicate code.
+bool TreePredicateFn::isAlwaysTrue() const {
+  return !(hasPredCode() || hasImmCode());
+}
+
+/// Name used in generated code to reference this predicate:
+/// "Predicate_foo" for a pattern fragment named "foo".
+std::string TreePredicateFn::getFnName() const {
+  std::string Name = "Predicate_";
+  Name += PatFragRec->getRecord()->getName().str();
+  return Name;
+}
+
+/// getCodeToRunOnSDNode - Return the code for the function body that
+/// evaluates this predicate. The argument is expected to be in "Node",
+/// not N. This handles casting and conversion to a concrete node type as
+/// appropriate.
+std::string TreePredicateFn::getCodeToRunOnSDNode() const {
+  // Handle immediate predicates first.
+  std::string ImmCode = getImmCode();
+  if (!ImmCode.empty()) {
+    // Memory-access attributes are meaningless on an immediate leaf, so
+    // each such combination is rejected with a fatal error.
+    if (isLoad())
+      PrintFatalError(getOrigPatFragRecord()->getRecord()->getLoc(),
+                      "IsLoad cannot be used with ImmLeaf or its subclasses");
+    if (isStore())
+      PrintFatalError(getOrigPatFragRecord()->getRecord()->getLoc(),
+                      "IsStore cannot be used with ImmLeaf or its subclasses");
+    if (isUnindexed())
+      PrintFatalError(
+          getOrigPatFragRecord()->getRecord()->getLoc(),
+          "IsUnindexed cannot be used with ImmLeaf or its subclasses");
+    if (isNonExtLoad())
+      PrintFatalError(
+          getOrigPatFragRecord()->getRecord()->getLoc(),
+          "IsNonExtLoad cannot be used with ImmLeaf or its subclasses");
+    if (isAnyExtLoad())
+      PrintFatalError(
+          getOrigPatFragRecord()->getRecord()->getLoc(),
+          "IsAnyExtLoad cannot be used with ImmLeaf or its subclasses");
+    if (isSignExtLoad())
+      PrintFatalError(
+          getOrigPatFragRecord()->getRecord()->getLoc(),
+          "IsSignExtLoad cannot be used with ImmLeaf or its subclasses");
+    if (isZeroExtLoad())
+      PrintFatalError(
+          getOrigPatFragRecord()->getRecord()->getLoc(),
+          "IsZeroExtLoad cannot be used with ImmLeaf or its subclasses");
+    if (isNonTruncStore())
+      PrintFatalError(
+          getOrigPatFragRecord()->getRecord()->getLoc(),
+          "IsNonTruncStore cannot be used with ImmLeaf or its subclasses");
+    if (isTruncStore())
+      PrintFatalError(
+          getOrigPatFragRecord()->getRecord()->getLoc(),
+          "IsTruncStore cannot be used with ImmLeaf or its subclasses");
+    if (getMemoryVT())
+      PrintFatalError(getOrigPatFragRecord()->getRecord()->getLoc(),
+                      "MemoryVT cannot be used with ImmLeaf or its subclasses");
+    if (getScalarMemoryVT())
+      PrintFatalError(
+          getOrigPatFragRecord()->getRecord()->getLoc(),
+          "ScalarMemoryVT cannot be used with ImmLeaf or its subclasses");
+
+    // Bind "Imm" to the constant's value in whichever representation the
+    // immediate code asked for (APFloat, APInt, or sign-extended int64_t).
+    std::string Result = (" " + getImmType() + " Imm = ").str();
+    if (immCodeUsesAPFloat())
+      Result += "cast<ConstantFPSDNode>(Node)->getValueAPF();\n";
+    else if (immCodeUsesAPInt())
+      Result += "cast<ConstantSDNode>(Node)->getAPIntValue();\n";
+    else
+      Result += "cast<ConstantSDNode>(Node)->getSExtValue();\n";
+    return Result + ImmCode;
+  }
+
+  // Handle arbitrary node predicates.
+  assert(hasPredCode() && "Don't have any predicate code!");
+
+  // If this is using PatFrags, there are multiple trees to search. They should
+  // all have the same class. FIXME: Is there a way to find a common
+  // superclass?
+  StringRef ClassName;
+  for (const auto &Tree : PatFragRec->getTrees()) {
+    StringRef TreeClassName;
+    if (Tree->isLeaf())
+      TreeClassName = "SDNode";
+    else {
+      Record *Op = Tree->getOperator();
+      const SDNodeInfo &Info = PatFragRec->getDAGPatterns().getSDNodeInfo(Op);
+      TreeClassName = Info.getSDClassName();
+    }
+
+    if (ClassName.empty())
+      ClassName = TreeClassName;
+    else if (ClassName != TreeClassName) {
+      PrintFatalError(getOrigPatFragRecord()->getRecord()->getLoc(),
+                      "PatFrags trees do not have consistent class");
+    }
+  }
+
+  // Bind "N" to the node, downcast to the common node class when known.
+  std::string Result;
+  if (ClassName == "SDNode")
+    Result = " SDNode *N = Node;\n";
+  else
+    Result = " auto *N = cast<" + ClassName.str() + ">(Node);\n";
+
+  // "(void)N" silences unused-variable warnings when the predicate code
+  // happens not to reference N.
+  return (Twine(Result) + " (void)N;\n" + getPredCode()).str();
+}
+
+//===----------------------------------------------------------------------===//
+// PatternToMatch implementation
+//
+
+/// True when the leaf names one of the special all-ones/all-zeros vector
+/// immediate records (immAllOnesV / immAllZerosV).
+static bool isImmAllOnesAllZerosMatch(const TreePatternNode *P) {
+  if (!P->isLeaf())
+    return false;
+
+  const auto *DI = dyn_cast<DefInit>(P->getLeafValue());
+  if (!DI)
+    return false;
+
+  StringRef Name = DI->getDef()->getName();
+  return Name == "immAllOnesV" || Name == "immAllZerosV";
+}
+
+/// getPatternSize - Return the 'size' of this pattern. We want to match large
+/// patterns before small ones. This is used to determine the size of a
+/// pattern.
+static unsigned getPatternSize(const TreePatternNode *P,
+                               const CodeGenDAGPatterns &CGP) {
+  unsigned Size = 3;  // The node itself.
+  // If the root node is a ConstantSDNode, increases its size.
+  // e.g. (set R32:$dst, 0).
+  if (P->isLeaf() && isa<IntInit>(P->getLeafValue()))
+    Size += 2;
+
+  if (const ComplexPattern *AM = P->getComplexPatternInfo(CGP)) {
+    Size += AM->getComplexity();
+    // We don't want to count any children twice, so return early.
+    return Size;
+  }
+
+  // If this node has some predicate function that must match, it adds to the
+  // complexity of this node.
+  if (!P->getPredicateCalls().empty())
+    ++Size;
+
+  // Count children in the count if they are also nodes.
+  for (unsigned i = 0, e = P->getNumChildren(); i != e; ++i) {
+    const TreePatternNode *Child = P->getChild(i);
+    if (!Child->isLeaf() && Child->getNumTypes()) {
+      const TypeSetByHwMode &T0 = Child->getExtType(0);
+      // At this point, all variable type sets should be simple, i.e. only
+      // have a default mode.
+      if (T0.getMachineValueType() != MVT::Other) {
+        Size += getPatternSize(Child, CGP);
+        continue;
+      }
+    }
+    if (Child->isLeaf()) {
+      if (isa<IntInit>(Child->getLeafValue()))
+        Size += 5;  // Matches a ConstantSDNode (+3) and a specific value (+2).
+      else if (Child->getComplexPatternInfo(CGP))
+        Size += getPatternSize(Child, CGP);
+      else if (isImmAllOnesAllZerosMatch(Child))
+        Size += 4; // Matches a build_vector(+3) and a predicate (+1).
+      else if (!Child->getPredicateCalls().empty())
+        ++Size;
+    }
+  }
+
+  return Size;
+}
+
+/// Compute the complexity metric for the input pattern. This roughly
+/// corresponds to the number of nodes that are covered, plus any
+/// explicitly requested AddedComplexity.
+int PatternToMatch::
+getPatternComplexity(const CodeGenDAGPatterns &CGP) const {
+  int SrcSize = getPatternSize(getSrcPattern(), CGP);
+  return SrcSize + getAddedComplexity();
+}
+
+/// Collect the Predicate records attached to this pattern into
+/// PredicateRecs, in a canonical (sorted) order so that equivalent
+/// predicate lists produce identical strings later on.
+void PatternToMatch::getPredicateRecords(
+    SmallVectorImpl<Record *> &PredicateRecs) const {
+  for (Init *I : Predicates->getValues()) {
+    if (DefInit *Pred = dyn_cast<DefInit>(I)) {
+      Record *Def = Pred->getDef();
+      if (!Def->isSubClassOf("Predicate")) {
+#ifndef NDEBUG
+        Def->dump();
+#endif
+        llvm_unreachable("Unknown predicate type!");
+      }
+      PredicateRecs.push_back(Def);
+    }
+  }
+  // Sort so that different orders get canonicalized to the same string.
+  llvm::sort(PredicateRecs, LessRecord());
+}
+
+/// getPredicateCheck - Return a single string containing all of this
+/// pattern's predicates concatenated with "&&" operators.
+///
+std::string PatternToMatch::getPredicateCheck() const {
+  SmallVector<Record *, 4> PredicateRecs;
+  getPredicateRecords(PredicateRecs);
+
+  SmallString<128> PredicateCheck;
+  for (Record *Pred : PredicateRecs) {
+    StringRef CondString = Pred->getValueAsString("CondString");
+    // Predicates with an empty condition string contribute nothing.
+    if (CondString.empty())
+      continue;
+    if (!PredicateCheck.empty())
+      PredicateCheck += " && ";
+    // Parenthesize each condition so operator precedence cannot leak
+    // between conjuncts.
+    PredicateCheck += "(";
+    PredicateCheck += CondString;
+    PredicateCheck += ")";
+  }
+
+  // HwMode feature checks, when present, are appended as one final conjunct.
+  if (!HwModeFeatures.empty()) {
+    if (!PredicateCheck.empty())
+      PredicateCheck += " && ";
+    PredicateCheck += HwModeFeatures;
+  }
+
+  return std::string(PredicateCheck);
+}
+
+//===----------------------------------------------------------------------===//
+// SDTypeConstraint implementation
+//
+
+/// Construct a type constraint from an SDTypeConstraint TableGen record:
+/// classify the constraint kind from the record's class and read out any
+/// referenced operand numbers or value types it carries.
+SDTypeConstraint::SDTypeConstraint(Record *R, const CodeGenHwModes &CGH) {
+  OperandNo = R->getValueAsInt("OperandNum");
+
+  if (R->isSubClassOf("SDTCisVT")) {
+    ConstraintType = SDTCisVT;
+    VVT = getValueTypeByHwMode(R->getValueAsDef("VT"), CGH);
+    for (const auto &P : VVT)
+      if (P.second == MVT::isVoid)
+        PrintFatalError(R->getLoc(), "Cannot use 'Void' as type to SDTCisVT");
+  } else if (R->isSubClassOf("SDTCisPtrTy")) {
+    ConstraintType = SDTCisPtrTy;
+  } else if (R->isSubClassOf("SDTCisInt")) {
+    ConstraintType = SDTCisInt;
+  } else if (R->isSubClassOf("SDTCisFP")) {
+    ConstraintType = SDTCisFP;
+  } else if (R->isSubClassOf("SDTCisVec")) {
+    ConstraintType = SDTCisVec;
+  } else if (R->isSubClassOf("SDTCisSameAs")) {
+    ConstraintType = SDTCisSameAs;
+    x.SDTCisSameAs_Info.OtherOperandNum = R->getValueAsInt("OtherOperandNum");
+  } else if (R->isSubClassOf("SDTCisVTSmallerThanOp")) {
+    ConstraintType = SDTCisVTSmallerThanOp;
+    x.SDTCisVTSmallerThanOp_Info.OtherOperandNum =
+        R->getValueAsInt("OtherOperandNum");
+  } else if (R->isSubClassOf("SDTCisOpSmallerThanOp")) {
+    ConstraintType = SDTCisOpSmallerThanOp;
+    x.SDTCisOpSmallerThanOp_Info.BigOperandNum =
+        R->getValueAsInt("BigOperandNum");
+  } else if (R->isSubClassOf("SDTCisEltOfVec")) {
+    ConstraintType = SDTCisEltOfVec;
+    x.SDTCisEltOfVec_Info.OtherOperandNum = R->getValueAsInt("OtherOpNum");
+  } else if (R->isSubClassOf("SDTCisSubVecOfVec")) {
+    ConstraintType = SDTCisSubVecOfVec;
+    x.SDTCisSubVecOfVec_Info.OtherOperandNum =
+        R->getValueAsInt("OtherOpNum");
+  } else if (R->isSubClassOf("SDTCVecEltisVT")) {
+    ConstraintType = SDTCVecEltisVT;
+    VVT = getValueTypeByHwMode(R->getValueAsDef("VT"), CGH);
+    // Element-type constraints only accept scalar integer/FP types.
+    for (const auto &P : VVT) {
+      MVT T = P.second;
+      if (T.isVector())
+        PrintFatalError(R->getLoc(),
+                        "Cannot use vector type as SDTCVecEltisVT");
+      if (!T.isInteger() && !T.isFloatingPoint())
+        PrintFatalError(R->getLoc(), "Must use integer or floating point type "
+                                     "as SDTCVecEltisVT");
+    }
+  } else if (R->isSubClassOf("SDTCisSameNumEltsAs")) {
+    ConstraintType = SDTCisSameNumEltsAs;
+    x.SDTCisSameNumEltsAs_Info.OtherOperandNum =
+        R->getValueAsInt("OtherOperandNum");
+  } else if (R->isSubClassOf("SDTCisSameSizeAs")) {
+    ConstraintType = SDTCisSameSizeAs;
+    x.SDTCisSameSizeAs_Info.OtherOperandNum =
+        R->getValueAsInt("OtherOperandNum");
+  } else {
+    PrintFatalError(R->getLoc(),
+                    "Unrecognized SDTypeConstraint '" + R->getName() + "'!\n");
+  }
+}
+
+/// getOperandNum - Return the node corresponding to operand #OpNo in tree
+/// N, and the result number in ResNo.
+///
+/// Operand numbers count the node's results first, then its children:
+/// OpNo < NumResults refers to result #OpNo of N itself, anything beyond
+/// that indexes into N's children.
+static TreePatternNode *getOperandNum(unsigned OpNo, TreePatternNode *N,
+                                      const SDNodeInfo &NodeInfo,
+                                      unsigned &ResNo) {
+  unsigned NumResults = NodeInfo.getNumResults();
+  if (OpNo < NumResults) {
+    ResNo = OpNo;
+    return N;
+  }
+
+  OpNo -= NumResults;
+
+  if (OpNo >= N->getNumChildren()) {
+    std::string S;
+    raw_string_ostream OS(S);
+    OS << "Invalid operand number in type constraint "
+           << (OpNo+NumResults) << " ";
+    N->print(OS);
+    PrintFatalError(S);
+  }
+
+  return N->getChild(OpNo);
+}
+
+/// ApplyTypeConstraint - Given a node in a pattern, apply this type
+/// constraint to the nodes operands. This returns true if it makes a
+/// change, false otherwise. If a type contradiction is found, flag an error.
+bool SDTypeConstraint::ApplyTypeConstraint(TreePatternNode *N,
+                                           const SDNodeInfo &NodeInfo,
+                                           TreePattern &TP) const {
+  if (TP.hasError())
+    return false;
+
+  unsigned ResNo = 0; // The result number being referenced.
+  TreePatternNode *NodeToApply = getOperandNum(OperandNo, N, NodeInfo, ResNo);
+  TypeInfer &TI = TP.getInfer();
+
+  switch (ConstraintType) {
+  case SDTCisVT:
+    // Operand must be a particular type.
+    return NodeToApply->UpdateNodeType(ResNo, VVT, TP);
+  case SDTCisPtrTy:
+    // Operand must be same as target pointer type.
+    return NodeToApply->UpdateNodeType(ResNo, MVT::iPTR, TP);
+  case SDTCisInt:
+    // Require it to be one of the legal integer VTs.
+    return TI.EnforceInteger(NodeToApply->getExtType(ResNo));
+  case SDTCisFP:
+    // Require it to be one of the legal fp VTs.
+    return TI.EnforceFloatingPoint(NodeToApply->getExtType(ResNo));
+  case SDTCisVec:
+    // Require it to be one of the legal vector VTs.
+    return TI.EnforceVector(NodeToApply->getExtType(ResNo));
+  case SDTCisSameAs: {
+    unsigned OResNo = 0;
+    TreePatternNode *OtherNode =
+      getOperandNum(x.SDTCisSameAs_Info.OtherOperandNum, N, NodeInfo, OResNo);
+    // Bitwise OR through int casts: both updates must always run, so the
+    // usual short-circuiting || would be wrong here.
+    return (int)NodeToApply->UpdateNodeType(ResNo,
+                                            OtherNode->getExtType(OResNo), TP) |
+           (int)OtherNode->UpdateNodeType(OResNo,
+                                          NodeToApply->getExtType(ResNo), TP);
+  }
+  case SDTCisVTSmallerThanOp: {
+    // The NodeToApply must be a leaf node that is a VT. OtherOperandNum must
+    // have an integer type that is smaller than the VT.
+    if (!NodeToApply->isLeaf() ||
+        !isa<DefInit>(NodeToApply->getLeafValue()) ||
+        !cast<DefInit>(NodeToApply->getLeafValue())->getDef()
+               ->isSubClassOf("ValueType")) {
+      TP.error(N->getOperator()->getName() + " expects a VT operand!");
+      return false;
+    }
+    DefInit *DI = cast<DefInit>(NodeToApply->getLeafValue());
+    const CodeGenTarget &T = TP.getDAGPatterns().getTargetInfo();
+    auto VVT = getValueTypeByHwMode(DI->getDef(), T.getHwModes());
+    TypeSetByHwMode TypeListTmp(VVT);
+
+    unsigned OResNo = 0;
+    TreePatternNode *OtherNode =
+      getOperandNum(x.SDTCisVTSmallerThanOp_Info.OtherOperandNum, N, NodeInfo,
+                    OResNo);
+
+    return TI.EnforceSmallerThan(TypeListTmp, OtherNode->getExtType(OResNo),
+                                 /*SmallIsVT*/ true);
+  }
+  case SDTCisOpSmallerThanOp: {
+    unsigned BResNo = 0;
+    TreePatternNode *BigOperand =
+      getOperandNum(x.SDTCisOpSmallerThanOp_Info.BigOperandNum, N, NodeInfo,
+                    BResNo);
+    return TI.EnforceSmallerThan(NodeToApply->getExtType(ResNo),
+                                 BigOperand->getExtType(BResNo));
+  }
+  case SDTCisEltOfVec: {
+    unsigned VResNo = 0;
+    TreePatternNode *VecOperand =
+      getOperandNum(x.SDTCisEltOfVec_Info.OtherOperandNum, N, NodeInfo,
+                    VResNo);
+    // Filter vector types out of VecOperand that don't have the right element
+    // type.
+    return TI.EnforceVectorEltTypeIs(VecOperand->getExtType(VResNo),
+                                     NodeToApply->getExtType(ResNo));
+  }
+  case SDTCisSubVecOfVec: {
+    unsigned VResNo = 0;
+    TreePatternNode *BigVecOperand =
+      getOperandNum(x.SDTCisSubVecOfVec_Info.OtherOperandNum, N, NodeInfo,
+                    VResNo);
+
+    // Filter vector types out of BigVecOperand that don't have the
+    // right subvector type.
+    return TI.EnforceVectorSubVectorTypeIs(BigVecOperand->getExtType(VResNo),
+                                           NodeToApply->getExtType(ResNo));
+  }
+  case SDTCVecEltisVT: {
+    return TI.EnforceVectorEltTypeIs(NodeToApply->getExtType(ResNo), VVT);
+  }
+  case SDTCisSameNumEltsAs: {
+    unsigned OResNo = 0;
+    TreePatternNode *OtherNode =
+      getOperandNum(x.SDTCisSameNumEltsAs_Info.OtherOperandNum,
+                    N, NodeInfo, OResNo);
+    return TI.EnforceSameNumElts(OtherNode->getExtType(OResNo),
+                                 NodeToApply->getExtType(ResNo));
+  }
+  case SDTCisSameSizeAs: {
+    unsigned OResNo = 0;
+    TreePatternNode *OtherNode =
+      getOperandNum(x.SDTCisSameSizeAs_Info.OtherOperandNum,
+                    N, NodeInfo, OResNo);
+    return TI.EnforceSameSize(OtherNode->getExtType(OResNo),
+                              NodeToApply->getExtType(ResNo));
+  }
+  }
+  llvm_unreachable("Invalid ConstraintType!");
+}
+
+// Update the node type to match an instruction operand or result as specified
+// in the ins or outs lists on the instruction definition. Return true if the
+// type was actually changed.
+bool TreePatternNode::UpdateNodeTypeFromInst(unsigned ResNo,
+                                             Record *Operand,
+                                             TreePattern &TP) {
+  // The 'unknown' operand indicates that types should be inferred from the
+  // context.
+  if (Operand->isSubClassOf("unknown_class"))
+    return false;
+
+  // The Operand class specifies a type directly.
+  if (Operand->isSubClassOf("Operand")) {
+    Record *R = Operand->getValueAsDef("Type");
+    const CodeGenTarget &T = TP.getDAGPatterns().getTargetInfo();
+    return UpdateNodeType(ResNo, getValueTypeByHwMode(R, T.getHwModes()), TP);
+  }
+
+  // PointerLikeRegClass has a type that is determined at runtime.
+  if (Operand->isSubClassOf("PointerLikeRegClass"))
+    return UpdateNodeType(ResNo, MVT::iPTR, TP);
+
+  // Both RegisterClass and RegisterOperand operands derive their types from a
+  // register class def.
+  Record *RC = nullptr;
+  if (Operand->isSubClassOf("RegisterClass"))
+    RC = Operand;
+  else if (Operand->isSubClassOf("RegisterOperand"))
+    RC = Operand->getValueAsDef("RegClass");
+
+  assert(RC && "Unknown operand type");
+  CodeGenTarget &Tgt = TP.getDAGPatterns().getTargetInfo();
+  return UpdateNodeType(ResNo, Tgt.getRegisterClass(RC).getValueTypes(), TP);
+}
+
+/// True when this node, or any node below it, still carries a type set
+/// that has not been narrowed to a single concrete type.
+bool TreePatternNode::ContainsUnresolvedType(TreePattern &TP) const {
+  for (const TypeSetByHwMode &TS : Types)
+    if (!TP.getInfer().isConcrete(TS, true))
+      return true;
+  for (const TreePatternNodePtr &C : Children)
+    if (C->ContainsUnresolvedType(TP))
+      return true;
+  return false;
+}
+
+/// True when some type set in this subtree carries per-HwMode entries
+/// beyond the default mode.
+bool TreePatternNode::hasProperTypeByHwMode() const {
+  for (unsigned I = 0, E = Types.size(); I != E; ++I)
+    if (!Types[I].isDefaultOnly())
+      return true;
+  for (unsigned I = 0, E = getNumChildren(); I != E; ++I)
+    if (getChild(I)->hasProperTypeByHwMode())
+      return true;
+  return false;
+}
+
+/// True when every type set in this subtree is still satisfiable
+/// (non-empty / possible).
+bool TreePatternNode::hasPossibleType() const {
+  for (unsigned I = 0, E = Types.size(); I != E; ++I)
+    if (!Types[I].isPossible())
+      return false;
+  for (unsigned I = 0, E = getNumChildren(); I != E; ++I)
+    if (!getChild(I)->hasPossibleType())
+      return false;
+  return true;
+}
+
+/// Collapse every type set in this subtree to the single given HW mode.
+/// Returns false if doing so exposes a type conflict anywhere.
+bool TreePatternNode::setDefaultMode(unsigned Mode) {
+  for (TypeSetByHwMode &TS : Types) {
+    TS.makeSimple(Mode);
+    // An empty default set after simplification means the selected mode
+    // had a type conflict.
+    if (TS.get(DefaultMode).empty())
+      return false;
+  }
+  for (unsigned I = 0, E = getNumChildren(); I != E; ++I)
+    if (!getChild(I)->setDefaultMode(Mode))
+      return false;
+  return true;
+}
+
+//===----------------------------------------------------------------------===//
+// SDNodeInfo implementation
+//
+
+/// Construct node info from an SDNode TableGen record: the opcode enum
+/// name, the C++ SDNode class, result/operand counts taken from the
+/// record's type profile, its properties, and its type constraints.
+SDNodeInfo::SDNodeInfo(Record *R, const CodeGenHwModes &CGH) : Def(R) {
+  EnumName = R->getValueAsString("Opcode");
+  SDClassName = R->getValueAsString("SDClass");
+  Record *TypeProfile = R->getValueAsDef("TypeProfile");
+  NumResults = TypeProfile->getValueAsInt("NumResults");
+  NumOperands = TypeProfile->getValueAsInt("NumOperands");
+
+  // Parse the properties.
+  Properties = parseSDPatternOperatorProperties(R);
+
+  // Parse the type constraints.
+  std::vector<Record*> ConstraintList =
+    TypeProfile->getValueAsListOfDefs("Constraints");
+  for (Record *R : ConstraintList)
+    TypeConstraints.emplace_back(R, CGH);
+}
+
+/// getKnownType - If the type constraints on this node imply a fixed type
+/// (e.g. all stores return void, etc), then return it as an
+/// MVT::SimpleValueType. Otherwise, return EEVT::Other.
+MVT::SimpleValueType SDNodeInfo::getKnownType(unsigned ResNo) const {
+  unsigned NumResults = getNumResults();
+  assert(NumResults <= 1 &&
+         "We only work with nodes with zero or one result so far!");
+  assert(ResNo == 0 && "Only handles single result nodes so far");
+
+  for (const SDTypeConstraint &Constraint : TypeConstraints) {
+    // Make sure that this applies to the correct node result.
+    if (Constraint.OperandNo >= NumResults)  // FIXME: need value #
+      continue;
+
+    // Only two constraint kinds pin the type down exactly: an explicit VT
+    // and the pointer-type constraint.
+    switch (Constraint.ConstraintType) {
+    default: break;
+    case SDTypeConstraint::SDTCisVT:
+      if (Constraint.VVT.isSimple())
+        return Constraint.VVT.getSimple().SimpleTy;
+      break;
+    case SDTypeConstraint::SDTCisPtrTy:
+      return MVT::iPTR;
+    }
+  }
+  return MVT::Other;
+}
+
+//===----------------------------------------------------------------------===//
+// TreePatternNode implementation
+//
+
+/// Return how many results the given operator record produces when used
+/// as a pattern operator, dispatching on the record's class.
+static unsigned GetNumNodeResults(Record *Operator, CodeGenDAGPatterns &CDP) {
+  if (Operator->getName() == "set" ||
+      Operator->getName() == "implicit")
+    return 0;  // All return nothing.
+
+  if (Operator->isSubClassOf("Intrinsic"))
+    return CDP.getIntrinsic(Operator).IS.RetVTs.size();
+
+  if (Operator->isSubClassOf("SDNode"))
+    return CDP.getSDNodeInfo(Operator).getNumResults();
+
+  if (Operator->isSubClassOf("PatFrags")) {
+    // If we've already parsed this pattern fragment, get it. Otherwise, handle
+    // the forward reference case where one pattern fragment references another
+    // before it is processed.
+    if (TreePattern *PFRec = CDP.getPatternFragmentIfRead(Operator)) {
+      // The number of results of a fragment with alternative records is the
+      // maximum number of results across all alternatives.
+      unsigned NumResults = 0;
+      for (const auto &T : PFRec->getTrees())
+        NumResults = std::max(NumResults, T->getNumTypes());
+      return NumResults;
+    }
+
+    // Forward reference: inspect the raw Fragments list directly, recursing
+    // into each alternative's top-level operator.
+    ListInit *LI = Operator->getValueAsListInit("Fragments");
+    assert(LI && "Invalid Fragment");
+    unsigned NumResults = 0;
+    for (Init *I : LI->getValues()) {
+      Record *Op = nullptr;
+      if (DagInit *Dag = dyn_cast<DagInit>(I))
+        if (DefInit *DI = dyn_cast<DefInit>(Dag->getOperator()))
+          Op = DI->getDef();
+      assert(Op && "Invalid Fragment");
+      NumResults = std::max(NumResults, GetNumNodeResults(Op, CDP));
+    }
+    return NumResults;
+  }
+
+  if (Operator->isSubClassOf("Instruction")) {
+    CodeGenInstruction &InstInfo = CDP.getTargetInfo().getInstruction(Operator);
+
+    unsigned NumDefsToAdd = InstInfo.Operands.NumDefs;
+
+    // Subtract any defaulted outputs.
+    for (unsigned i = 0; i != InstInfo.Operands.NumDefs; ++i) {
+      Record *OperandNode = InstInfo.Operands[i].Rec;
+
+      if (OperandNode->isSubClassOf("OperandWithDefaultOps") &&
+          !CDP.getDefaultOperand(OperandNode).DefaultOps.empty())
+        --NumDefsToAdd;
+    }
+
+    // Add on one implicit def if it has a resolvable type.
+    if (InstInfo.HasOneImplicitDefWithKnownVT(CDP.getTargetInfo()) !=MVT::Other)
+      ++NumDefsToAdd;
+    return NumDefsToAdd;
+  }
+
+  if (Operator->isSubClassOf("SDNodeXForm"))
+    return 1;  // FIXME: Generalize SDNodeXForm
+
+  if (Operator->isSubClassOf("ValueType"))
+    return 1;  // A type-cast of one result.
+
+  if (Operator->isSubClassOf("ComplexPattern"))
+    return 1;
+
+  errs() << *Operator;
+  PrintFatalError("Unhandled node in GetNumNodeResults");
+}
+
+/// Write a textual rendering of this node (and its subtree) to OS:
+/// the leaf value or "(operator children...)", each annotated with its
+/// type sets, predicate calls, transform function, and names.
+void TreePatternNode::print(raw_ostream &OS) const {
+  if (isLeaf())
+    OS << *getLeafValue();
+  else
+    OS << '(' << getOperator()->getName();
+
+  for (unsigned i = 0, e = Types.size(); i != e; ++i) {
+    OS << ':';
+    getExtType(i).writeToStream(OS);
+  }
+
+  if (!isLeaf()) {
+    if (getNumChildren() != 0) {
+      OS << " ";
+      ListSeparator LS;
+      for (unsigned i = 0, e = getNumChildren(); i != e; ++i) {
+        OS << LS;
+        getChild(i)->print(OS);
+      }
+    }
+    OS << ")";
+  }
+
+  for (const TreePredicateCall &Pred : PredicateCalls) {
+    OS << "<<P:";
+    if (Pred.Scope)
+      OS << Pred.Scope << ":";
+    OS << Pred.Fn.getFnName() << ">>";
+  }
+  if (TransformFn)
+    OS << "<<X:" << TransformFn->getName() << ">>";
+  if (!getName().empty())
+    OS << ":$" << getName();
+
+  for (const ScopedName &Name : NamesAsPredicateArg)
+    OS << ":$pred:" << Name.getScope() << ":" << Name.getIdentifier();
+}
+/// Convenience wrapper: print this node to stderr.
+void TreePatternNode::dump() const {
+  print(errs());
+}
+
+/// isIsomorphicTo - Return true if this node is recursively
+/// isomorphic to the specified node. For this comparison, the node's
+/// entire state is considered. The assigned name is ignored, since
+/// nodes with differing names are considered isomorphic. However, if
+/// the assigned name is present in the dependent variable set, then
+/// the assigned name is considered significant and the node is
+/// isomorphic if the names match.
+bool TreePatternNode::isIsomorphicTo(const TreePatternNode *N,
+                                     const MultipleUseVarSet &DepVars) const {
+  if (N == this) return true;
+  // Shape, types, predicates, and transform must all agree.
+  if (N->isLeaf() != isLeaf() || getExtTypes() != N->getExtTypes() ||
+      getPredicateCalls() != N->getPredicateCalls() ||
+      getTransformFn() != N->getTransformFn())
+    return false;
+
+  if (isLeaf()) {
+    if (DefInit *DI = dyn_cast<DefInit>(getLeafValue())) {
+      if (DefInit *NDI = dyn_cast<DefInit>(N->getLeafValue())) {
+        // Same record, and names must match only when this name is a
+        // dependent variable.
+        return ((DI->getDef() == NDI->getDef())
+                && (DepVars.find(getName()) == DepVars.end()
+                    || getName() == N->getName()));
+      }
+    }
+    return getLeafValue() == N->getLeafValue();
+  }
+
+  if (N->getOperator() != getOperator() ||
+      N->getNumChildren() != getNumChildren()) return false;
+  for (unsigned i = 0, e = getNumChildren(); i != e; ++i)
+    if (!getChild(i)->isIsomorphicTo(N->getChild(i), DepVars))
+      return false;
+  return true;
+}
+
+/// clone - Make a copy of this tree and all of its children.
+///
+TreePatternNodePtr TreePatternNode::clone() const {
+  TreePatternNodePtr New;
+  if (isLeaf()) {
+    New = std::make_shared<TreePatternNode>(getLeafValue(), getNumTypes());
+  } else {
+    // Deep-copy the children first, then build the operator node on top.
+    std::vector<TreePatternNodePtr> CChildren;
+    CChildren.reserve(Children.size());
+    for (unsigned i = 0, e = getNumChildren(); i != e; ++i)
+      CChildren.push_back(getChild(i)->clone());
+    New = std::make_shared<TreePatternNode>(getOperator(), std::move(CChildren),
+                                            getNumTypes());
+  }
+  // Carry over all per-node state: name, predicate-arg names, types,
+  // predicate calls, and the transform function.
+  New->setName(getName());
+  New->setNamesAsPredicateArg(getNamesAsPredicateArg());
+  New->Types = Types;
+  New->setPredicateCalls(getPredicateCalls());
+  New->setTransformFn(getTransformFn());
+  return New;
+}
+
+/// RemoveAllTypes - Recursively strip all the types of this tree,
+/// resetting every type set back to the unknown state.
+void TreePatternNode::RemoveAllTypes() {
+  for (TypeSetByHwMode &TS : Types)
+    TS = TypeSetByHwMode();
+  if (isLeaf())
+    return;
+  for (unsigned I = 0, E = getNumChildren(); I != E; ++I)
+    getChild(I)->RemoveAllTypes();
+}
+
+
+/// SubstituteFormalArguments - Replace the formal arguments in this tree
+/// with actual values specified by ArgMap.
+void TreePatternNode::SubstituteFormalArguments(
+    std::map<std::string, TreePatternNodePtr> &ArgMap) {
+  if (isLeaf()) return;
+
+  for (unsigned i = 0, e = getNumChildren(); i != e; ++i) {
+    TreePatternNode *Child = getChild(i);
+    if (Child->isLeaf()) {
+      Init *Val = Child->getLeafValue();
+      // Note that, when substituting into an output pattern, Val might be an
+      // UnsetInit.
+      if (isa<UnsetInit>(Val) || (isa<DefInit>(Val) &&
+          cast<DefInit>(Val)->getDef()->getName() == "node")) {
+        // We found a use of a formal argument, replace it with its value.
+        TreePatternNodePtr NewChild = ArgMap[Child->getName()];
+        assert(NewChild && "Couldn't find formal argument!");
+        // Substitution must not silently drop a child's predicate calls.
+        assert((Child->getPredicateCalls().empty() ||
+                NewChild->getPredicateCalls() == Child->getPredicateCalls()) &&
+               "Non-empty child predicate clobbered!");
+        setChild(i, std::move(NewChild));
+      }
+    } else {
+      // Non-leaf children may themselves contain formal-argument uses.
+      getChild(i)->SubstituteFormalArguments(ArgMap);
+    }
+  }
+}
+
+
+/// InlinePatternFragments - If this pattern refers to any pattern
+/// fragments, return the set of inlined versions (this can be more than
+/// one if a PatFrags record has multiple alternatives).
+void TreePatternNode::InlinePatternFragments(
+  TreePatternNodePtr T, TreePattern &TP,
+  std::vector<TreePatternNodePtr> &OutAlternatives) {
+
+  if (TP.hasError())
+    return;
+
+  if (isLeaf()) {
+    OutAlternatives.push_back(T);  // nothing to do.
+    return;
+  }
+
+  Record *Op = getOperator();
+
+  if (!Op->isSubClassOf("PatFrags")) {
+    if (getNumChildren() == 0) {
+      OutAlternatives.push_back(T);
+      return;
+    }
+
+    // Recursively inline children nodes.
+    std::vector<std::vector<TreePatternNodePtr> > ChildAlternatives;
+    ChildAlternatives.resize(getNumChildren());
+    for (unsigned i = 0, e = getNumChildren(); i != e; ++i) {
+      TreePatternNodePtr Child = getChildShared(i);
+      Child->InlinePatternFragments(Child, TP, ChildAlternatives[i]);
+      // If there are no alternatives for any child, there are no
+      // alternatives for this expression as whole.
+      if (ChildAlternatives[i].empty())
+        return;
+
+      assert((Child->getPredicateCalls().empty() ||
+              llvm::all_of(ChildAlternatives[i],
+                           [&](const TreePatternNodePtr &NewChild) {
+                             return NewChild->getPredicateCalls() ==
+                                    Child->getPredicateCalls();
+                           })) &&
+             "Non-empty child predicate clobbered!");
+    }
+
+    // The end result is an all-pairs construction of the resultant pattern.
+    // Idxs acts as a mixed-radix counter over the child alternative lists.
+    std::vector<unsigned> Idxs;
+    Idxs.resize(ChildAlternatives.size());
+    bool NotDone;
+    do {
+      // Create the variant and add it to the output list.
+      std::vector<TreePatternNodePtr> NewChildren;
+      for (unsigned i = 0, e = ChildAlternatives.size(); i != e; ++i)
+        NewChildren.push_back(ChildAlternatives[i][Idxs[i]]);
+      TreePatternNodePtr R = std::make_shared<TreePatternNode>(
+          getOperator(), std::move(NewChildren), getNumTypes());
+
+      // Copy over properties.
+      R->setName(getName());
+      R->setNamesAsPredicateArg(getNamesAsPredicateArg());
+      R->setPredicateCalls(getPredicateCalls());
+      R->setTransformFn(getTransformFn());
+      for (unsigned i = 0, e = getNumTypes(); i != e; ++i)
+        R->setType(i, getExtType(i));
+      for (unsigned i = 0, e = getNumResults(); i != e; ++i)
+        R->setResultIndex(i, getResultIndex(i));
+
+      // Register alternative.
+      OutAlternatives.push_back(R);
+
+      // Increment indices to the next permutation by incrementing the
+      // indices from last index backward, e.g., generate the sequence
+      // [0, 0], [0, 1], [1, 0], [1, 1].
+      int IdxsIdx;
+      for (IdxsIdx = Idxs.size() - 1; IdxsIdx >= 0; --IdxsIdx) {
+        if (++Idxs[IdxsIdx] == ChildAlternatives[IdxsIdx].size())
+          Idxs[IdxsIdx] = 0;
+        else
+          break;
+      }
+      NotDone = (IdxsIdx >= 0);
+    } while (NotDone);
+
+    return;
+  }
+
+  // Otherwise, we found a reference to a fragment. First, look up its
+  // TreePattern record.
+  TreePattern *Frag = TP.getDAGPatterns().getPatternFragment(Op);
+
+  // Verify that we are passing the right number of operands.
+  if (Frag->getNumArgs() != Children.size()) {
+    TP.error("'" + Op->getName() + "' fragment requires " +
+             Twine(Frag->getNumArgs()) + " operands!");
+    return;
+  }
+
+  TreePredicateFn PredFn(Frag);
+  unsigned Scope = 0;
+  // Predicates that reference fragment operands need a fresh scope so the
+  // operand names stay distinct across inlining sites.
+  if (TreePredicateFn(Frag).usesOperands())
+    Scope = TP.getDAGPatterns().allocateScope();
+
+  // Compute the map of formal to actual arguments.
+  std::map<std::string, TreePatternNodePtr> ArgMap;
+  for (unsigned i = 0, e = Frag->getNumArgs(); i != e; ++i) {
+    TreePatternNodePtr Child = getChildShared(i);
+    if (Scope != 0) {
+      Child = Child->clone();
+      Child->addNameAsPredicateArg(ScopedName(Scope, Frag->getArgName(i)));
+    }
+    ArgMap[Frag->getArgName(i)] = Child;
+  }
+
+  // Loop over all fragment alternatives.
+  for (const auto &Alternative : Frag->getTrees()) {
+    TreePatternNodePtr FragTree = Alternative->clone();
+
+    if (!PredFn.isAlwaysTrue())
+      FragTree->addPredicateCall(PredFn, Scope);
+
+    // Resolve formal arguments to their actual value.
+    if (Frag->getNumArgs())
+      FragTree->SubstituteFormalArguments(ArgMap);
+
+    // Transfer types. Note that the resolved alternative may have fewer
+    // (but not more) results than the PatFrags node.
+    FragTree->setName(getName());
+    for (unsigned i = 0, e = FragTree->getNumTypes(); i != e; ++i)
+      FragTree->UpdateNodeType(i, getExtType(i), TP);
+
+    // Transfer in the old predicates.
+    for (const TreePredicateCall &Pred : getPredicateCalls())
+      FragTree->addPredicateCall(Pred);
+
+    // The fragment we inlined could have recursive inlining that is needed. See
+    // if there are any pattern fragments in it and inline them as needed.
+    FragTree->InlinePatternFragments(FragTree, TP, OutAlternatives);
+  }
+}
+
+/// getImplicitType - Check to see if the specified record has an implicit
+/// type which should be applied to it. This will infer the type of register
+/// references from the register file information, for example.
+///
+/// When Unnamed is set, return the type of a DAG operand with no name, such as
+/// the F8RC register class argument in:
+///
+/// (COPY_TO_REGCLASS GPR:$src, F8RC)
+///
+/// When Unnamed is false, return the type of a named DAG operand such as the
+/// GPR:$src operand above.
+///
+static TypeSetByHwMode getImplicitType(Record *R, unsigned ResNo,
+ bool NotRegisters,
+ bool Unnamed,
+ TreePattern &TP) {
+ CodeGenDAGPatterns &CDP = TP.getDAGPatterns();
+
+ // Check to see if this is a register operand.
+ if (R->isSubClassOf("RegisterOperand")) {
+ assert(ResNo == 0 && "Regoperand ref only has one result!");
+ if (NotRegisters)
+ return TypeSetByHwMode(); // Unknown.
+ Record *RegClass = R->getValueAsDef("RegClass");
+ const CodeGenTarget &T = TP.getDAGPatterns().getTargetInfo();
+ return TypeSetByHwMode(T.getRegisterClass(RegClass).getValueTypes());
+ }
+
+ // Check to see if this is a register or a register class.
+ if (R->isSubClassOf("RegisterClass")) {
+ assert(ResNo == 0 && "Regclass ref only has one result!");
+ // An unnamed register class represents itself as an i32 immediate, for
+ // example on a COPY_TO_REGCLASS instruction.
+ if (Unnamed)
+ return TypeSetByHwMode(MVT::i32);
+
+ // In a named operand, the register class provides the possible set of
+ // types.
+ if (NotRegisters)
+ return TypeSetByHwMode(); // Unknown.
+ const CodeGenTarget &T = TP.getDAGPatterns().getTargetInfo();
+ return TypeSetByHwMode(T.getRegisterClass(R).getValueTypes());
+ }
+
+ if (R->isSubClassOf("PatFrags")) {
+ assert(ResNo == 0 && "FIXME: PatFrag with multiple results?");
+ // Pattern fragment types will be resolved when they are inlined.
+ return TypeSetByHwMode(); // Unknown.
+ }
+
+ if (R->isSubClassOf("Register")) {
+ assert(ResNo == 0 && "Registers only produce one result!");
+ if (NotRegisters)
+ return TypeSetByHwMode(); // Unknown.
+ const CodeGenTarget &T = TP.getDAGPatterns().getTargetInfo();
+ return TypeSetByHwMode(T.getRegisterVTs(R));
+ }
+
+ if (R->isSubClassOf("SubRegIndex")) {
+ assert(ResNo == 0 && "SubRegisterIndices only produce one result!");
+ return TypeSetByHwMode(MVT::i32);
+ }
+
+ if (R->isSubClassOf("ValueType")) {
+ assert(ResNo == 0 && "This node only has one result!");
+ // An unnamed VTSDNode represents itself as an MVT::Other immediate.
+ //
+ // (sext_inreg GPR:$src, i16)
+ // ~~~
+ if (Unnamed)
+ return TypeSetByHwMode(MVT::Other);
+ // With a name, the ValueType simply provides the type of the named
+ // variable.
+ //
+ // (sext_inreg i32:$src, i16)
+ // ~~~~~~~~
+ if (NotRegisters)
+ return TypeSetByHwMode(); // Unknown.
+ const CodeGenHwModes &CGH = CDP.getTargetInfo().getHwModes();
+ return TypeSetByHwMode(getValueTypeByHwMode(R, CGH));
+ }
+
+ if (R->isSubClassOf("CondCode")) {
+ assert(ResNo == 0 && "This node only has one result!");
+ // Using a CondCodeSDNode.
+ return TypeSetByHwMode(MVT::Other);
+ }
+
+ if (R->isSubClassOf("ComplexPattern")) {
+ assert(ResNo == 0 && "FIXME: ComplexPattern with multiple results?");
+ if (NotRegisters)
+ return TypeSetByHwMode(); // Unknown.
+ Record *T = CDP.getComplexPattern(R).getValueType();
+ const CodeGenHwModes &CGH = CDP.getTargetInfo().getHwModes();
+ return TypeSetByHwMode(getValueTypeByHwMode(T, CGH));
+ }
+ if (R->isSubClassOf("PointerLikeRegClass")) {
+ assert(ResNo == 0 && "Regclass can only have one result!");
+ TypeSetByHwMode VTS(MVT::iPTR);
+ TP.getInfer().expandOverloads(VTS);
+ return VTS;
+ }
+
+ if (R->getName() == "node" || R->getName() == "srcvalue" ||
+ R->getName() == "zero_reg" || R->getName() == "immAllOnesV" ||
+ R->getName() == "immAllZerosV" || R->getName() == "undef_tied_input") {
+ // Placeholder.
+ return TypeSetByHwMode(); // Unknown.
+ }
+
+ if (R->isSubClassOf("Operand")) {
+ const CodeGenHwModes &CGH = CDP.getTargetInfo().getHwModes();
+ Record *T = R->getValueAsDef("Type");
+ return TypeSetByHwMode(getValueTypeByHwMode(T, CGH));
+ }
+
+ TP.error("Unknown node flavor used in pattern: " + R->getName());
+ return TypeSetByHwMode(MVT::Other);
+}
+
+
+/// getIntrinsicInfo - If this node corresponds to an intrinsic, return the
+/// CodeGenIntrinsic information for it, otherwise return a null pointer.
+const CodeGenIntrinsic *TreePatternNode::
+getIntrinsicInfo(const CodeGenDAGPatterns &CDP) const {
+ if (getOperator() != CDP.get_intrinsic_void_sdnode() &&
+ getOperator() != CDP.get_intrinsic_w_chain_sdnode() &&
+ getOperator() != CDP.get_intrinsic_wo_chain_sdnode())
+ return nullptr;
+
+ unsigned IID = cast<IntInit>(getChild(0)->getLeafValue())->getValue();
+ return &CDP.getIntrinsicInfo(IID);
+}
+
+/// getComplexPatternInfo - If this node corresponds to a ComplexPattern,
+/// return the ComplexPattern information, otherwise return null.
+const ComplexPattern *
+TreePatternNode::getComplexPatternInfo(const CodeGenDAGPatterns &CGP) const {
+ Record *Rec;
+ if (isLeaf()) {
+ DefInit *DI = dyn_cast<DefInit>(getLeafValue());
+ if (!DI)
+ return nullptr;
+ Rec = DI->getDef();
+ } else
+ Rec = getOperator();
+
+ if (!Rec->isSubClassOf("ComplexPattern"))
+ return nullptr;
+ return &CGP.getComplexPattern(Rec);
+}
+
+unsigned TreePatternNode::getNumMIResults(const CodeGenDAGPatterns &CGP) const {
+ // A ComplexPattern specifically declares how many results it fills in.
+ if (const ComplexPattern *CP = getComplexPatternInfo(CGP))
+ return CP->getNumOperands();
+
+ // If MIOperandInfo is specified, that gives the count.
+ if (isLeaf()) {
+ DefInit *DI = dyn_cast<DefInit>(getLeafValue());
+ if (DI && DI->getDef()->isSubClassOf("Operand")) {
+ DagInit *MIOps = DI->getDef()->getValueAsDag("MIOperandInfo");
+ if (MIOps->getNumArgs())
+ return MIOps->getNumArgs();
+ }
+ }
+
+ // Otherwise there is just one result.
+ return 1;
+}
+
+/// NodeHasProperty - Return true if this node has the specified property.
+bool TreePatternNode::NodeHasProperty(SDNP Property,
+ const CodeGenDAGPatterns &CGP) const {
+ if (isLeaf()) {
+ if (const ComplexPattern *CP = getComplexPatternInfo(CGP))
+ return CP->hasProperty(Property);
+
+ return false;
+ }
+
+ if (Property != SDNPHasChain) {
+ // The chain proprety is already present on the different intrinsic node
+ // types (intrinsic_w_chain, intrinsic_void), and is not explicitly listed
+ // on the intrinsic. Anything else is specific to the individual intrinsic.
+ if (const CodeGenIntrinsic *Int = getIntrinsicInfo(CGP))
+ return Int->hasProperty(Property);
+ }
+
+ if (!Operator->isSubClassOf("SDPatternOperator"))
+ return false;
+
+ return CGP.getSDNodeInfo(Operator).hasProperty(Property);
+}
+
+
+
+
+/// TreeHasProperty - Return true if any node in this tree has the specified
+/// property.
+bool TreePatternNode::TreeHasProperty(SDNP Property,
+ const CodeGenDAGPatterns &CGP) const {
+ if (NodeHasProperty(Property, CGP))
+ return true;
+ for (unsigned i = 0, e = getNumChildren(); i != e; ++i)
+ if (getChild(i)->TreeHasProperty(Property, CGP))
+ return true;
+ return false;
+}
+
+/// isCommutativeIntrinsic - Return true if the node corresponds to a
+/// commutative intrinsic.
+bool
+TreePatternNode::isCommutativeIntrinsic(const CodeGenDAGPatterns &CDP) const {
+ if (const CodeGenIntrinsic *Int = getIntrinsicInfo(CDP))
+ return Int->isCommutative;
+ return false;
+}
+
+static bool isOperandClass(const TreePatternNode *N, StringRef Class) {
+ if (!N->isLeaf())
+ return N->getOperator()->isSubClassOf(Class);
+
+ DefInit *DI = dyn_cast<DefInit>(N->getLeafValue());
+ if (DI && DI->getDef()->isSubClassOf(Class))
+ return true;
+
+ return false;
+}
+
+static void emitTooManyOperandsError(TreePattern &TP,
+ StringRef InstName,
+ unsigned Expected,
+ unsigned Actual) {
+ TP.error("Instruction '" + InstName + "' was provided " + Twine(Actual) +
+ " operands but expected only " + Twine(Expected) + "!");
+}
+
+static void emitTooFewOperandsError(TreePattern &TP,
+ StringRef InstName,
+ unsigned Actual) {
+ TP.error("Instruction '" + InstName +
+ "' expects more than the provided " + Twine(Actual) + " operands!");
+}
+
+/// ApplyTypeConstraints - Apply all of the type constraints relevant to
+/// this node and its children in the tree. This returns true if it makes a
+/// change, false otherwise. If a type contradiction is found, flag an error.
+bool TreePatternNode::ApplyTypeConstraints(TreePattern &TP, bool NotRegisters) {
+ if (TP.hasError())
+ return false;
+
+ CodeGenDAGPatterns &CDP = TP.getDAGPatterns();
+ if (isLeaf()) {
+ if (DefInit *DI = dyn_cast<DefInit>(getLeafValue())) {
+ // If it's a regclass or something else known, include the type.
+ bool MadeChange = false;
+ for (unsigned i = 0, e = Types.size(); i != e; ++i)
+ MadeChange |= UpdateNodeType(i, getImplicitType(DI->getDef(), i,
+ NotRegisters,
+ !hasName(), TP), TP);
+ return MadeChange;
+ }
+
+ if (IntInit *II = dyn_cast<IntInit>(getLeafValue())) {
+ assert(Types.size() == 1 && "Invalid IntInit");
+
+ // Int inits are always integers. :)
+ bool MadeChange = TP.getInfer().EnforceInteger(Types[0]);
+
+ if (!TP.getInfer().isConcrete(Types[0], false))
+ return MadeChange;
+
+ ValueTypeByHwMode VVT = TP.getInfer().getConcrete(Types[0], false);
+ for (auto &P : VVT) {
+ MVT::SimpleValueType VT = P.second.SimpleTy;
+ if (VT == MVT::iPTR || VT == MVT::iPTRAny)
+ continue;
+ unsigned Size = MVT(VT).getFixedSizeInBits();
+ // Make sure that the value is representable for this type.
+ if (Size >= 32)
+ continue;
+ // Check that the value doesn't use more bits than we have. It must
+ // either be a sign- or zero-extended equivalent of the original.
+ int64_t SignBitAndAbove = II->getValue() >> (Size - 1);
+ if (SignBitAndAbove == -1 || SignBitAndAbove == 0 ||
+ SignBitAndAbove == 1)
+ continue;
+
+ TP.error("Integer value '" + Twine(II->getValue()) +
+ "' is out of range for type '" + getEnumName(VT) + "'!");
+ break;
+ }
+ return MadeChange;
+ }
+
+ return false;
+ }
+
+ if (const CodeGenIntrinsic *Int = getIntrinsicInfo(CDP)) {
+ bool MadeChange = false;
+
+ // Apply the result type to the node.
+ unsigned NumRetVTs = Int->IS.RetVTs.size();
+ unsigned NumParamVTs = Int->IS.ParamVTs.size();
+
+ for (unsigned i = 0, e = NumRetVTs; i != e; ++i)
+ MadeChange |= UpdateNodeType(i, Int->IS.RetVTs[i], TP);
+
+ if (getNumChildren() != NumParamVTs + 1) {
+ TP.error("Intrinsic '" + Int->Name + "' expects " + Twine(NumParamVTs) +
+ " operands, not " + Twine(getNumChildren() - 1) + " operands!");
+ return false;
+ }
+
+ // Apply type info to the intrinsic ID.
+ MadeChange |= getChild(0)->UpdateNodeType(0, MVT::iPTR, TP);
+
+ for (unsigned i = 0, e = getNumChildren()-1; i != e; ++i) {
+ MadeChange |= getChild(i+1)->ApplyTypeConstraints(TP, NotRegisters);
+
+ MVT::SimpleValueType OpVT = Int->IS.ParamVTs[i];
+ assert(getChild(i+1)->getNumTypes() == 1 && "Unhandled case");
+ MadeChange |= getChild(i+1)->UpdateNodeType(0, OpVT, TP);
+ }
+ return MadeChange;
+ }
+
+ if (getOperator()->isSubClassOf("SDNode")) {
+ const SDNodeInfo &NI = CDP.getSDNodeInfo(getOperator());
+
+ // Check that the number of operands is sane. Negative operands -> varargs.
+ if (NI.getNumOperands() >= 0 &&
+ getNumChildren() != (unsigned)NI.getNumOperands()) {
+ TP.error(getOperator()->getName() + " node requires exactly " +
+ Twine(NI.getNumOperands()) + " operands!");
+ return false;
+ }
+
+ bool MadeChange = false;
+ for (unsigned i = 0, e = getNumChildren(); i != e; ++i)
+ MadeChange |= getChild(i)->ApplyTypeConstraints(TP, NotRegisters);
+ MadeChange |= NI.ApplyTypeConstraints(this, TP);
+ return MadeChange;
+ }
+
+ if (getOperator()->isSubClassOf("Instruction")) {
+ const DAGInstruction &Inst = CDP.getInstruction(getOperator());
+ CodeGenInstruction &InstInfo =
+ CDP.getTargetInfo().getInstruction(getOperator());
+
+ bool MadeChange = false;
+
+ // Apply the result types to the node, these come from the things in the
+ // (outs) list of the instruction.
+ unsigned NumResultsToAdd = std::min(InstInfo.Operands.NumDefs,
+ Inst.getNumResults());
+ for (unsigned ResNo = 0; ResNo != NumResultsToAdd; ++ResNo)
+ MadeChange |= UpdateNodeTypeFromInst(ResNo, Inst.getResult(ResNo), TP);
+
+ // If the instruction has implicit defs, we apply the first one as a result.
+ // FIXME: This sucks, it should apply all implicit defs.
+ if (!InstInfo.ImplicitDefs.empty()) {
+ unsigned ResNo = NumResultsToAdd;
+
+ // FIXME: Generalize to multiple possible types and multiple possible
+ // ImplicitDefs.
+ MVT::SimpleValueType VT =
+ InstInfo.HasOneImplicitDefWithKnownVT(CDP.getTargetInfo());
+
+ if (VT != MVT::Other)
+ MadeChange |= UpdateNodeType(ResNo, VT, TP);
+ }
+
+ // If this is an INSERT_SUBREG, constrain the source and destination VTs to
+ // be the same.
+ if (getOperator()->getName() == "INSERT_SUBREG") {
+ assert(getChild(0)->getNumTypes() == 1 && "FIXME: Unhandled");
+ MadeChange |= UpdateNodeType(0, getChild(0)->getExtType(0), TP);
+ MadeChange |= getChild(0)->UpdateNodeType(0, getExtType(0), TP);
+ } else if (getOperator()->getName() == "REG_SEQUENCE") {
+ // We need to do extra, custom typechecking for REG_SEQUENCE since it is
+ // variadic.
+
+ unsigned NChild = getNumChildren();
+ if (NChild < 3) {
+ TP.error("REG_SEQUENCE requires at least 3 operands!");
+ return false;
+ }
+
+ if (NChild % 2 == 0) {
+ TP.error("REG_SEQUENCE requires an odd number of operands!");
+ return false;
+ }
+
+ if (!isOperandClass(getChild(0), "RegisterClass")) {
+ TP.error("REG_SEQUENCE requires a RegisterClass for first operand!");
+ return false;
+ }
+
+ for (unsigned I = 1; I < NChild; I += 2) {
+ TreePatternNode *SubIdxChild = getChild(I + 1);
+ if (!isOperandClass(SubIdxChild, "SubRegIndex")) {
+ TP.error("REG_SEQUENCE requires a SubRegIndex for operand " +
+ Twine(I + 1) + "!");
+ return false;
+ }
+ }
+ }
+
+ unsigned NumResults = Inst.getNumResults();
+ unsigned NumFixedOperands = InstInfo.Operands.size();
+
+ // If one or more operands with a default value appear at the end of the
+ // formal operand list for an instruction, we allow them to be overridden
+ // by optional operands provided in the pattern.
+ //
+ // But if an operand B without a default appears at any point after an
+ // operand A with a default, then we don't allow A to be overridden,
+ // because there would be no way to specify whether the next operand in
+ // the pattern was intended to override A or skip it.
+ unsigned NonOverridableOperands = NumFixedOperands;
+ while (NonOverridableOperands > NumResults &&
+ CDP.operandHasDefault(InstInfo.Operands[NonOverridableOperands-1].Rec))
+ --NonOverridableOperands;
+
+ unsigned ChildNo = 0;
+ assert(NumResults <= NumFixedOperands);
+ for (unsigned i = NumResults, e = NumFixedOperands; i != e; ++i) {
+ Record *OperandNode = InstInfo.Operands[i].Rec;
+
+ // If the operand has a default value, do we use it? We must use the
+ // default if we've run out of children of the pattern DAG to consume,
+ // or if the operand is followed by a non-defaulted one.
+ if (CDP.operandHasDefault(OperandNode) &&
+ (i < NonOverridableOperands || ChildNo >= getNumChildren()))
+ continue;
+
+ // If we have run out of child nodes and there _isn't_ a default
+ // value we can use for the next operand, give an error.
+ if (ChildNo >= getNumChildren()) {
+ emitTooFewOperandsError(TP, getOperator()->getName(), getNumChildren());
+ return false;
+ }
+
+ TreePatternNode *Child = getChild(ChildNo++);
+ unsigned ChildResNo = 0; // Instructions always use res #0 of their op.
+
+ // If the operand has sub-operands, they may be provided by distinct
+ // child patterns, so attempt to match each sub-operand separately.
+ if (OperandNode->isSubClassOf("Operand")) {
+ DagInit *MIOpInfo = OperandNode->getValueAsDag("MIOperandInfo");
+ if (unsigned NumArgs = MIOpInfo->getNumArgs()) {
+ // But don't do that if the whole operand is being provided by
+ // a single ComplexPattern-related Operand.
+
+ if (Child->getNumMIResults(CDP) < NumArgs) {
+ // Match first sub-operand against the child we already have.
+ Record *SubRec = cast<DefInit>(MIOpInfo->getArg(0))->getDef();
+ MadeChange |=
+ Child->UpdateNodeTypeFromInst(ChildResNo, SubRec, TP);
+
+ // And the remaining sub-operands against subsequent children.
+ for (unsigned Arg = 1; Arg < NumArgs; ++Arg) {
+ if (ChildNo >= getNumChildren()) {
+ emitTooFewOperandsError(TP, getOperator()->getName(),
+ getNumChildren());
+ return false;
+ }
+ Child = getChild(ChildNo++);
+
+ SubRec = cast<DefInit>(MIOpInfo->getArg(Arg))->getDef();
+ MadeChange |=
+ Child->UpdateNodeTypeFromInst(ChildResNo, SubRec, TP);
+ }
+ continue;
+ }
+ }
+ }
+
+ // If we didn't match by pieces above, attempt to match the whole
+ // operand now.
+ MadeChange |= Child->UpdateNodeTypeFromInst(ChildResNo, OperandNode, TP);
+ }
+
+ if (!InstInfo.Operands.isVariadic && ChildNo != getNumChildren()) {
+ emitTooManyOperandsError(TP, getOperator()->getName(),
+ ChildNo, getNumChildren());
+ return false;
+ }
+
+ for (unsigned i = 0, e = getNumChildren(); i != e; ++i)
+ MadeChange |= getChild(i)->ApplyTypeConstraints(TP, NotRegisters);
+ return MadeChange;
+ }
+
+ if (getOperator()->isSubClassOf("ComplexPattern")) {
+ bool MadeChange = false;
+
+ if (!NotRegisters) {
+ assert(Types.size() == 1 && "ComplexPatterns only produce one result!");
+ Record *T = CDP.getComplexPattern(getOperator()).getValueType();
+ const CodeGenHwModes &CGH = CDP.getTargetInfo().getHwModes();
+ const ValueTypeByHwMode VVT = getValueTypeByHwMode(T, CGH);
+ // TODO: AArch64 and AMDGPU use ComplexPattern<untyped, ...> and then
+ // exclusively use those as non-leaf nodes with explicit type casts, so
+ // for backwards compatibility we do no inference in that case. This is
+ // not supported when the ComplexPattern is used as a leaf value,
+ // however; this inconsistency should be resolved, either by adding this
+ // case there or by altering the backends to not do this (e.g. using Any
+ // instead may work).
+ if (!VVT.isSimple() || VVT.getSimple() != MVT::Untyped)
+ MadeChange |= UpdateNodeType(0, VVT, TP);
+ }
+
+ for (unsigned i = 0; i < getNumChildren(); ++i)
+ MadeChange |= getChild(i)->ApplyTypeConstraints(TP, NotRegisters);
+
+ return MadeChange;
+ }
+
+ assert(getOperator()->isSubClassOf("SDNodeXForm") && "Unknown node type!");
+
+ // Node transforms always take one operand.
+ if (getNumChildren() != 1) {
+ TP.error("Node transform '" + getOperator()->getName() +
+ "' requires one operand!");
+ return false;
+ }
+
+ bool MadeChange = getChild(0)->ApplyTypeConstraints(TP, NotRegisters);
+ return MadeChange;
+}
+
+/// OnlyOnRHSOfCommutative - Return true if this value is only allowed on the
+/// RHS of a commutative operation, not the on LHS.
+static bool OnlyOnRHSOfCommutative(TreePatternNode *N) {
+ if (!N->isLeaf() && N->getOperator()->getName() == "imm")
+ return true;
+ if (N->isLeaf() && isa<IntInit>(N->getLeafValue()))
+ return true;
+ if (isImmAllOnesAllZerosMatch(N))
+ return true;
+ return false;
+}
+
+
+/// canPatternMatch - If it is impossible for this pattern to match on this
+/// target, fill in Reason and return false. Otherwise, return true. This is
+/// used as a sanity check for .td files (to prevent people from writing stuff
+/// that can never possibly work), and to prevent the pattern permuter from
+/// generating stuff that is useless.
+bool TreePatternNode::canPatternMatch(std::string &Reason,
+ const CodeGenDAGPatterns &CDP) {
+ if (isLeaf()) return true;
+
+ for (unsigned i = 0, e = getNumChildren(); i != e; ++i)
+ if (!getChild(i)->canPatternMatch(Reason, CDP))
+ return false;
+
+ // If this is an intrinsic, handle cases that would make it not match. For
+ // example, if an operand is required to be an immediate.
+ if (getOperator()->isSubClassOf("Intrinsic")) {
+ // TODO:
+ return true;
+ }
+
+ if (getOperator()->isSubClassOf("ComplexPattern"))
+ return true;
+
+ // If this node is a commutative operator, check that the LHS isn't an
+ // immediate.
+ const SDNodeInfo &NodeInfo = CDP.getSDNodeInfo(getOperator());
+ bool isCommIntrinsic = isCommutativeIntrinsic(CDP);
+ if (NodeInfo.hasProperty(SDNPCommutative) || isCommIntrinsic) {
+ // Scan all of the operands of the node and make sure that only the last one
+ // is a constant node, unless the RHS also is.
+ if (!OnlyOnRHSOfCommutative(getChild(getNumChildren()-1))) {
+ unsigned Skip = isCommIntrinsic ? 1 : 0; // First operand is intrinsic id.
+ for (unsigned i = Skip, e = getNumChildren()-1; i != e; ++i)
+ if (OnlyOnRHSOfCommutative(getChild(i))) {
+ Reason="Immediate value must be on the RHS of commutative operators!";
+ return false;
+ }
+ }
+ }
+
+ return true;
+}
+
+//===----------------------------------------------------------------------===//
+// TreePattern implementation
+//
+
+TreePattern::TreePattern(Record *TheRec, ListInit *RawPat, bool isInput,
+ CodeGenDAGPatterns &cdp) : TheRecord(TheRec), CDP(cdp),
+ isInputPattern(isInput), HasError(false),
+ Infer(*this) {
+ for (Init *I : RawPat->getValues())
+ Trees.push_back(ParseTreePattern(I, ""));
+}
+
+TreePattern::TreePattern(Record *TheRec, DagInit *Pat, bool isInput,
+ CodeGenDAGPatterns &cdp) : TheRecord(TheRec), CDP(cdp),
+ isInputPattern(isInput), HasError(false),
+ Infer(*this) {
+ Trees.push_back(ParseTreePattern(Pat, ""));
+}
+
+TreePattern::TreePattern(Record *TheRec, TreePatternNodePtr Pat, bool isInput,
+ CodeGenDAGPatterns &cdp)
+ : TheRecord(TheRec), CDP(cdp), isInputPattern(isInput), HasError(false),
+ Infer(*this) {
+ Trees.push_back(Pat);
+}
+
+void TreePattern::error(const Twine &Msg) {
+ if (HasError)
+ return;
+ dump();
+ PrintError(TheRecord->getLoc(), "In " + TheRecord->getName() + ": " + Msg);
+ HasError = true;
+}
+
+void TreePattern::ComputeNamedNodes() {
+ for (TreePatternNodePtr &Tree : Trees)
+ ComputeNamedNodes(Tree.get());
+}
+
+void TreePattern::ComputeNamedNodes(TreePatternNode *N) {
+ if (!N->getName().empty())
+ NamedNodes[N->getName()].push_back(N);
+
+ for (unsigned i = 0, e = N->getNumChildren(); i != e; ++i)
+ ComputeNamedNodes(N->getChild(i));
+}
+
+TreePatternNodePtr TreePattern::ParseTreePattern(Init *TheInit,
+ StringRef OpName) {
+ RecordKeeper &RK = TheInit->getRecordKeeper();
+ if (DefInit *DI = dyn_cast<DefInit>(TheInit)) {
+ Record *R = DI->getDef();
+
+ // Direct reference to a leaf DagNode or PatFrag? Turn it into a
+ // TreePatternNode of its own. For example:
+ /// (foo GPR, imm) -> (foo GPR, (imm))
+ if (R->isSubClassOf("SDNode") || R->isSubClassOf("PatFrags"))
+ return ParseTreePattern(
+ DagInit::get(DI, nullptr,
+ std::vector<std::pair<Init*, StringInit*> >()),
+ OpName);
+
+ // Input argument?
+ TreePatternNodePtr Res = std::make_shared<TreePatternNode>(DI, 1);
+ if (R->getName() == "node" && !OpName.empty()) {
+ if (OpName.empty())
+ error("'node' argument requires a name to match with operand list");
+ Args.push_back(std::string(OpName));
+ }
+
+ Res->setName(OpName);
+ return Res;
+ }
+
+ // ?:$name or just $name.
+ if (isa<UnsetInit>(TheInit)) {
+ if (OpName.empty())
+ error("'?' argument requires a name to match with operand list");
+ TreePatternNodePtr Res = std::make_shared<TreePatternNode>(TheInit, 1);
+ Args.push_back(std::string(OpName));
+ Res->setName(OpName);
+ return Res;
+ }
+
+ if (isa<IntInit>(TheInit) || isa<BitInit>(TheInit)) {
+ if (!OpName.empty())
+ error("Constant int or bit argument should not have a name!");
+ if (isa<BitInit>(TheInit))
+ TheInit = TheInit->convertInitializerTo(IntRecTy::get(RK));
+ return std::make_shared<TreePatternNode>(TheInit, 1);
+ }
+
+ if (BitsInit *BI = dyn_cast<BitsInit>(TheInit)) {
+ // Turn this into an IntInit.
+ Init *II = BI->convertInitializerTo(IntRecTy::get(RK));
+ if (!II || !isa<IntInit>(II))
+ error("Bits value must be constants!");
+ return ParseTreePattern(II, OpName);
+ }
+
+ DagInit *Dag = dyn_cast<DagInit>(TheInit);
+ if (!Dag) {
+ TheInit->print(errs());
+ error("Pattern has unexpected init kind!");
+ }
+ DefInit *OpDef = dyn_cast<DefInit>(Dag->getOperator());
+ if (!OpDef) error("Pattern has unexpected operator type!");
+ Record *Operator = OpDef->getDef();
+
+ if (Operator->isSubClassOf("ValueType")) {
+ // If the operator is a ValueType, then this must be "type cast" of a leaf
+ // node.
+ if (Dag->getNumArgs() != 1)
+ error("Type cast only takes one operand!");
+
+ TreePatternNodePtr New =
+ ParseTreePattern(Dag->getArg(0), Dag->getArgNameStr(0));
+
+ // Apply the type cast.
+ if (New->getNumTypes() != 1)
+ error("Type cast can only have one type!");
+ const CodeGenHwModes &CGH = getDAGPatterns().getTargetInfo().getHwModes();
+ New->UpdateNodeType(0, getValueTypeByHwMode(Operator, CGH), *this);
+
+ if (!OpName.empty())
+ error("ValueType cast should not have a name!");
+ return New;
+ }
+
+ // Verify that this is something that makes sense for an operator.
+ if (!Operator->isSubClassOf("PatFrags") &&
+ !Operator->isSubClassOf("SDNode") &&
+ !Operator->isSubClassOf("Instruction") &&
+ !Operator->isSubClassOf("SDNodeXForm") &&
+ !Operator->isSubClassOf("Intrinsic") &&
+ !Operator->isSubClassOf("ComplexPattern") &&
+ Operator->getName() != "set" &&
+ Operator->getName() != "implicit")
+ error("Unrecognized node '" + Operator->getName() + "'!");
+
+ // Check to see if this is something that is illegal in an input pattern.
+ if (isInputPattern) {
+ if (Operator->isSubClassOf("Instruction") ||
+ Operator->isSubClassOf("SDNodeXForm"))
+ error("Cannot use '" + Operator->getName() + "' in an input pattern!");
+ } else {
+ if (Operator->isSubClassOf("Intrinsic"))
+ error("Cannot use '" + Operator->getName() + "' in an output pattern!");
+
+ if (Operator->isSubClassOf("SDNode") &&
+ Operator->getName() != "imm" &&
+ Operator->getName() != "timm" &&
+ Operator->getName() != "fpimm" &&
+ Operator->getName() != "tglobaltlsaddr" &&
+ Operator->getName() != "tconstpool" &&
+ Operator->getName() != "tjumptable" &&
+ Operator->getName() != "tframeindex" &&
+ Operator->getName() != "texternalsym" &&
+ Operator->getName() != "tblockaddress" &&
+ Operator->getName() != "tglobaladdr" &&
+ Operator->getName() != "bb" &&
+ Operator->getName() != "vt" &&
+ Operator->getName() != "mcsym")
+ error("Cannot use '" + Operator->getName() + "' in an output pattern!");
+ }
+
+ std::vector<TreePatternNodePtr> Children;
+
+ // Parse all the operands.
+ for (unsigned i = 0, e = Dag->getNumArgs(); i != e; ++i)
+ Children.push_back(ParseTreePattern(Dag->getArg(i), Dag->getArgNameStr(i)));
+
+ // Get the actual number of results before Operator is converted to an intrinsic
+ // node (which is hard-coded to have either zero or one result).
+ unsigned NumResults = GetNumNodeResults(Operator, CDP);
+
+ // If the operator is an intrinsic, then this is just syntactic sugar for
+ // (intrinsic_* <number>, ..children..). Pick the right intrinsic node, and
+ // convert the intrinsic name to a number.
+ if (Operator->isSubClassOf("Intrinsic")) {
+ const CodeGenIntrinsic &Int = getDAGPatterns().getIntrinsic(Operator);
+ unsigned IID = getDAGPatterns().getIntrinsicID(Operator)+1;
+
+ // If this intrinsic returns void, it must have side-effects and thus a
+ // chain.
+ if (Int.IS.RetVTs.empty())
+ Operator = getDAGPatterns().get_intrinsic_void_sdnode();
+ else if (!Int.ME.doesNotAccessMemory() || Int.hasSideEffects)
+ // Has side-effects, requires chain.
+ Operator = getDAGPatterns().get_intrinsic_w_chain_sdnode();
+ else // Otherwise, no chain.
+ Operator = getDAGPatterns().get_intrinsic_wo_chain_sdnode();
+
+ Children.insert(Children.begin(), std::make_shared<TreePatternNode>(
+ IntInit::get(RK, IID), 1));
+ }
+
+ if (Operator->isSubClassOf("ComplexPattern")) {
+ for (unsigned i = 0; i < Children.size(); ++i) {
+ TreePatternNodePtr Child = Children[i];
+
+ if (Child->getName().empty())
+ error("All arguments to a ComplexPattern must be named");
+
+ // Check that the ComplexPattern uses are consistent: "(MY_PAT $a, $b)"
+ // and "(MY_PAT $b, $a)" should not be allowed in the same pattern;
+ // neither should "(MY_PAT_1 $a, $b)" and "(MY_PAT_2 $a, $b)".
+ auto OperandId = std::make_pair(Operator, i);
+ auto PrevOp = ComplexPatternOperands.find(Child->getName());
+ if (PrevOp != ComplexPatternOperands.end()) {
+ if (PrevOp->getValue() != OperandId)
+ error("All ComplexPattern operands must appear consistently: "
+ "in the same order in just one ComplexPattern instance.");
+ } else
+ ComplexPatternOperands[Child->getName()] = OperandId;
+ }
+ }
+
+ TreePatternNodePtr Result =
+ std::make_shared<TreePatternNode>(Operator, std::move(Children),
+ NumResults);
+ Result->setName(OpName);
+
+ if (Dag->getName()) {
+ assert(Result->getName().empty());
+ Result->setName(Dag->getNameStr());
+ }
+ return Result;
+}
+
+/// SimplifyTree - See if we can simplify this tree to eliminate something that
+/// will never match in favor of something obvious that will. This is here
+/// strictly as a convenience to target authors because it allows them to write
+/// more type generic things and have useless type casts fold away.
+///
+/// This returns true if any change is made.
+static bool SimplifyTree(TreePatternNodePtr &N) {
+ if (N->isLeaf())
+ return false;
+
+ // If we have a bitconvert with a resolved type and if the source and
+ // destination types are the same, then the bitconvert is useless, remove it.
+ //
+ // We make an exception if the types are completely empty. This can come up
+ // when the pattern being simplified is in the Fragments list of a PatFrags,
+ // so that the operand is just an untyped "node". In that situation we leave
+ // bitconverts unsimplified, and simplify them later once the fragment is
+ // expanded into its true context.
+ if (N->getOperator()->getName() == "bitconvert" &&
+ N->getExtType(0).isValueTypeByHwMode(false) &&
+ !N->getExtType(0).empty() &&
+ N->getExtType(0) == N->getChild(0)->getExtType(0) &&
+ N->getName().empty()) {
+ N = N->getChildShared(0);
+ SimplifyTree(N);
+ return true;
+ }
+
+ // Walk all children.
+ bool MadeChange = false;
+ for (unsigned i = 0, e = N->getNumChildren(); i != e; ++i) {
+ TreePatternNodePtr Child = N->getChildShared(i);
+ MadeChange |= SimplifyTree(Child);
+ N->setChild(i, std::move(Child));
+ }
+ return MadeChange;
+}
+
+
+
+/// InferAllTypes - Infer/propagate as many types throughout the expression
+/// patterns as possible. Return true if all types are inferred, false
+/// otherwise. Flags an error if a type contradiction is found.
+///
+/// If \p InNamedTypes is given (the already-resolved named nodes of the
+/// input pattern), the types of identically-named nodes here are unified
+/// with those input types as well.
+bool TreePattern::
+InferAllTypes(const StringMap<SmallVector<TreePatternNode*,1> > *InNamedTypes) {
+  if (NamedNodes.empty())
+    ComputeNamedNodes();
+
+  // Run constraint propagation to a fixed point: keep applying per-node
+  // type constraints (and tree simplification) until nothing changes.
+  bool MadeChange = true;
+  while (MadeChange) {
+    MadeChange = false;
+    for (TreePatternNodePtr &Tree : Trees) {
+      MadeChange |= Tree->ApplyTypeConstraints(*this, false);
+      MadeChange |= SimplifyTree(Tree);
+    }
+
+    // If there are constraints on our named nodes, apply them.
+    for (auto &Entry : NamedNodes) {
+      SmallVectorImpl<TreePatternNode*> &Nodes = Entry.second;
+
+      // If we have input named node types, propagate their types to the named
+      // values here.
+      if (InNamedTypes) {
+        // Every name used in the output must also appear in the input;
+        // note that error() records the problem and we bail out with true
+        // ("done") since no further inference is meaningful.
+        if (!InNamedTypes->count(Entry.getKey())) {
+          error("Node '" + std::string(Entry.getKey()) +
+                "' in output pattern but not input pattern");
+          return true;
+        }
+
+        const SmallVectorImpl<TreePatternNode*> &InNodes =
+          InNamedTypes->find(Entry.getKey())->second;
+
+        // The input types should be fully resolved by now.
+        for (TreePatternNode *Node : Nodes) {
+          // If this node is a register class, and it is the root of the pattern
+          // then we're mapping something onto an input register. We allow
+          // changing the type of the input register in this case. This allows
+          // us to match things like:
+          //  def : Pat<(v1i64 (bitconvert(v2i32 DPR:$src))), (v1i64 DPR:$src)>;
+          if (Node == Trees[0].get() && Node->isLeaf()) {
+            DefInit *DI = dyn_cast<DefInit>(Node->getLeafValue());
+            if (DI && (DI->getDef()->isSubClassOf("RegisterClass") ||
+                       DI->getDef()->isSubClassOf("RegisterOperand")))
+              continue;
+          }
+
+          assert(Node->getNumTypes() == 1 &&
+                 InNodes[0]->getNumTypes() == 1 &&
+                 "FIXME: cannot name multiple result nodes yet");
+          MadeChange |= Node->UpdateNodeType(0, InNodes[0]->getExtType(0),
+                                             *this);
+        }
+      }
+
+      // If there are multiple nodes with the same name, they must all have the
+      // same type.
+      if (Entry.second.size() > 1) {
+        // Unify pairwise in both directions so type information flows
+        // between all occurrences of the name.
+        for (unsigned i = 0, e = Nodes.size()-1; i != e; ++i) {
+          TreePatternNode *N1 = Nodes[i], *N2 = Nodes[i+1];
+          assert(N1->getNumTypes() == 1 && N2->getNumTypes() == 1 &&
+                 "FIXME: cannot name multiple result nodes yet");
+
+          MadeChange |= N1->UpdateNodeType(0, N2->getExtType(0), *this);
+          MadeChange |= N2->UpdateNodeType(0, N1->getExtType(0), *this);
+        }
+      }
+    }
+  }
+
+  // Report success only if every tree's types are now concrete.
+  bool HasUnresolvedTypes = false;
+  for (const TreePatternNodePtr &Tree : Trees)
+    HasUnresolvedTypes |= Tree->ContainsUnresolvedType(*this);
+  return !HasUnresolvedTypes;
+}
+
+/// Render this pattern to OS: record name, optional "(arg, ...)" list,
+/// then one tab-indented line per alternative tree (bracketed when there
+/// is more than one).
+void TreePattern::print(raw_ostream &OS) const {
+  OS << getRecord()->getName();
+
+  // Emit the argument list, if any, as "(a, b, c)".
+  if (!Args.empty()) {
+    OS << "(";
+    ListSeparator Sep;
+    for (const std::string &ArgName : Args)
+      OS << Sep << ArgName;
+    OS << ")";
+  }
+  OS << ": ";
+
+  const bool MultipleTrees = Trees.size() > 1;
+  if (MultipleTrees)
+    OS << "[\n";
+  for (const TreePatternNodePtr &T : Trees) {
+    OS << "\t";
+    T->print(OS);
+    OS << "\n";
+  }
+  if (MultipleTrees)
+    OS << "]\n";
+}
+
+void TreePattern::dump() const { print(errs()); }
+
+//===----------------------------------------------------------------------===//
+// CodeGenDAGPatterns implementation
+//
+
+// Construct the full DAG-pattern database for a target. The parse steps
+// below are order-dependent: node definitions must exist before transforms
+// and fragments, fragments before instructions, and output fragments are
+// only parsed after instructions are known.
+CodeGenDAGPatterns::CodeGenDAGPatterns(RecordKeeper &R,
+                                       PatternRewriterFn PatternRewriter)
+    : Records(R), Target(R), LegalVTS(Target.getLegalValueTypes()),
+      PatternRewriter(PatternRewriter) {
+
+  Intrinsics = CodeGenIntrinsicTable(Records);
+  ParseNodeInfo();
+  ParseNodeTransforms();
+  ParseComplexPatterns();
+  ParsePatternFragments();
+  ParseDefaultOperands();
+  ParseInstructions();
+  // OutPatFrags are deferred until here because they may reference
+  // instructions parsed above.
+  ParsePatternFragments(/*OutFrags*/true);
+  ParsePatterns();
+
+  // Generate variants. For example, commutative patterns can match
+  // multiple ways. Add them to PatternsToMatch as well.
+  GenerateVariants();
+
+  // Break patterns with parameterized types into a series of patterns,
+  // where each one has a fixed type and is predicated on the conditions
+  // of the associated HW mode.
+  ExpandHwModeBasedTypes();
+
+  // Infer instruction flags. For example, we can detect loads,
+  // stores, and side effects in many cases by examining an
+  // instruction's pattern.
+  InferInstructionFlags();
+
+  // Verify that instruction flags match the patterns.
+  VerifyInstructionFlags();
+}
+
+/// Look up the record named \p Name and verify it derives from SDNode;
+/// emits a fatal error (and does not return) otherwise.
+Record *CodeGenDAGPatterns::getSDNodeNamed(StringRef Name) const {
+  Record *Node = Records.getDef(Name);
+  if (!Node || !Node->isSubClassOf("SDNode"))
+    PrintFatalError("Error getting SDNode '" + Name + "'!");
+  return Node;
+}
+
+// Parse all of the SDNode definitions for the target, populating SDNodes.
+void CodeGenDAGPatterns::ParseNodeInfo() {
+ std::vector<Record*> Nodes = Records.getAllDerivedDefinitions("SDNode");
+ const CodeGenHwModes &CGH = getTargetInfo().getHwModes();
+
+ while (!Nodes.empty()) {
+ Record *R = Nodes.back();
+ SDNodes.insert(std::make_pair(R, SDNodeInfo(R, CGH)));
+ Nodes.pop_back();
+ }
+
+ // Get the builtin intrinsic nodes.
+ intrinsic_void_sdnode = getSDNodeNamed("intrinsic_void");
+ intrinsic_w_chain_sdnode = getSDNodeNamed("intrinsic_w_chain");
+ intrinsic_wo_chain_sdnode = getSDNodeNamed("intrinsic_wo_chain");
+}
+
+/// ParseNodeTransforms - Parse all SDNodeXForm instances into the SDNodeXForms
+/// map, and emit them to the file as functions.
+/// ParseNodeTransforms - Parse all SDNodeXForm instances into the SDNodeXForms
+/// map, and emit them to the file as functions.
+void CodeGenDAGPatterns::ParseNodeTransforms() {
+  for (Record *XFormNode : Records.getAllDerivedDefinitions("SDNodeXForm")) {
+    Record *Opcode = XFormNode->getValueAsDef("Opcode");
+    StringRef CodeStr = XFormNode->getValueAsString("XFormFunction");
+    SDNodeXForms.insert(
+        std::make_pair(XFormNode, NodeXForm(Opcode, std::string(CodeStr))));
+  }
+}
+
+/// Parse every ComplexPattern definition into the ComplexPatterns map.
+void CodeGenDAGPatterns::ParseComplexPatterns() {
+  for (Record *R : Records.getAllDerivedDefinitions("ComplexPattern"))
+    ComplexPatterns.insert(std::make_pair(R, R));
+}
+
+
+/// ParsePatternFragments - Parse all of the PatFrag definitions in the .td
+/// file, building up the PatternFragments map. After we've collected them all,
+/// inline fragments together as necessary, so that there are no references left
+/// inside a pattern fragment to a pattern fragment.
+///
+/// ParsePatternFragments - Parse all of the PatFrag definitions in the .td
+/// file, building up the PatternFragments map. After we've collected them all,
+/// inline fragments together as necessary, so that there are no references left
+/// inside a pattern fragment to a pattern fragment.
+///
+/// \p OutFrags selects which subset is processed on this call: output
+/// fragments (OutPatFrag) when true, input fragments otherwise.
+void CodeGenDAGPatterns::ParsePatternFragments(bool OutFrags) {
+  std::vector<Record*> Fragments = Records.getAllDerivedDefinitions("PatFrags");
+
+  // First step, parse all of the fragments.
+  for (Record *Frag : Fragments) {
+    // Skip fragments not selected by OutFrags; the other kind is handled
+    // by a separate call.
+    if (OutFrags != Frag->isSubClassOf("OutPatFrag"))
+      continue;
+
+    ListInit *LI = Frag->getValueAsListInit("Fragments");
+    TreePattern *P =
+        (PatternFragments[Frag] = std::make_unique<TreePattern>(
+             Frag, LI, !Frag->isSubClassOf("OutPatFrag"),
+             *this)).get();
+
+    // Validate the argument list, converting it to set, to discard duplicates.
+    std::vector<std::string> &Args = P->getArgList();
+    // Copy the args so we can take StringRefs to them.
+    auto ArgsCopy = Args;
+    SmallDenseSet<StringRef, 4> OperandsSet;
+    OperandsSet.insert(ArgsCopy.begin(), ArgsCopy.end());
+
+    if (OperandsSet.count(""))
+      P->error("Cannot have unnamed 'node' values in pattern fragment!");
+
+    // Parse the operands list.
+    DagInit *OpsList = Frag->getValueAsDag("Operands");
+    DefInit *OpsOp = dyn_cast<DefInit>(OpsList->getOperator());
+    // Special cases: ops == outs == ins. Different names are used to
+    // improve readability.
+    if (!OpsOp ||
+        (OpsOp->getDef()->getName() != "ops" &&
+         OpsOp->getDef()->getName() != "outs" &&
+         OpsOp->getDef()->getName() != "ins"))
+      P->error("Operands list should start with '(ops ... '!");
+
+    // Copy over the arguments.
+    Args.clear();
+    for (unsigned j = 0, e = OpsList->getNumArgs(); j != e; ++j) {
+      if (!isa<DefInit>(OpsList->getArg(j)) ||
+          cast<DefInit>(OpsList->getArg(j))->getDef()->getName() != "node")
+        P->error("Operands list should all be 'node' values.");
+      if (!OpsList->getArgName(j))
+        P->error("Operands list should have names for each operand!");
+      StringRef ArgNameStr = OpsList->getArgNameStr(j);
+      // Each declared operand must have appeared in the fragment body,
+      // and exactly once in the (ops ...) list.
+      if (!OperandsSet.count(ArgNameStr))
+        P->error("'" + ArgNameStr +
+                 "' does not occur in pattern or was multiply specified!");
+      OperandsSet.erase(ArgNameStr);
+      Args.push_back(std::string(ArgNameStr));
+    }
+
+    // Anything left in the set was named in the body but not declared.
+    if (!OperandsSet.empty())
+      P->error("Operands list does not contain an entry for operand '" +
+               *OperandsSet.begin() + "'!");
+
+    // If there is a node transformation corresponding to this, keep track of
+    // it.
+    Record *Transform = Frag->getValueAsDef("OperandTransform");
+    if (!getSDNodeTransform(Transform).second.empty()) // not noop xform?
+      for (const auto &T : P->getTrees())
+        T->setTransformFn(Transform);
+  }
+
+  // Now that we've parsed all of the tree fragments, do a closure on them so
+  // that there are not references to PatFrags left inside of them.
+  for (Record *Frag : Fragments) {
+    if (OutFrags != Frag->isSubClassOf("OutPatFrag"))
+      continue;
+
+    TreePattern &ThePat = *PatternFragments[Frag];
+    ThePat.InlinePatternFragments();
+
+    // Infer as many types as possible. Don't worry about it if we don't infer
+    // all of them, some may depend on the inputs of the pattern. Also, don't
+    // validate type sets; validation may cause spurious failures e.g. if a
+    // fragment needs floating-point types but the current target does not have
+    // any (this is only an error if that fragment is ever used!).
+    {
+      TypeInfer::SuppressValidation SV(ThePat.getInfer());
+      ThePat.InferAllTypes();
+      ThePat.resetError();
+    }
+
+    // If debugging, print out the pattern fragment result.
+    LLVM_DEBUG(ThePat.dump());
+  }
+}
+
+/// Parse every OperandWithDefaultOps record, resolving its DefaultOps dag
+/// to fully-typed trees and storing the result in DefaultOperands. Fatal
+/// error if a default operand's type cannot be made concrete.
+void CodeGenDAGPatterns::ParseDefaultOperands() {
+  std::vector<Record*> DefaultOps;
+  DefaultOps = Records.getAllDerivedDefinitions("OperandWithDefaultOps");
+
+  // Find some SDNode.
+  assert(!SDNodes.empty() && "No SDNodes parsed?");
+  Init *SomeSDNode = DefInit::get(SDNodes.begin()->first);
+
+  for (unsigned i = 0, e = DefaultOps.size(); i != e; ++i) {
+    DagInit *DefaultInfo = DefaultOps[i]->getValueAsDag("DefaultOps");
+
+    // Clone the DefaultInfo dag node, changing the operator from 'ops' to
+    // SomeSDnode so that we can parse this.
+    std::vector<std::pair<Init*, StringInit*> > Ops;
+    for (unsigned op = 0, e = DefaultInfo->getNumArgs(); op != e; ++op)
+      Ops.push_back(std::make_pair(DefaultInfo->getArg(op),
+                                   DefaultInfo->getArgName(op)));
+    DagInit *DI = DagInit::get(SomeSDNode, nullptr, Ops);
+
+    // Create a TreePattern to parse this.
+    TreePattern P(DefaultOps[i], DI, false, *this);
+    assert(P.getNumTrees() == 1 && "This ctor can only produce one tree!");
+
+    // Copy the operands over into a DAGDefaultOperand.
+    DAGDefaultOperand DefaultOpInfo;
+
+    const TreePatternNodePtr &T = P.getTree(0);
+    for (unsigned op = 0, e = T->getNumChildren(); op != e; ++op) {
+      TreePatternNodePtr TPN = T->getChildShared(op);
+      // Iterate constraint application to a fixed point.
+      while (TPN->ApplyTypeConstraints(P, false))
+        /* Resolve all types */;
+
+      // Defaults have no pattern context to infer from, so they must
+      // resolve to a concrete type on their own.
+      if (TPN->ContainsUnresolvedType(P)) {
+        PrintFatalError("Value #" + Twine(i) + " of OperandWithDefaultOps '" +
+                        DefaultOps[i]->getName() +
+                        "' doesn't have a concrete type!");
+      }
+      DefaultOpInfo.DefaultOps.push_back(std::move(TPN));
+    }
+
+    // Insert it into the DefaultOperands map so we can find it later.
+    DefaultOperands[DefaultOps[i]] = DefaultOpInfo;
+  }
+}
+
+/// HandleUse - Given "Pat" a leaf in the pattern, check to see if it is an
+/// instruction input. Return true if this is a real use.
+/// HandleUse - Given "Pat" a leaf in the pattern, check to see if it is an
+/// instruction input. Return true if this is a real use.
+///
+/// Named uses are deduplicated through InstInputs: repeated uses of the same
+/// name must refer to the same record and must be able to agree on a type.
+static bool HandleUse(TreePattern &I, TreePatternNodePtr Pat,
+                      std::map<std::string, TreePatternNodePtr> &InstInputs) {
+  // No name -> not interesting.
+  if (Pat->getName().empty()) {
+    if (Pat->isLeaf()) {
+      DefInit *DI = dyn_cast<DefInit>(Pat->getLeafValue());
+      // Register operands are only usable as inputs when named.
+      if (DI && (DI->getDef()->isSubClassOf("RegisterClass") ||
+                 DI->getDef()->isSubClassOf("RegisterOperand")))
+        I.error("Input " + DI->getDef()->getName() + " must be named!");
+    }
+    return false;
+  }
+
+  // Identify the record this use refers to: the leaf's def, or the
+  // operator for a non-leaf node.
+  Record *Rec;
+  if (Pat->isLeaf()) {
+    DefInit *DI = dyn_cast<DefInit>(Pat->getLeafValue());
+    if (!DI)
+      I.error("Input $" + Pat->getName() + " must be an identifier!");
+    Rec = DI->getDef();
+  } else {
+    Rec = Pat->getOperator();
+  }
+
+  // SRCVALUE nodes are ignored.
+  if (Rec->getName() == "srcvalue")
+    return false;
+
+  // First occurrence of this name claims the slot.
+  TreePatternNodePtr &Slot = InstInputs[Pat->getName()];
+  if (!Slot) {
+    Slot = Pat;
+    return true;
+  }
+  Record *SlotRec;
+  if (Slot->isLeaf()) {
+    SlotRec = cast<DefInit>(Slot->getLeafValue())->getDef();
+  } else {
+    assert(Slot->getNumChildren() == 0 && "can't be a use with children!");
+    SlotRec = Slot->getOperator();
+  }
+
+  // Ensure that the inputs agree if we've already seen this input.
+  if (Rec != SlotRec)
+    I.error("All $" + Pat->getName() + " inputs must agree with each other");
+  // Ensure that the types can agree as well.
+  Slot->UpdateNodeType(0, Pat->getExtType(0), I);
+  Pat->UpdateNodeType(0, Slot->getExtType(0), I);
+  if (Slot->getExtTypes() != Pat->getExtTypes())
+    I.error("All $" + Pat->getName() + " inputs must agree with each other");
+  return true;
+}
+
+/// FindPatternInputsAndOutputs - Scan the specified TreePatternNode (which is
+/// part of "I", the instruction), computing the set of inputs and outputs of
+/// the pattern. Report errors if we see anything naughty.
+/// FindPatternInputsAndOutputs - Scan the specified TreePatternNode (which is
+/// part of "I", the instruction), computing the set of inputs and outputs of
+/// the pattern. Report errors if we see anything naughty.
+///
+/// Inputs are accumulated in InstInputs (keyed by operand name), explicit
+/// results in InstResults, and implicitly-defined physical registers in
+/// InstImpResults.
+void CodeGenDAGPatterns::FindPatternInputsAndOutputs(
+    TreePattern &I, TreePatternNodePtr Pat,
+    std::map<std::string, TreePatternNodePtr> &InstInputs,
+    MapVector<std::string, TreePatternNodePtr, std::map<std::string, unsigned>>
+        &InstResults,
+    std::vector<Record *> &InstImpResults) {
+
+  // The instruction pattern still has unresolved fragments. For *named*
+  // nodes we must resolve those here. This may not result in multiple
+  // alternatives.
+  if (!Pat->getName().empty()) {
+    TreePattern SrcPattern(I.getRecord(), Pat, true, *this);
+    SrcPattern.InlinePatternFragments();
+    SrcPattern.InferAllTypes();
+    Pat = SrcPattern.getOnlyTree();
+  }
+
+  // Leaves are plain uses (inputs).
+  if (Pat->isLeaf()) {
+    bool isUse = HandleUse(I, Pat, InstInputs);
+    if (!isUse && Pat->getTransformFn())
+      I.error("Cannot specify a transform function for a non-input value!");
+    return;
+  }
+
+  // (implicit R1, R2, ...) declares implicitly-defined physical registers.
+  if (Pat->getOperator()->getName() == "implicit") {
+    for (unsigned i = 0, e = Pat->getNumChildren(); i != e; ++i) {
+      TreePatternNode *Dest = Pat->getChild(i);
+      if (!Dest->isLeaf())
+        I.error("implicitly defined value should be a register!");
+
+      DefInit *Val = dyn_cast<DefInit>(Dest->getLeafValue());
+      if (!Val || !Val->getDef()->isSubClassOf("Register"))
+        I.error("implicitly defined value should be a register!");
+      InstImpResults.push_back(Val->getDef());
+    }
+    return;
+  }
+
+  if (Pat->getOperator()->getName() != "set") {
+    // If this is not a set, verify that the children nodes are not void typed,
+    // and recurse.
+    for (unsigned i = 0, e = Pat->getNumChildren(); i != e; ++i) {
+      if (Pat->getChild(i)->getNumTypes() == 0)
+        I.error("Cannot have void nodes inside of patterns!");
+      FindPatternInputsAndOutputs(I, Pat->getChildShared(i), InstInputs,
+                                  InstResults, InstImpResults);
+    }
+
+    // If this is a non-leaf node with no children, treat it basically as if
+    // it were a leaf. This handles nodes like (imm).
+    bool isUse = HandleUse(I, Pat, InstInputs);
+
+    if (!isUse && Pat->getTransformFn())
+      I.error("Cannot specify a transform function for a non-input value!");
+    return;
+  }
+
+  // Otherwise, this is a set, validate and collect instruction results.
+  if (Pat->getNumChildren() == 0)
+    I.error("set requires operands!");
+
+  if (Pat->getTransformFn())
+    I.error("Cannot specify a transform function on a set node!");
+
+  // Check the set destinations. All children but the last are destinations;
+  // the last child is the computation being stored.
+  unsigned NumDests = Pat->getNumChildren()-1;
+  for (unsigned i = 0; i != NumDests; ++i) {
+    TreePatternNodePtr Dest = Pat->getChildShared(i);
+    // For set destinations we also must resolve fragments here.
+    TreePattern DestPattern(I.getRecord(), Dest, false, *this);
+    DestPattern.InlinePatternFragments();
+    DestPattern.InferAllTypes();
+    Dest = DestPattern.getOnlyTree();
+
+    if (!Dest->isLeaf())
+      I.error("set destination should be a register!");
+
+    DefInit *Val = dyn_cast<DefInit>(Dest->getLeafValue());
+    if (!Val) {
+      I.error("set destination should be a register!");
+      continue;
+    }
+
+    // Register-class-like destinations are named explicit results;
+    // a bare physical Register becomes an implicit result.
+    if (Val->getDef()->isSubClassOf("RegisterClass") ||
+        Val->getDef()->isSubClassOf("ValueType") ||
+        Val->getDef()->isSubClassOf("RegisterOperand") ||
+        Val->getDef()->isSubClassOf("PointerLikeRegClass")) {
+      if (Dest->getName().empty())
+        I.error("set destination must have a name!");
+      if (InstResults.count(Dest->getName()))
+        I.error("cannot set '" + Dest->getName() + "' multiple times");
+      InstResults[Dest->getName()] = Dest;
+    } else if (Val->getDef()->isSubClassOf("Register")) {
+      InstImpResults.push_back(Val->getDef());
+    } else {
+      I.error("set destination should be a register!");
+    }
+  }
+
+  // Verify and collect info from the computation.
+  FindPatternInputsAndOutputs(I, Pat->getChildShared(NumDests), InstInputs,
+                              InstResults, InstImpResults);
+}
+
+//===----------------------------------------------------------------------===//
+// Instruction Analysis
+//===----------------------------------------------------------------------===//
+
+/// InstAnalyzer - Walks a source pattern and accumulates the machine
+/// instruction properties (mayLoad, mayStore, side effects, etc.) implied
+/// by the SDNodes, ComplexPatterns, and intrinsics it contains.
+class InstAnalyzer {
+  const CodeGenDAGPatterns &CDP;
+public:
+  bool hasSideEffects;   // Pattern contains a node/intrinsic with side effects.
+  bool mayStore;         // Pattern may write memory.
+  bool mayLoad;          // Pattern may read memory.
+  bool isBitcast;        // Root of the pattern is an ISD::BITCAST of a leaf.
+  bool isVariadic;       // Pattern contains a variadic node.
+  bool hasChain;         // Pattern contains a chained node.
+
+  InstAnalyzer(const CodeGenDAGPatterns &cdp)
+    : CDP(cdp), hasSideEffects(false), mayStore(false), mayLoad(false),
+      isBitcast(false), isVariadic(false), hasChain(false) {}
+
+  /// Analyze the source pattern of \p Pat, setting the flag members above.
+  void Analyze(const PatternToMatch &Pat) {
+    const TreePatternNode *N = Pat.getSrcPattern();
+    AnalyzeNode(N);
+    // These properties are detected only on the root node.
+    isBitcast = IsNodeBitcast(N);
+  }
+
+private:
+  /// Return true if \p N is a pure single-operand ISD::BITCAST of a leaf
+  /// (only meaningful for the root node, after AnalyzeNode has run).
+  bool IsNodeBitcast(const TreePatternNode *N) const {
+    if (hasSideEffects || mayLoad || mayStore || isVariadic)
+      return false;
+
+    if (N->isLeaf())
+      return false;
+    if (N->getNumChildren() != 1 || !N->getChild(0)->isLeaf())
+      return false;
+
+    if (N->getOperator()->isSubClassOf("ComplexPattern"))
+      return false;
+
+    const SDNodeInfo &OpInfo = CDP.getSDNodeInfo(N->getOperator());
+    if (OpInfo.getNumResults() != 1 || OpInfo.getNumOperands() != 1)
+      return false;
+    return OpInfo.getEnumName() == "ISD::BITCAST";
+  }
+
+public:
+  /// Recursively accumulate flags from \p N and all of its children.
+  void AnalyzeNode(const TreePatternNode *N) {
+    if (N->isLeaf()) {
+      if (DefInit *DI = dyn_cast<DefInit>(N->getLeafValue())) {
+        Record *LeafRec = DI->getDef();
+        // Handle ComplexPattern leaves.
+        if (LeafRec->isSubClassOf("ComplexPattern")) {
+          const ComplexPattern &CP = CDP.getComplexPattern(LeafRec);
+          if (CP.hasProperty(SDNPMayStore)) mayStore = true;
+          if (CP.hasProperty(SDNPMayLoad)) mayLoad = true;
+          if (CP.hasProperty(SDNPSideEffect)) hasSideEffects = true;
+        }
+      }
+      return;
+    }
+
+    // Analyze children.
+    for (unsigned i = 0, e = N->getNumChildren(); i != e; ++i)
+      AnalyzeNode(N->getChild(i));
+
+    // Notice properties of the node.
+    if (N->NodeHasProperty(SDNPMayStore, CDP)) mayStore = true;
+    if (N->NodeHasProperty(SDNPMayLoad, CDP)) mayLoad = true;
+    if (N->NodeHasProperty(SDNPSideEffect, CDP)) hasSideEffects = true;
+    if (N->NodeHasProperty(SDNPVariadic, CDP)) isVariadic = true;
+    if (N->NodeHasProperty(SDNPHasChain, CDP)) hasChain = true;
+
+    if (const CodeGenIntrinsic *IntInfo = N->getIntrinsicInfo(CDP)) {
+      ModRefInfo MR = IntInfo->ME.getModRef();
+      // If this is an intrinsic, analyze it.
+      if (isRefSet(MR))
+        mayLoad = true;// These may load memory.
+
+      if (isModSet(MR))
+        mayStore = true;// Intrinsics that can write to memory are 'mayStore'.
+
+      // Consider intrinsics that don't specify any restrictions on memory
+      // effects as having a side-effect.
+      if (IntInfo->ME == MemoryEffects::unknown() || IntInfo->hasSideEffects)
+        hasSideEffects = true;
+    }
+  }
+
+};
+
+/// Transfer flags inferred from \p PatInfo onto \p InstInfo, checking that
+/// any flags the .td file set explicitly are consistent with the pattern.
+/// Returns true if an inconsistency error was reported.
+static bool InferFromPattern(CodeGenInstruction &InstInfo,
+                             const InstAnalyzer &PatInfo,
+                             Record *PatDef) {
+  bool Error = false;
+
+  // Remember where InstInfo got its flags.
+  if (InstInfo.hasUndefFlags())
+    InstInfo.InferredFrom = PatDef;
+
+  // Check explicitly set flags for consistency.
+  if (InstInfo.hasSideEffects != PatInfo.hasSideEffects &&
+      !InstInfo.hasSideEffects_Unset) {
+    // Allow explicitly setting hasSideEffects = 1 on instructions, even when
+    // the pattern has no side effects. That could be useful for div/rem
+    // instructions that may trap.
+    if (!InstInfo.hasSideEffects) {
+      Error = true;
+      PrintError(PatDef->getLoc(), "Pattern doesn't match hasSideEffects = " +
+                 Twine(InstInfo.hasSideEffects));
+    }
+  }
+
+  // mayStore mismatches are always an error (no escape hatch like above).
+  if (InstInfo.mayStore != PatInfo.mayStore && !InstInfo.mayStore_Unset) {
+    Error = true;
+    PrintError(PatDef->getLoc(), "Pattern doesn't match mayStore = " +
+               Twine(InstInfo.mayStore));
+  }
+
+  if (InstInfo.mayLoad != PatInfo.mayLoad && !InstInfo.mayLoad_Unset) {
+    // Allow explicitly setting mayLoad = 1, even when the pattern has no loads.
+    // Some targets translate immediates to loads.
+    if (!InstInfo.mayLoad) {
+      Error = true;
+      PrintError(PatDef->getLoc(), "Pattern doesn't match mayLoad = " +
+                 Twine(InstInfo.mayLoad));
+    }
+  }
+
+  // Transfer inferred flags.
+  InstInfo.hasSideEffects |= PatInfo.hasSideEffects;
+  InstInfo.mayStore |= PatInfo.mayStore;
+  InstInfo.mayLoad |= PatInfo.mayLoad;
+
+  // These flags are silently added without any verification.
+  // FIXME: To match historical behavior of TableGen, for now add those flags
+  // only when we're inferring from the primary instruction pattern.
+  if (PatDef->isSubClassOf("Instruction")) {
+    InstInfo.isBitcast |= PatInfo.isBitcast;
+    InstInfo.hasChain |= PatInfo.hasChain;
+    InstInfo.hasChain_Inferred = true;
+  }
+
+  // Don't infer isVariadic. This flag means something different on SDNodes and
+  // instructions. For example, a CALL SDNode is variadic because it has the
+  // call arguments as operands, but a CALL instruction is not variadic - it
+  // has argument registers as implicit, not explicit uses.
+
+  return Error;
+}
+
+/// hasNullFragReference - Return true if the DAG has any reference to the
+/// null_frag operator.
+/// hasNullFragReference - Return true if the DAG has any reference to the
+/// null_frag operator, either as its operator or anywhere in its arguments
+/// (recursing into nested dags).
+static bool hasNullFragReference(DagInit *DI) {
+  DefInit *OpDef = dyn_cast<DefInit>(DI->getOperator());
+  if (!OpDef)
+    return false;
+
+  // The operator itself may be the null fragment.
+  if (OpDef->getDef()->getName() == "null_frag")
+    return true;
+
+  // Scan every argument: a direct null_frag def, or a nested dag that
+  // contains one, makes this dag a null-fragment reference.
+  for (unsigned i = 0, e = DI->getNumArgs(); i != e; ++i) {
+    Init *ArgInit = DI->getArg(i);
+    if (auto *ArgDef = dyn_cast<DefInit>(ArgInit))
+      if (ArgDef->getDef()->getName() == "null_frag")
+        return true;
+    if (auto *ArgDag = dyn_cast<DagInit>(ArgInit))
+      if (hasNullFragReference(ArgDag))
+        return true;
+  }
+
+  return false;
+}
+
+/// hasNullFragReference - Return true if any DAG in the list references
+/// the null_frag operator.
+/// hasNullFragReference - Return true if any DAG in the list references
+/// the null_frag operator.
+static bool hasNullFragReference(ListInit *LI) {
+  return llvm::any_of(LI->getValues(), [](Init *I) {
+    DagInit *DI = dyn_cast<DagInit>(I);
+    assert(DI && "non-dag in an instruction Pattern list?!");
+    return hasNullFragReference(DI);
+  });
+}
+
+/// Get all the instructions in a tree.
+static void
+getInstructionsInTree(TreePatternNode *Tree, SmallVectorImpl<Record*> &Instrs) {
+ if (Tree->isLeaf())
+ return;
+ if (Tree->getOperator()->isSubClassOf("Instruction"))
+ Instrs.push_back(Tree->getOperator());
+ for (unsigned i = 0, e = Tree->getNumChildren(); i != e; ++i)
+ getInstructionsInTree(Tree->getChild(i), Instrs);
+}
+
+/// Check the class of a pattern leaf node against the instruction operand it
+/// represents.
+static bool checkOperandClass(CGIOperandList::OperandInfo &OI,
+ Record *Leaf) {
+ if (OI.Rec == Leaf)
+ return true;
+
+ // Allow direct value types to be used in instruction set patterns.
+ // The type will be checked later.
+ if (Leaf->isSubClassOf("ValueType"))
+ return true;
+
+ // Patterns can also be ComplexPattern instances.
+ if (Leaf->isSubClassOf("ComplexPattern"))
+ return true;
+
+ return false;
+}
+
+/// Parse one instruction's Pattern list into a DAGInstruction and insert it
+/// into \p DAGInsts. Validates that the pattern's results/inputs line up
+/// with the instruction's declared operand list, and builds both the source
+/// pattern and the result (dest) pattern for the instruction.
+void CodeGenDAGPatterns::parseInstructionPattern(
+    CodeGenInstruction &CGI, ListInit *Pat, DAGInstMap &DAGInsts) {
+
+  assert(!DAGInsts.count(CGI.TheDef) && "Instruction already parsed!");
+
+  // Parse the instruction.
+  TreePattern I(CGI.TheDef, Pat, true, *this);
+
+  // InstInputs - Keep track of all of the inputs of the instruction, along
+  // with the record they are declared as.
+  std::map<std::string, TreePatternNodePtr> InstInputs;
+
+  // InstResults - Keep track of all the virtual registers that are 'set'
+  // in the instruction, including what reg class they are.
+  MapVector<std::string, TreePatternNodePtr, std::map<std::string, unsigned>>
+      InstResults;
+
+  std::vector<Record*> InstImpResults;
+
+  // Verify that the top-level forms in the instruction are of void type, and
+  // fill in the InstResults map.
+  SmallString<32> TypesString;
+  for (unsigned j = 0, e = I.getNumTrees(); j != e; ++j) {
+    TypesString.clear();
+    TreePatternNodePtr Pat = I.getTree(j);
+    if (Pat->getNumTypes() != 0) {
+      // Build a readable comma-separated list of the offending types for
+      // the error message.
+      raw_svector_ostream OS(TypesString);
+      ListSeparator LS;
+      for (unsigned k = 0, ke = Pat->getNumTypes(); k != ke; ++k) {
+        OS << LS;
+        Pat->getExtType(k).writeToStream(OS);
+      }
+      I.error("Top-level forms in instruction pattern should have"
+              " void types, has types " +
+              OS.str());
+    }
+
+    // Find inputs and outputs, and verify the structure of the uses/defs.
+    FindPatternInputsAndOutputs(I, Pat, InstInputs, InstResults,
+                                InstImpResults);
+  }
+
+  // Now that we have inputs and outputs of the pattern, inspect the operands
+  // list for the instruction. This determines the order that operands are
+  // added to the machine instruction the node corresponds to.
+  unsigned NumResults = InstResults.size();
+
+  // Parse the operands list from the (ops) list, validating it.
+  assert(I.getArgList().empty() && "Args list should still be empty here!");
+
+  // Check that all of the results occur first in the list.
+  std::vector<Record*> Results;
+  std::vector<unsigned> ResultIndices;
+  SmallVector<TreePatternNodePtr, 2> ResNodes;
+  for (unsigned i = 0; i != NumResults; ++i) {
+    // More pattern results than declared operands: report the first
+    // still-unclaimed result name.
+    if (i == CGI.Operands.size()) {
+      const std::string &OpName =
+          llvm::find_if(
+              InstResults,
+              [](const std::pair<std::string, TreePatternNodePtr> &P) {
+                return P.second;
+              })
+              ->first;
+
+      I.error("'" + OpName + "' set but does not appear in operand list!");
+    }
+
+    const std::string &OpName = CGI.Operands[i].Name;
+
+    // Check that it exists in InstResults.
+    auto InstResultIter = InstResults.find(OpName);
+    if (InstResultIter == InstResults.end() || !InstResultIter->second)
+      I.error("Operand $" + OpName + " does not exist in operand list!");
+
+    TreePatternNodePtr RNode = InstResultIter->second;
+    Record *R = cast<DefInit>(RNode->getLeafValue())->getDef();
+    ResNodes.push_back(std::move(RNode));
+    if (!R)
+      I.error("Operand $" + OpName + " should be a set destination: all "
+              "outputs must occur before inputs in operand list!");
+
+    if (!checkOperandClass(CGI.Operands[i], R))
+      I.error("Operand $" + OpName + " class mismatch!");
+
+    // Remember the return type.
+    Results.push_back(CGI.Operands[i].Rec);
+
+    // Remember the result index.
+    ResultIndices.push_back(std::distance(InstResults.begin(), InstResultIter));
+
+    // Okay, this one checks out.  Null out the entry (but keep the key) so
+    // duplicate claims and leftover results can be detected.
+    InstResultIter->second = nullptr;
+  }
+
+  // Loop over the inputs next.
+  std::vector<TreePatternNodePtr> ResultNodeOperands;
+  std::vector<Record*> Operands;
+  for (unsigned i = NumResults, e = CGI.Operands.size(); i != e; ++i) {
+    CGIOperandList::OperandInfo &Op = CGI.Operands[i];
+    const std::string &OpName = Op.Name;
+    if (OpName.empty())
+      I.error("Operand #" + Twine(i) + " in operands list has no name!");
+
+    if (!InstInputs.count(OpName)) {
+      // If this is an operand with a DefaultOps set filled in, we can ignore
+      // this. When we codegen it, we will do so as always executed.
+      if (Op.Rec->isSubClassOf("OperandWithDefaultOps")) {
+        // Does it have a non-empty DefaultOps field? If so, ignore this
+        // operand.
+        if (!getDefaultOperand(Op.Rec).DefaultOps.empty())
+          continue;
+      }
+      I.error("Operand $" + OpName +
+              " does not appear in the instruction pattern");
+    }
+    TreePatternNodePtr InVal = InstInputs[OpName];
+    InstInputs.erase(OpName); // It occurred, remove from map.
+
+    if (InVal->isLeaf() && isa<DefInit>(InVal->getLeafValue())) {
+      Record *InRec = cast<DefInit>(InVal->getLeafValue())->getDef();
+      if (!checkOperandClass(Op, InRec))
+        I.error("Operand $" + OpName + "'s register class disagrees"
+                " between the operand and pattern");
+    }
+    Operands.push_back(Op.Rec);
+
+    // Construct the result for the dest-pattern operand list.
+    TreePatternNodePtr OpNode = InVal->clone();
+
+    // No predicate is useful on the result.
+    OpNode->clearPredicateCalls();
+
+    // Promote the xform function to be an explicit node if set.
+    if (Record *Xform = OpNode->getTransformFn()) {
+      OpNode->setTransformFn(nullptr);
+      std::vector<TreePatternNodePtr> Children;
+      Children.push_back(OpNode);
+      OpNode = std::make_shared<TreePatternNode>(Xform, std::move(Children),
+                                                 OpNode->getNumTypes());
+    }
+
+    ResultNodeOperands.push_back(std::move(OpNode));
+  }
+
+  // Any remaining named inputs were never claimed by a declared operand.
+  if (!InstInputs.empty())
+    I.error("Input operand $" + InstInputs.begin()->first +
+            " occurs in pattern but not in operands list!");
+
+  TreePatternNodePtr ResultPattern = std::make_shared<TreePatternNode>(
+      I.getRecord(), std::move(ResultNodeOperands),
+      GetNumNodeResults(I.getRecord(), *this));
+  // Copy fully inferred output node types to instruction result pattern.
+  for (unsigned i = 0; i != NumResults; ++i) {
+    assert(ResNodes[i]->getNumTypes() == 1 && "FIXME: Unhandled");
+    ResultPattern->setType(i, ResNodes[i]->getExtType(0));
+    ResultPattern->setResultIndex(i, ResultIndices[i]);
+  }
+
+  // FIXME: Assume only the first tree is the pattern. The others are clobber
+  // nodes.
+  TreePatternNodePtr Pattern = I.getTree(0);
+  TreePatternNodePtr SrcPattern;
+  if (Pattern->getOperator()->getName() == "set") {
+    // For a set, the source pattern is the value being computed (last child).
+    SrcPattern = Pattern->getChild(Pattern->getNumChildren()-1)->clone();
+  } else{
+    // Not a set (store or something?)
+    SrcPattern = Pattern;
+  }
+
+  // Create and insert the instruction.
+  // FIXME: InstImpResults should not be part of DAGInstruction.
+  Record *R = I.getRecord();
+  DAGInsts.emplace(std::piecewise_construct, std::forward_as_tuple(R),
+                   std::forward_as_tuple(Results, Operands, InstImpResults,
+                                         SrcPattern, ResultPattern));
+
+  LLVM_DEBUG(I.dump());
+}
+
+/// ParseInstructions - Parse all of the instructions, inlining and resolving
+/// any fragments involved. This populates the Instructions list with fully
+/// resolved instructions.
+/// ParseInstructions - Parse all of the instructions, inlining and resolving
+/// any fragments involved. This populates the Instructions list with fully
+/// resolved instructions.
+void CodeGenDAGPatterns::ParseInstructions() {
+  std::vector<Record*> Instrs = Records.getAllDerivedDefinitions("Instruction");
+
+  for (Record *Instr : Instrs) {
+    ListInit *LI = nullptr;
+
+    if (isa<ListInit>(Instr->getValueInit("Pattern")))
+      LI = Instr->getValueAsListInit("Pattern");
+
+    // If there is no pattern, only collect minimal information about the
+    // instruction for its operand list. We have to assume that there is one
+    // result, as we have no detailed info. A pattern which references the
+    // null_frag operator is as-if no pattern were specified. Normally this
+    // is from a multiclass expansion w/ a SDPatternOperator passed in as
+    // null_frag.
+    if (!LI || LI->empty() || hasNullFragReference(LI)) {
+      std::vector<Record*> Results;
+      std::vector<Record*> Operands;
+
+      CodeGenInstruction &InstInfo = Target.getInstruction(Instr);
+
+      if (InstInfo.Operands.size() != 0) {
+        // The first NumDefs operands are the results...
+        for (unsigned j = 0, e = InstInfo.Operands.NumDefs; j < e; ++j)
+          Results.push_back(InstInfo.Operands[j].Rec);
+
+        // The rest are inputs.
+        for (unsigned j = InstInfo.Operands.NumDefs,
+               e = InstInfo.Operands.size(); j < e; ++j)
+          Operands.push_back(InstInfo.Operands[j].Rec);
+      }
+
+      // Create and insert the instruction.
+      std::vector<Record*> ImpResults;
+      Instructions.insert(std::make_pair(Instr,
+                            DAGInstruction(Results, Operands, ImpResults)));
+      continue;  // no pattern.
+    }
+
+    CodeGenInstruction &CGI = Target.getInstruction(Instr);
+    parseInstructionPattern(CGI, LI, Instructions);
+  }
+
+  // If we can, convert the instructions to be patterns that are matched!
+  for (auto &Entry : Instructions) {
+    Record *Instr = Entry.first;
+    DAGInstruction &TheInst = Entry.second;
+    TreePatternNodePtr SrcPattern = TheInst.getSrcPattern();
+    TreePatternNodePtr ResultPattern = TheInst.getResultPattern();
+
+    // Instructions without a pattern (handled above) have null src/result
+    // patterns and are skipped here.
+    if (SrcPattern && ResultPattern) {
+      TreePattern Pattern(Instr, SrcPattern, true, *this);
+      TreePattern Result(Instr, ResultPattern, false, *this);
+      ParseOnePattern(Instr, Pattern, Result, TheInst.getImpResults());
+    }
+  }
+}
+
+typedef std::pair<TreePatternNode *, unsigned> NameRecord;
+
+/// Collect every named node in the tree rooted at \p P into \p Names,
+/// counting occurrences per name. Flags an error on \p PatternTop when two
+/// uses of the same name carry different type sets.
+static void FindNames(TreePatternNode *P,
+                      std::map<std::string, NameRecord> &Names,
+                      TreePattern *PatternTop) {
+  const std::string &NodeName = P->getName();
+  if (!NodeName.empty()) {
+    NameRecord &Entry = Names[NodeName];
+    if (Entry.second++ == 0) {
+      // First sighting of this name: remember the node.
+      Entry.first = P;
+    } else if (Entry.first->getExtTypes() != P->getExtTypes()) {
+      PatternTop->error("repetition of value: $" + NodeName +
+                        " where different uses have different types!");
+    }
+  }
+
+  if (P->isLeaf())
+    return;
+  for (unsigned Idx = 0, End = P->getNumChildren(); Idx != End; ++Idx)
+    FindNames(P->getChild(Idx), Names, PatternTop);
+}
+
+/// Validate \p PTM and append it to PatternsToMatch. Rejects patterns that
+/// can never match, complex-pattern roots without root opcodes, and patterns
+/// whose named values are inconsistent between source and destination.
+void CodeGenDAGPatterns::AddPatternToMatch(TreePattern *Pattern,
+                                           PatternToMatch &&PTM) {
+  // Do some sanity checking on the pattern we're about to match.
+  std::string Reason;
+  if (!PTM.getSrcPattern()->canPatternMatch(Reason, *this)) {
+    PrintWarning(Pattern->getRecord()->getLoc(),
+                 Twine("Pattern can never match: ") + Reason);
+    return;
+  }
+
+  // If the source pattern's root is a complex pattern, that complex pattern
+  // must specify the nodes it can potentially match.
+  if (const ComplexPattern *CP =
+          PTM.getSrcPattern()->getComplexPatternInfo(*this))
+    if (CP->getRootNodes().empty())
+      Pattern->error("ComplexPattern at root must specify list of opcodes it"
+                     " could match");
+
+
+  // Find all of the named values in the input and output, ensure they have the
+  // same type.
+  std::map<std::string, NameRecord> SrcNames, DstNames;
+  FindNames(PTM.getSrcPattern(), SrcNames, Pattern);
+  FindNames(PTM.getDstPattern(), DstNames, Pattern);
+
+  // Scan all of the named values in the destination pattern, rejecting them if
+  // they don't exist in the input pattern.
+  for (const auto &Entry : DstNames) {
+    if (SrcNames[Entry.first].first == nullptr)
+      Pattern->error("Pattern has input without matching name in output: $" +
+                     Entry.first);
+  }
+
+  // Scan all of the named values in the source pattern, rejecting them if the
+  // name isn't used in the dest, and isn't used to tie two values together.
+  for (const auto &Entry : SrcNames)
+    if (DstNames[Entry.first].first == nullptr &&
+        SrcNames[Entry.first].second == 1)
+      Pattern->error("Pattern has dead named input: $" + Entry.first);
+
+  PatternsToMatch.push_back(std::move(PTM));
+}
+
+/// Infer per-instruction flags (hasSideEffects, mayStore, mayLoad) from the
+/// source patterns of single-instruction PatternToMatch entries. Afterwards,
+/// depending on the target's guessInstructionProperties setting, either guess
+/// conservative defaults for still-unset flags or emit errors for them.
+void CodeGenDAGPatterns::InferInstructionFlags() {
+  ArrayRef<const CodeGenInstruction*> Instructions =
+    Target.getInstructionsByEnumValue();
+
+  unsigned Errors = 0;
+
+  // Try to infer flags from all patterns in PatternToMatch.  These include
+  // both the primary instruction patterns (which always come first) and
+  // patterns defined outside the instruction.
+  for (const PatternToMatch &PTM : ptms()) {
+    // We can only infer from single-instruction patterns, otherwise we won't
+    // know which instruction should get the flags.
+    SmallVector<Record*, 8> PatInstrs;
+    getInstructionsInTree(PTM.getDstPattern(), PatInstrs);
+    if (PatInstrs.size() != 1)
+      continue;
+
+    // Get the single instruction.
+    CodeGenInstruction &InstInfo = Target.getInstruction(PatInstrs.front());
+
+    // Only infer properties from the first pattern. We'll verify the others.
+    if (InstInfo.InferredFrom)
+      continue;
+
+    InstAnalyzer PatInfo(*this);
+    PatInfo.Analyze(PTM);
+    Errors += InferFromPattern(InstInfo, PatInfo, PTM.getSrcRecord());
+  }
+
+  if (Errors)
+    PrintFatalError("pattern conflicts");
+
+  // If requested by the target, guess any undefined properties.
+  if (Target.guessInstructionProperties()) {
+    for (unsigned i = 0, e = Instructions.size(); i != e; ++i) {
+      CodeGenInstruction *InstInfo =
+        const_cast<CodeGenInstruction *>(Instructions[i]);
+      if (InstInfo->InferredFrom)
+        continue;
+      // The mayLoad and mayStore flags default to false.
+      // Conservatively assume hasSideEffects if it wasn't explicit.
+      if (InstInfo->hasSideEffects_Unset)
+        InstInfo->hasSideEffects = true;
+    }
+    return;
+  }
+
+  // Complain about any flags that are still undefined.
+  for (unsigned i = 0, e = Instructions.size(); i != e; ++i) {
+    CodeGenInstruction *InstInfo =
+      const_cast<CodeGenInstruction *>(Instructions[i]);
+    if (InstInfo->InferredFrom)
+      continue;
+    if (InstInfo->hasSideEffects_Unset)
+      PrintError(InstInfo->TheDef->getLoc(),
+                 "Can't infer hasSideEffects from patterns");
+    if (InstInfo->mayStore_Unset)
+      PrintError(InstInfo->TheDef->getLoc(),
+                 "Can't infer mayStore from patterns");
+    if (InstInfo->mayLoad_Unset)
+      PrintError(InstInfo->TheDef->getLoc(),
+                 "Can't infer mayLoad from patterns");
+  }
+}
+
+
+/// Verify instruction flags against pattern node properties: any property the
+/// source pattern exhibits (side effects, may-store, may-load) must be set on
+/// at least one instruction in the output pattern. Accumulates errors over
+/// all patterns and aborts with a fatal error if any were found.
+void CodeGenDAGPatterns::VerifyInstructionFlags() {
+  unsigned Errors = 0;
+  for (const PatternToMatch &PTM : ptms()) {
+    SmallVector<Record*, 8> Instrs;
+    getInstructionsInTree(PTM.getDstPattern(), Instrs);
+    if (Instrs.empty())
+      continue;
+
+    // Count the number of instructions with each flag set.
+    unsigned NumSideEffects = 0;
+    unsigned NumStores = 0;
+    unsigned NumLoads = 0;
+    for (const Record *Instr : Instrs) {
+      const CodeGenInstruction &InstInfo = Target.getInstruction(Instr);
+      NumSideEffects += InstInfo.hasSideEffects;
+      NumStores += InstInfo.mayStore;
+      NumLoads += InstInfo.mayLoad;
+    }
+
+    // Analyze the source pattern.
+    InstAnalyzer PatInfo(*this);
+    PatInfo.Analyze(PTM);
+
+    // Collect error messages.
+    SmallVector<std::string, 4> Msgs;
+
+    // Check for missing flags in the output.
+    // Permit extra flags for now at least.
+    if (PatInfo.hasSideEffects && !NumSideEffects)
+      Msgs.push_back("pattern has side effects, but hasSideEffects isn't set");
+
+    // Don't verify store flags on instructions with side effects. At least for
+    // intrinsics, side effects implies mayStore.
+    if (!PatInfo.hasSideEffects && PatInfo.mayStore && !NumStores)
+      Msgs.push_back("pattern may store, but mayStore isn't set");
+
+    // Similarly, mayStore implies mayLoad on intrinsics.
+    if (!PatInfo.mayStore && PatInfo.mayLoad && !NumLoads)
+      Msgs.push_back("pattern may load, but mayLoad isn't set");
+
+    // Print error messages.
+    if (Msgs.empty())
+      continue;
+    ++Errors;
+
+    for (const std::string &Msg : Msgs)
+      PrintError(PTM.getSrcRecord()->getLoc(), Twine(Msg) + " on the " +
+                 (Instrs.size() == 1 ?
+                  "instruction" : "output instructions"));
+    // Provide the location of the relevant instruction definitions.
+    for (const Record *Instr : Instrs) {
+      if (Instr != PTM.getSrcRecord())
+        PrintError(Instr->getLoc(), "defined here");
+      const CodeGenInstruction &InstInfo = Target.getInstruction(Instr);
+      if (InstInfo.InferredFrom &&
+          InstInfo.InferredFrom != InstInfo.TheDef &&
+          InstInfo.InferredFrom != PTM.getSrcRecord())
+        PrintError(InstInfo.InferredFrom->getLoc(), "inferred from pattern");
+    }
+  }
+  if (Errors)
+    PrintFatalError("Errors in DAG patterns");
+}
+
+/// Given a pattern result with an unresolved type, see if we can find one
+/// instruction with an unresolved result type. Force this result type to an
+/// arbitrary element if it's possible types to converge results.
+/// Returns true if a type was forced (so inference should be re-run).
+static bool ForceArbitraryInstResultType(TreePatternNode *N, TreePattern &TP) {
+  if (N->isLeaf())
+    return false;
+
+  // Analyze children.
+  for (unsigned i = 0, e = N->getNumChildren(); i != e; ++i)
+    if (ForceArbitraryInstResultType(N->getChild(i), TP))
+      return true;
+
+  if (!N->getOperator()->isSubClassOf("Instruction"))
+    return false;
+
+  // If this type is already concrete or completely unknown we can't do
+  // anything.
+  TypeInfer &TI = TP.getInfer();
+  for (unsigned i = 0, e = N->getNumTypes(); i != e; ++i) {
+    if (N->getExtType(i).empty() || TI.isConcrete(N->getExtType(i), false))
+      continue;
+
+    // Otherwise, force its type to an arbitrary choice.
+    if (TI.forceArbitrary(N->getExtType(i)))
+      return true;
+  }
+
+  return false;
+}
+
+// Promote xform function to be an explicit node wherever set: a node carrying
+// a transform function is rewrapped as an explicit node whose operator is the
+// xform record, with the (recursively promoted) original node as its child.
+static TreePatternNodePtr PromoteXForms(TreePatternNodePtr N) {
+  if (Record *Xform = N->getTransformFn()) {
+    // Clear the transform before recursing so the wrapper is only added once.
+    N->setTransformFn(nullptr);
+    std::vector<TreePatternNodePtr> Children;
+    Children.push_back(PromoteXForms(N));
+    return std::make_shared<TreePatternNode>(Xform, std::move(Children),
+                                             N->getNumTypes());
+  }
+
+  if (!N->isLeaf())
+    for (unsigned i = 0, e = N->getNumChildren(); i != e; ++i) {
+      TreePatternNodePtr Child = N->getChildShared(i);
+      N->setChild(i, PromoteXForms(Child));
+    }
+  return N;
+}
+
+/// Parse one (source, result) pattern pair for record TheDef: inline pattern
+/// fragments, run type inference to a fixed point across both trees, promote
+/// xform functions to explicit nodes, and register a PatternToMatch for each
+/// surviving source alternative whose types remain satisfiable.
+void CodeGenDAGPatterns::ParseOnePattern(Record *TheDef,
+       TreePattern &Pattern, TreePattern &Result,
+       const std::vector<Record *> &InstImpResults) {
+
+  // Inline pattern fragments and expand multiple alternatives.
+  Pattern.InlinePatternFragments();
+  Result.InlinePatternFragments();
+
+  if (Result.getNumTrees() != 1)
+    Result.error("Cannot use multi-alternative fragments in result pattern!");
+
+  // Infer types.
+  bool IterateInference;
+  bool InferredAllPatternTypes, InferredAllResultTypes;
+  do {
+    // Infer as many types as possible.  If we cannot infer all of them, we
+    // can never do anything with this pattern: report it to the user.
+    InferredAllPatternTypes =
+        Pattern.InferAllTypes(&Pattern.getNamedNodesMap());
+
+    // Infer as many types as possible.  If we cannot infer all of them, we
+    // can never do anything with this pattern: report it to the user.
+    InferredAllResultTypes =
+        Result.InferAllTypes(&Pattern.getNamedNodesMap());
+
+    IterateInference = false;
+
+    // Apply the type of the result to the source pattern.  This helps us
+    // resolve cases where the input type is known to be a pointer type (which
+    // is considered resolved), but the result knows it needs to be 32- or
+    // 64-bits.  Infer the other way for good measure.
+    for (const auto &T : Pattern.getTrees())
+      for (unsigned i = 0, e = std::min(Result.getOnlyTree()->getNumTypes(),
+                                        T->getNumTypes());
+         i != e; ++i) {
+        IterateInference |= T->UpdateNodeType(
+            i, Result.getOnlyTree()->getExtType(i), Result);
+        IterateInference |= Result.getOnlyTree()->UpdateNodeType(
+            i, T->getExtType(i), Result);
+      }
+
+    // If our iteration has converged and the input pattern's types are fully
+    // resolved but the result pattern is not fully resolved, we may have a
+    // situation where we have two instructions in the result pattern and
+    // the instructions require a common register class, but don't care about
+    // what actual MVT is used.  This is actually a bug in our modelling:
+    // output patterns should have register classes, not MVTs.
+    //
+    // In any case, to handle this, we just go through and disambiguate some
+    // arbitrary types to the result pattern's nodes.
+    if (!IterateInference && InferredAllPatternTypes &&
+        !InferredAllResultTypes)
+      IterateInference =
+          ForceArbitraryInstResultType(Result.getTree(0).get(), Result);
+  } while (IterateInference);
+
+  // Verify that we inferred enough types that we can do something with the
+  // pattern and result.  If these fire the user has to add type casts.
+  if (!InferredAllPatternTypes)
+    Pattern.error("Could not infer all types in pattern!");
+  if (!InferredAllResultTypes) {
+    Pattern.dump();
+    Result.error("Could not infer all types in pattern result!");
+  }
+
+  // Promote xform function to be an explicit node wherever set.
+  TreePatternNodePtr DstShared = PromoteXForms(Result.getOnlyTree());
+
+  TreePattern Temp(Result.getRecord(), DstShared, false, *this);
+  Temp.InferAllTypes();
+
+  ListInit *Preds = TheDef->getValueAsListInit("Predicates");
+  int Complexity = TheDef->getValueAsInt("AddedComplexity");
+
+  if (PatternRewriter)
+    PatternRewriter(&Pattern);
+
+  // A pattern may end up with an "impossible" type, i.e. a situation
+  // where all types have been eliminated for some node in this pattern.
+  // This could occur for intrinsics that only make sense for a specific
+  // value type, and use a specific register class. If, for some mode,
+  // that register class does not accept that type, the type inference
+  // will lead to a contradiction, which is not an error however, but
+  // a sign that this pattern will simply never match.
+  if (Temp.getOnlyTree()->hasPossibleType())
+    for (const auto &T : Pattern.getTrees())
+      if (T->hasPossibleType())
+        AddPatternToMatch(&Pattern,
+                          PatternToMatch(TheDef, Preds, T, Temp.getOnlyTree(),
+                                         InstImpResults, Complexity,
+                                         TheDef->getID()));
+}
+
+/// Parse all standalone "Pattern" records from the .td files: build the
+/// source and result trees for each, validate the pattern's inputs/outputs,
+/// and hand each pair to ParseOnePattern. Patterns referencing null_frag or
+/// producing no instructions are skipped.
+void CodeGenDAGPatterns::ParsePatterns() {
+  std::vector<Record*> Patterns = Records.getAllDerivedDefinitions("Pattern");
+
+  for (Record *CurPattern : Patterns) {
+    DagInit *Tree = CurPattern->getValueAsDag("PatternToMatch");
+
+    // If the pattern references the null_frag, there's nothing to do.
+    if (hasNullFragReference(Tree))
+      continue;
+
+    TreePattern Pattern(CurPattern, Tree, true, *this);
+
+    ListInit *LI = CurPattern->getValueAsListInit("ResultInstrs");
+    if (LI->empty()) continue;  // no pattern.
+
+    // Parse the instruction.
+    TreePattern Result(CurPattern, LI, false, *this);
+
+    if (Result.getNumTrees() != 1)
+      Result.error("Cannot handle instructions producing instructions "
+                   "with temporaries yet!");
+
+    // Validate that the input pattern is correct.
+    std::map<std::string, TreePatternNodePtr> InstInputs;
+    MapVector<std::string, TreePatternNodePtr, std::map<std::string, unsigned>>
+        InstResults;
+    std::vector<Record*> InstImpResults;
+    for (unsigned j = 0, ee = Pattern.getNumTrees(); j != ee; ++j)
+      FindPatternInputsAndOutputs(Pattern, Pattern.getTree(j), InstInputs,
+                                  InstResults, InstImpResults);
+
+    ParseOnePattern(CurPattern, Pattern, Result, InstImpResults);
+  }
+}
+
+/// Collect into Modes the HW-mode ids appearing in the type sets of N and all
+/// of its descendants.
+static void collectModes(std::set<unsigned> &Modes, const TreePatternNode *N) {
+  for (const TypeSetByHwMode &VTS : N->getExtTypes())
+    for (const auto &I : VTS)
+      Modes.insert(I.first);
+
+  for (unsigned i = 0, e = N->getNumChildren(); i != e; ++i)
+    collectModes(Modes, N->getChild(i));
+}
+
+/// Replace each pattern whose types depend on a hardware mode with one copy
+/// per mode present in the pattern, each guarded by a subtarget-feature
+/// check; the default-mode copy is guarded by the negation of all other
+/// modes' feature checks. Mode-independent patterns are kept unchanged.
+void CodeGenDAGPatterns::ExpandHwModeBasedTypes() {
+  const CodeGenHwModes &CGH = getTargetInfo().getHwModes();
+  std::vector<PatternToMatch> Copy;
+  PatternsToMatch.swap(Copy);
+
+  // Clone P with all types narrowed to Mode; drop the clone if narrowing
+  // fails for either tree.
+  auto AppendPattern = [this](PatternToMatch &P, unsigned Mode,
+                              StringRef Check) {
+    TreePatternNodePtr NewSrc = P.getSrcPattern()->clone();
+    TreePatternNodePtr NewDst = P.getDstPattern()->clone();
+    if (!NewSrc->setDefaultMode(Mode) || !NewDst->setDefaultMode(Mode)) {
+      return;
+    }
+
+    PatternsToMatch.emplace_back(P.getSrcRecord(), P.getPredicates(),
+                                 std::move(NewSrc), std::move(NewDst),
+                                 P.getDstRegs(), P.getAddedComplexity(),
+                                 Record::getNewUID(Records), Mode, Check);
+  };
+
+  for (PatternToMatch &P : Copy) {
+    TreePatternNodePtr SrcP = nullptr, DstP = nullptr;
+    if (P.getSrcPattern()->hasProperTypeByHwMode())
+      SrcP = P.getSrcPatternShared();
+    if (P.getDstPattern()->hasProperTypeByHwMode())
+      DstP = P.getDstPatternShared();
+    if (!SrcP && !DstP) {
+      PatternsToMatch.push_back(P);
+      continue;
+    }
+
+    std::set<unsigned> Modes;
+    if (SrcP)
+      collectModes(Modes, SrcP.get());
+    if (DstP)
+      collectModes(Modes, DstP.get());
+
+    // The predicate for the default mode needs to be constructed for each
+    // pattern separately.
+    // Since not all modes must be present in each pattern, if a mode m is
+    // absent, then there is no point in constructing a check for m. If such
+    // a check was created, it would be equivalent to checking the default
+    // mode, except not all modes' predicates would be a part of the checking
+    // code. The subsequently generated check for the default mode would then
+    // have the exact same patterns, but a different predicate code. To avoid
+    // duplicated patterns with different predicate checks, construct the
+    // default check as a negation of all predicates that are actually present
+    // in the source/destination patterns.
+    SmallString<128> DefaultCheck;
+
+    for (unsigned M : Modes) {
+      if (M == DefaultMode)
+        continue;
+
+      // Fill the map entry for this mode.
+      const HwMode &HM = CGH.getMode(M);
+      AppendPattern(P, M, "(MF->getSubtarget().checkFeatures(\"" + HM.Features + "\"))");
+
+      // Add negations of the HM's predicates to the default predicate.
+      if (!DefaultCheck.empty())
+        DefaultCheck += " && ";
+      DefaultCheck += "(!(MF->getSubtarget().checkFeatures(\"";
+      DefaultCheck += HM.Features;
+      DefaultCheck += "\")))";
+    }
+
+    bool HasDefault = Modes.count(DefaultMode);
+    if (HasDefault)
+      AppendPattern(P, DefaultMode, DefaultCheck);
+  }
+}
+
+/// Dependent variable map for CodeGenDAGPattern variant generation
+typedef StringMap<int> DepVarMap;
+
+/// Count, in DepMap, how many times each named leaf (bound to a DefInit)
+/// occurs in the tree rooted at N.
+static void FindDepVarsOf(TreePatternNode *N, DepVarMap &DepMap) {
+  if (N->isLeaf()) {
+    if (N->hasName() && isa<DefInit>(N->getLeafValue()))
+      DepMap[N->getName()]++;
+  } else {
+    for (size_t i = 0, e = N->getNumChildren(); i != e; ++i)
+      FindDepVarsOf(N->getChild(i), DepMap);
+  }
+}
+
+/// Find dependent variables within child patterns: names that appear more
+/// than once in N are inserted into DepVars.
+static void FindDepVars(TreePatternNode *N, MultipleUseVarSet &DepVars) {
+  DepVarMap depcounts;
+  FindDepVarsOf(N, depcounts);
+  for (const auto &Pair : depcounts) {
+    if (Pair.getValue() > 1)
+      DepVars.insert(Pair.getKey());
+  }
+}
+
+#ifndef NDEBUG
+/// Dump the dependent variable set to errs() (debug builds only).
+static void DumpDepVars(MultipleUseVarSet &DepVars) {
+  if (DepVars.empty()) {
+    LLVM_DEBUG(errs() << "<empty set>");
+  } else {
+    LLVM_DEBUG(errs() << "[ ");
+    for (const auto &DepVar : DepVars) {
+      LLVM_DEBUG(errs() << DepVar.getKey() << " ");
+    }
+    LLVM_DEBUG(errs() << "]");
+  }
+}
+#endif
+
+
+/// CombineChildVariants - Given a bunch of permutations of each child of the
+/// 'operator' node, put them together in all possible ways. Each combination
+/// is a clone of Orig with one variant chosen per child slot; combinations
+/// that cannot match or duplicate an existing variant are dropped.
+static void CombineChildVariants(
+    TreePatternNodePtr Orig,
+    const std::vector<std::vector<TreePatternNodePtr>> &ChildVariants,
+    std::vector<TreePatternNodePtr> &OutVariants, CodeGenDAGPatterns &CDP,
+    const MultipleUseVarSet &DepVars) {
+  // Make sure that each operand has at least one variant to choose from.
+  for (const auto &Variants : ChildVariants)
+    if (Variants.empty())
+      return;
+
+  // The end result is an all-pairs construction of the resultant pattern.
+  // Idxs acts as an odometer over the per-child variant lists.
+  std::vector<unsigned> Idxs;
+  Idxs.resize(ChildVariants.size());
+  bool NotDone;
+  do {
+#ifndef NDEBUG
+    LLVM_DEBUG(if (!Idxs.empty()) {
+      errs() << Orig->getOperator()->getName() << ": Idxs = [ ";
+      for (unsigned Idx : Idxs) {
+        errs() << Idx << " ";
+      }
+      errs() << "]\n";
+    });
+#endif
+    // Create the variant and add it to the output list.
+    std::vector<TreePatternNodePtr> NewChildren;
+    for (unsigned i = 0, e = ChildVariants.size(); i != e; ++i)
+      NewChildren.push_back(ChildVariants[i][Idxs[i]]);
+    TreePatternNodePtr R = std::make_shared<TreePatternNode>(
+        Orig->getOperator(), std::move(NewChildren), Orig->getNumTypes());
+
+    // Copy over properties.
+    R->setName(Orig->getName());
+    R->setNamesAsPredicateArg(Orig->getNamesAsPredicateArg());
+    R->setPredicateCalls(Orig->getPredicateCalls());
+    R->setTransformFn(Orig->getTransformFn());
+    for (unsigned i = 0, e = Orig->getNumTypes(); i != e; ++i)
+      R->setType(i, Orig->getExtType(i));
+
+    // If this pattern cannot match, do not include it as a variant.
+    std::string ErrString;
+    // Scan to see if this pattern has already been emitted.  We can get
+    // duplication due to things like commuting:
+    //   (and GPRC:$a, GPRC:$b) -> (and GPRC:$b, GPRC:$a)
+    // which are the same pattern.  Ignore the dups.
+    if (R->canPatternMatch(ErrString, CDP) &&
+        none_of(OutVariants, [&](TreePatternNodePtr Variant) {
+          return R->isIsomorphicTo(Variant.get(), DepVars);
+        }))
+      OutVariants.push_back(R);
+
+    // Increment indices to the next permutation by incrementing the
+    // indices from last index backward, e.g., generate the sequence
+    // [0, 0], [0, 1], [1, 0], [1, 1].
+    int IdxsIdx;
+    for (IdxsIdx = Idxs.size() - 1; IdxsIdx >= 0; --IdxsIdx) {
+      if (++Idxs[IdxsIdx] == ChildVariants[IdxsIdx].size())
+        Idxs[IdxsIdx] = 0;
+      else
+        break;
+    }
+    NotDone = (IdxsIdx >= 0);
+  } while (NotDone);
+}
+
+/// CombineChildVariants - A helper function for binary operators: forwards
+/// the two per-child variant lists (LHS, RHS) to the n-ary overload.
+static void CombineChildVariants(TreePatternNodePtr Orig,
+                                 const std::vector<TreePatternNodePtr> &LHS,
+                                 const std::vector<TreePatternNodePtr> &RHS,
+                                 std::vector<TreePatternNodePtr> &OutVariants,
+                                 CodeGenDAGPatterns &CDP,
+                                 const MultipleUseVarSet &DepVars) {
+  std::vector<std::vector<TreePatternNodePtr>> ChildVariants;
+  ChildVariants.push_back(LHS);
+  ChildVariants.push_back(RHS);
+  CombineChildVariants(Orig, ChildVariants, OutVariants, CDP, DepVars);
+}
+
+/// Flatten nested applications of the same associative binary operator into
+/// Children. Named nodes, nodes with predicate calls, or nodes carrying a
+/// transform function are kept whole (they can't be re-associated).
+static void
+GatherChildrenOfAssociativeOpcode(TreePatternNodePtr N,
+                                  std::vector<TreePatternNodePtr> &Children) {
+  assert(N->getNumChildren()==2 &&"Associative but doesn't have 2 children!");
+  Record *Operator = N->getOperator();
+
+  // Only permit raw nodes.
+  if (!N->getName().empty() || !N->getPredicateCalls().empty() ||
+      N->getTransformFn()) {
+    Children.push_back(N);
+    return;
+  }
+
+  if (N->getChild(0)->isLeaf() || N->getChild(0)->getOperator() != Operator)
+    Children.push_back(N->getChildShared(0));
+  else
+    GatherChildrenOfAssociativeOpcode(N->getChildShared(0), Children);
+
+  if (N->getChild(1)->isLeaf() || N->getChild(1)->getOperator() != Operator)
+    Children.push_back(N->getChildShared(1));
+  else
+    GatherChildrenOfAssociativeOpcode(N->getChildShared(1), Children);
+}
+
+/// GenerateVariantsOf - Given a pattern N, generate all permutations we can of
+/// the (potentially recursive) pattern by using algebraic laws. Associative
+/// operators with exactly three flattened operands are re-associated in both
+/// groupings; commutative operators get their two operands swapped.
+///
+static void GenerateVariantsOf(TreePatternNodePtr N,
+                               std::vector<TreePatternNodePtr> &OutVariants,
+                               CodeGenDAGPatterns &CDP,
+                               const MultipleUseVarSet &DepVars) {
+  // We cannot permute leaves or ComplexPattern uses.
+  if (N->isLeaf() || N->getOperator()->isSubClassOf("ComplexPattern")) {
+    OutVariants.push_back(N);
+    return;
+  }
+
+  // Look up interesting info about the node.
+  const SDNodeInfo &NodeInfo = CDP.getSDNodeInfo(N->getOperator());
+
+  // If this node is associative, re-associate.
+  if (NodeInfo.hasProperty(SDNPAssociative)) {
+    // Re-associate by pulling together all of the linked operators
+    std::vector<TreePatternNodePtr> MaximalChildren;
+    GatherChildrenOfAssociativeOpcode(N, MaximalChildren);
+
+    // Only handle child sizes of 3.  Otherwise we'll end up trying too many
+    // permutations.
+    if (MaximalChildren.size() == 3) {
+      // Find the variants of all of our maximal children.
+      std::vector<TreePatternNodePtr> AVariants, BVariants, CVariants;
+      GenerateVariantsOf(MaximalChildren[0], AVariants, CDP, DepVars);
+      GenerateVariantsOf(MaximalChildren[1], BVariants, CDP, DepVars);
+      GenerateVariantsOf(MaximalChildren[2], CVariants, CDP, DepVars);
+
+      // There are only two ways we can permute the tree:
+      //   (A op B) op C    and    A op (B op C)
+      // Within these forms, we can also permute A/B/C.
+
+      // Generate legal pair permutations of A/B/C.
+      std::vector<TreePatternNodePtr> ABVariants;
+      std::vector<TreePatternNodePtr> BAVariants;
+      std::vector<TreePatternNodePtr> ACVariants;
+      std::vector<TreePatternNodePtr> CAVariants;
+      std::vector<TreePatternNodePtr> BCVariants;
+      std::vector<TreePatternNodePtr> CBVariants;
+      CombineChildVariants(N, AVariants, BVariants, ABVariants, CDP, DepVars);
+      CombineChildVariants(N, BVariants, AVariants, BAVariants, CDP, DepVars);
+      CombineChildVariants(N, AVariants, CVariants, ACVariants, CDP, DepVars);
+      CombineChildVariants(N, CVariants, AVariants, CAVariants, CDP, DepVars);
+      CombineChildVariants(N, BVariants, CVariants, BCVariants, CDP, DepVars);
+      CombineChildVariants(N, CVariants, BVariants, CBVariants, CDP, DepVars);
+
+      // Combine those into the result: (x op x) op x
+      CombineChildVariants(N, ABVariants, CVariants, OutVariants, CDP, DepVars);
+      CombineChildVariants(N, BAVariants, CVariants, OutVariants, CDP, DepVars);
+      CombineChildVariants(N, ACVariants, BVariants, OutVariants, CDP, DepVars);
+      CombineChildVariants(N, CAVariants, BVariants, OutVariants, CDP, DepVars);
+      CombineChildVariants(N, BCVariants, AVariants, OutVariants, CDP, DepVars);
+      CombineChildVariants(N, CBVariants, AVariants, OutVariants, CDP, DepVars);
+
+      // Combine those into the result: x op (x op x)
+      CombineChildVariants(N, CVariants, ABVariants, OutVariants, CDP, DepVars);
+      CombineChildVariants(N, CVariants, BAVariants, OutVariants, CDP, DepVars);
+      CombineChildVariants(N, BVariants, ACVariants, OutVariants, CDP, DepVars);
+      CombineChildVariants(N, BVariants, CAVariants, OutVariants, CDP, DepVars);
+      CombineChildVariants(N, AVariants, BCVariants, OutVariants, CDP, DepVars);
+      CombineChildVariants(N, AVariants, CBVariants, OutVariants, CDP, DepVars);
+      return;
+    }
+  }
+
+  // Compute permutations of all children.
+  std::vector<std::vector<TreePatternNodePtr>> ChildVariants;
+  ChildVariants.resize(N->getNumChildren());
+  for (unsigned i = 0, e = N->getNumChildren(); i != e; ++i)
+    GenerateVariantsOf(N->getChildShared(i), ChildVariants[i], CDP, DepVars);
+
+  // Build all permutations based on how the children were formed.
+  CombineChildVariants(N, ChildVariants, OutVariants, CDP, DepVars);
+
+  // If this node is commutative, consider the commuted order.
+  bool isCommIntrinsic = N->isCommutativeIntrinsic(CDP);
+  if (NodeInfo.hasProperty(SDNPCommutative) || isCommIntrinsic) {
+    unsigned Skip = isCommIntrinsic ? 1 : 0; // First operand is intrinsic id.
+    assert(N->getNumChildren() >= (2 + Skip) &&
+           "Commutative but doesn't have 2 children!");
+    // Don't allow commuting children which are actually register references.
+    bool NoRegisters = true;
+    unsigned i = 0 + Skip;
+    unsigned e = 2 + Skip;
+    for (; i != e; ++i) {
+      TreePatternNode *Child = N->getChild(i);
+      if (Child->isLeaf())
+        if (DefInit *DI = dyn_cast<DefInit>(Child->getLeafValue())) {
+          Record *RR = DI->getDef();
+          if (RR->isSubClassOf("Register"))
+            NoRegisters = false;
+        }
+    }
+    // Consider the commuted order.
+    if (NoRegisters) {
+      // Swap the first two (non-intrinsic-id) child variant lists.
+      std::vector<std::vector<TreePatternNodePtr>> Variants;
+      unsigned i = 0;
+      if (isCommIntrinsic)
+        Variants.push_back(std::move(ChildVariants[i++])); // Intrinsic id.
+      Variants.push_back(std::move(ChildVariants[i + 1]));
+      Variants.push_back(std::move(ChildVariants[i]));
+      i += 2;
+      // Remaining operands are not commuted.
+      for (; i != N->getNumChildren(); ++i)
+        Variants.push_back(std::move(ChildVariants[i]));
+      CombineChildVariants(N, Variants, OutVariants, CDP, DepVars);
+    }
+  }
+}
+
+
+// GenerateVariants - Generate variants.  For example, commutative patterns can
+// match multiple ways.  Add them to PatternsToMatch as well. Must run before
+// ExpandHwModeBasedTypes (asserted below via getHwModeFeatures()).
+void CodeGenDAGPatterns::GenerateVariants() {
+  LLVM_DEBUG(errs() << "Generating instruction variants.\n");
+
+  // Loop over all of the patterns we've collected, checking to see if we can
+  // generate variants of the instruction, through the exploitation of
+  // identities.  This permits the target to provide aggressive matching without
+  // the .td file having to contain tons of variants of instructions.
+  //
+  // Note that this loop adds new patterns to the PatternsToMatch list, but we
+  // intentionally do not reconsider these.  Any variants of added patterns have
+  // already been added.
+  //
+  for (unsigned i = 0, e = PatternsToMatch.size(); i != e; ++i) {
+    MultipleUseVarSet             DepVars;
+    std::vector<TreePatternNodePtr> Variants;
+    FindDepVars(PatternsToMatch[i].getSrcPattern(), DepVars);
+    LLVM_DEBUG(errs() << "Dependent/multiply used variables: ");
+    LLVM_DEBUG(DumpDepVars(DepVars));
+    LLVM_DEBUG(errs() << "\n");
+    GenerateVariantsOf(PatternsToMatch[i].getSrcPatternShared(), Variants,
+                       *this, DepVars);
+
+    assert(PatternsToMatch[i].getHwModeFeatures().empty() &&
+           "HwModes should not have been expanded yet!");
+
+    assert(!Variants.empty() && "Must create at least original variant!");
+    if (Variants.size() == 1) // No additional variants for this pattern.
+      continue;
+
+    LLVM_DEBUG(errs() << "FOUND VARIANTS OF: ";
+               PatternsToMatch[i].getSrcPattern()->dump(); errs() << "\n");
+
+    for (unsigned v = 0, e = Variants.size(); v != e; ++v) {
+      TreePatternNodePtr Variant = Variants[v];
+
+      LLVM_DEBUG(errs() << "  VAR#" << v << ": "; Variant->dump();
+                 errs() << "\n");
+
+      // Scan to see if an instruction or explicit pattern already matches this.
+      bool AlreadyExists = false;
+      for (unsigned p = 0, e = PatternsToMatch.size(); p != e; ++p) {
+        // Skip if the top level predicates do not match.
+        if ((i != p) && (PatternsToMatch[i].getPredicates() !=
+                         PatternsToMatch[p].getPredicates()))
+          continue;
+        // Check to see if this variant already exists.
+        if (Variant->isIsomorphicTo(PatternsToMatch[p].getSrcPattern(),
+                                    DepVars)) {
+          LLVM_DEBUG(errs() << "  *** ALREADY EXISTS, ignoring variant.\n");
+          AlreadyExists = true;
+          break;
+        }
+      }
+      // If we already have it, ignore the variant.
+      if (AlreadyExists) continue;
+
+      // Otherwise, add it to the list of patterns we have.
+      PatternsToMatch.emplace_back(
+          PatternsToMatch[i].getSrcRecord(), PatternsToMatch[i].getPredicates(),
+          Variant, PatternsToMatch[i].getDstPatternShared(),
+          PatternsToMatch[i].getDstRegs(),
+          PatternsToMatch[i].getAddedComplexity(), Record::getNewUID(Records),
+          PatternsToMatch[i].getForceMode(),
+          PatternsToMatch[i].getHwModeFeatures());
+    }
+
+    LLVM_DEBUG(errs() << "\n");
+  }
+}
diff --git a/contrib/libs/llvm16/utils/TableGen/CodeGenDAGPatterns.h b/contrib/libs/llvm16/utils/TableGen/CodeGenDAGPatterns.h
new file mode 100644
index 0000000000..ec35e66800
--- /dev/null
+++ b/contrib/libs/llvm16/utils/TableGen/CodeGenDAGPatterns.h
@@ -0,0 +1,1275 @@
+//===- CodeGenDAGPatterns.h - Read DAG patterns from .td file ---*- C++ -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// This file declares the CodeGenDAGPatterns class, which is used to read and
+// represent the patterns present in a .td file for instructions.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_UTILS_TABLEGEN_CODEGENDAGPATTERNS_H
+#define LLVM_UTILS_TABLEGEN_CODEGENDAGPATTERNS_H
+
+#include "CodeGenIntrinsics.h"
+#include "CodeGenTarget.h"
+#include "SDNodeProperties.h"
+#include "llvm/ADT/MapVector.h"
+#include "llvm/ADT/SmallVector.h"
+#include "llvm/ADT/StringMap.h"
+#include "llvm/ADT/StringSet.h"
+#include "llvm/Support/ErrorHandling.h"
+#include "llvm/Support/MathExtras.h"
+#include <algorithm>
+#include <array>
+#include <functional>
+#include <map>
+#include <numeric>
+#include <vector>
+
+namespace llvm {
+
+class Record;
+class Init;
+class ListInit;
+class DagInit;
+class SDNodeInfo;
+class TreePattern;
+class TreePatternNode;
+class CodeGenDAGPatterns;
+
+/// Shared pointer for TreePatternNode.
+using TreePatternNodePtr = std::shared_ptr<TreePatternNode>;
+
+/// This represents a set of MVTs. Since the underlying type for the MVT
+/// is uint8_t, there are at most 256 values. To reduce the number of memory
+/// allocations and deallocations, represent the set as a sequence of bits.
+/// To reduce the allocations even further, make MachineValueTypeSet own
+/// the storage and use std::array as the bit container.
+struct MachineValueTypeSet {
+ static_assert(std::is_same<std::underlying_type_t<MVT::SimpleValueType>,
+ uint8_t>::value,
+ "Change uint8_t here to the SimpleValueType's type");
+ static unsigned constexpr Capacity = std::numeric_limits<uint8_t>::max()+1;
+ using WordType = uint64_t;
+ static unsigned constexpr WordWidth = CHAR_BIT*sizeof(WordType);
+ static unsigned constexpr NumWords = Capacity/WordWidth;
+ static_assert(NumWords*WordWidth == Capacity,
+ "Capacity should be a multiple of WordWidth");
+
+ LLVM_ATTRIBUTE_ALWAYS_INLINE
+ MachineValueTypeSet() {
+ clear();
+ }
+
+ LLVM_ATTRIBUTE_ALWAYS_INLINE
+ unsigned size() const {
+ unsigned Count = 0;
+ for (WordType W : Words)
+ Count += llvm::popcount(W);
+ return Count;
+ }
+ LLVM_ATTRIBUTE_ALWAYS_INLINE
+ void clear() {
+ std::memset(Words.data(), 0, NumWords*sizeof(WordType));
+ }
+ LLVM_ATTRIBUTE_ALWAYS_INLINE
+ bool empty() const {
+ for (WordType W : Words)
+ if (W != 0)
+ return false;
+ return true;
+ }
+ LLVM_ATTRIBUTE_ALWAYS_INLINE
+ unsigned count(MVT T) const {
+ return (Words[T.SimpleTy / WordWidth] >> (T.SimpleTy % WordWidth)) & 1;
+ }
+ std::pair<MachineValueTypeSet&,bool> insert(MVT T) {
+ bool V = count(T.SimpleTy);
+ Words[T.SimpleTy / WordWidth] |= WordType(1) << (T.SimpleTy % WordWidth);
+ return {*this, V};
+ }
+ MachineValueTypeSet &insert(const MachineValueTypeSet &S) {
+ for (unsigned i = 0; i != NumWords; ++i)
+ Words[i] |= S.Words[i];
+ return *this;
+ }
+ LLVM_ATTRIBUTE_ALWAYS_INLINE
+ void erase(MVT T) {
+ Words[T.SimpleTy / WordWidth] &= ~(WordType(1) << (T.SimpleTy % WordWidth));
+ }
+
+ void writeToStream(raw_ostream &OS) const;
+
+ struct const_iterator {
+ // Some implementations of the C++ library require these traits to be
+ // defined.
+ using iterator_category = std::forward_iterator_tag;
+ using value_type = MVT;
+ using difference_type = ptrdiff_t;
+ using pointer = const MVT*;
+ using reference = const MVT&;
+
+ LLVM_ATTRIBUTE_ALWAYS_INLINE
+ MVT operator*() const {
+ assert(Pos != Capacity);
+ return MVT::SimpleValueType(Pos);
+ }
+ LLVM_ATTRIBUTE_ALWAYS_INLINE
+ const_iterator(const MachineValueTypeSet *S, bool End) : Set(S) {
+ Pos = End ? Capacity : find_from_pos(0);
+ }
+ LLVM_ATTRIBUTE_ALWAYS_INLINE
+ const_iterator &operator++() {
+ assert(Pos != Capacity);
+ Pos = find_from_pos(Pos+1);
+ return *this;
+ }
+
+ LLVM_ATTRIBUTE_ALWAYS_INLINE
+ bool operator==(const const_iterator &It) const {
+ return Set == It.Set && Pos == It.Pos;
+ }
+ LLVM_ATTRIBUTE_ALWAYS_INLINE
+ bool operator!=(const const_iterator &It) const {
+ return !operator==(It);
+ }
+
+ private:
+ unsigned find_from_pos(unsigned P) const {
+ unsigned SkipWords = P / WordWidth;
+ unsigned SkipBits = P % WordWidth;
+ unsigned Count = SkipWords * WordWidth;
+
+ // If P is in the middle of a word, process it manually here, because
+ // the trailing bits need to be masked off to use findFirstSet.
+ if (SkipBits != 0) {
+ WordType W = Set->Words[SkipWords];
+ W &= maskLeadingOnes<WordType>(WordWidth-SkipBits);
+ if (W != 0)
+ return Count + llvm::countr_zero(W);
+ Count += WordWidth;
+ SkipWords++;
+ }
+
+ for (unsigned i = SkipWords; i != NumWords; ++i) {
+ WordType W = Set->Words[i];
+ if (W != 0)
+ return Count + llvm::countr_zero(W);
+ Count += WordWidth;
+ }
+ return Capacity;
+ }
+
+ const MachineValueTypeSet *Set;
+ unsigned Pos;
+ };
+
+ LLVM_ATTRIBUTE_ALWAYS_INLINE
+ const_iterator begin() const { return const_iterator(this, false); }
+ LLVM_ATTRIBUTE_ALWAYS_INLINE
+ const_iterator end() const { return const_iterator(this, true); }
+
+ LLVM_ATTRIBUTE_ALWAYS_INLINE
+ bool operator==(const MachineValueTypeSet &S) const {
+ return Words == S.Words;
+ }
+ LLVM_ATTRIBUTE_ALWAYS_INLINE
+ bool operator!=(const MachineValueTypeSet &S) const {
+ return !operator==(S);
+ }
+
+private:
+ friend struct const_iterator;
+ std::array<WordType,NumWords> Words;
+};
+
+raw_ostream &operator<<(raw_ostream &OS, const MachineValueTypeSet &T);
+
+struct TypeSetByHwMode : public InfoByHwMode<MachineValueTypeSet> {
+ using SetType = MachineValueTypeSet;
+ SmallVector<unsigned, 16> AddrSpaces;
+
+ TypeSetByHwMode() = default;
+ TypeSetByHwMode(const TypeSetByHwMode &VTS) = default;
+ TypeSetByHwMode &operator=(const TypeSetByHwMode &) = default;
+ TypeSetByHwMode(MVT::SimpleValueType VT)
+ : TypeSetByHwMode(ValueTypeByHwMode(VT)) {}
+ TypeSetByHwMode(ValueTypeByHwMode VT)
+ : TypeSetByHwMode(ArrayRef<ValueTypeByHwMode>(&VT, 1)) {}
+ TypeSetByHwMode(ArrayRef<ValueTypeByHwMode> VTList);
+
+ SetType &getOrCreate(unsigned Mode) {
+ return Map[Mode];
+ }
+
+ bool isValueTypeByHwMode(bool AllowEmpty) const;
+ ValueTypeByHwMode getValueTypeByHwMode() const;
+
+ LLVM_ATTRIBUTE_ALWAYS_INLINE
+ bool isMachineValueType() const {
+ return isDefaultOnly() && Map.begin()->second.size() == 1;
+ }
+
+ LLVM_ATTRIBUTE_ALWAYS_INLINE
+ MVT getMachineValueType() const {
+ assert(isMachineValueType());
+ return *Map.begin()->second.begin();
+ }
+
+ bool isPossible() const;
+
+ LLVM_ATTRIBUTE_ALWAYS_INLINE
+ bool isDefaultOnly() const {
+ return Map.size() == 1 && Map.begin()->first == DefaultMode;
+ }
+
+ bool isPointer() const {
+ return getValueTypeByHwMode().isPointer();
+ }
+
+ unsigned getPtrAddrSpace() const {
+ assert(isPointer());
+ return getValueTypeByHwMode().PtrAddrSpace;
+ }
+
+ bool insert(const ValueTypeByHwMode &VVT);
+ bool constrain(const TypeSetByHwMode &VTS);
+ template <typename Predicate> bool constrain(Predicate P);
+ template <typename Predicate>
+ bool assign_if(const TypeSetByHwMode &VTS, Predicate P);
+
+ void writeToStream(raw_ostream &OS) const;
+
+ bool operator==(const TypeSetByHwMode &VTS) const;
+ bool operator!=(const TypeSetByHwMode &VTS) const { return !(*this == VTS); }
+
+ void dump() const;
+ bool validate() const;
+
+private:
+ unsigned PtrAddrSpace = std::numeric_limits<unsigned>::max();
+ /// Intersect two sets. Return true if anything has changed.
+ bool intersect(SetType &Out, const SetType &In);
+};
+
+raw_ostream &operator<<(raw_ostream &OS, const TypeSetByHwMode &T);
+
+struct TypeInfer {
+ TypeInfer(TreePattern &T) : TP(T), ForceMode(0) {}
+
+ bool isConcrete(const TypeSetByHwMode &VTS, bool AllowEmpty) const {
+ return VTS.isValueTypeByHwMode(AllowEmpty);
+ }
+ ValueTypeByHwMode getConcrete(const TypeSetByHwMode &VTS,
+ bool AllowEmpty) const {
+ assert(VTS.isValueTypeByHwMode(AllowEmpty));
+ return VTS.getValueTypeByHwMode();
+ }
+
+ /// The protocol in the following functions (Merge*, force*, Enforce*,
+ /// expand*) is to return "true" if a change has been made, "false"
+ /// otherwise.
+
+ bool MergeInTypeInfo(TypeSetByHwMode &Out, const TypeSetByHwMode &In);
+ bool MergeInTypeInfo(TypeSetByHwMode &Out, MVT::SimpleValueType InVT) {
+ return MergeInTypeInfo(Out, TypeSetByHwMode(InVT));
+ }
+ bool MergeInTypeInfo(TypeSetByHwMode &Out, ValueTypeByHwMode InVT) {
+ return MergeInTypeInfo(Out, TypeSetByHwMode(InVT));
+ }
+
+ /// Reduce the set \p Out to have at most one element for each mode.
+ bool forceArbitrary(TypeSetByHwMode &Out);
+
+ /// The following four functions ensure that upon return the set \p Out
+ /// will only contain types of the specified kind: integer, floating-point,
+ /// scalar, or vector.
+ /// If \p Out is empty, all legal types of the specified kind will be added
+ /// to it. Otherwise, all types that are not of the specified kind will be
+ /// removed from \p Out.
+ bool EnforceInteger(TypeSetByHwMode &Out);
+ bool EnforceFloatingPoint(TypeSetByHwMode &Out);
+ bool EnforceScalar(TypeSetByHwMode &Out);
+ bool EnforceVector(TypeSetByHwMode &Out);
+
+ /// If \p Out is empty, fill it with all legal types. Otherwise, leave it
+ /// unchanged.
+ bool EnforceAny(TypeSetByHwMode &Out);
+ /// Make sure that for each type in \p Small, there exists a larger type
+ /// in \p Big. \p SmallIsVT indicates that this is being called for
+ /// SDTCisVTSmallerThanOp. In that case the TypeSetByHwMode is re-created for
+ /// each call and needs special consideration in how we detect changes.
+ bool EnforceSmallerThan(TypeSetByHwMode &Small, TypeSetByHwMode &Big,
+ bool SmallIsVT = false);
+ /// 1. Ensure that for each type T in \p Vec, T is a vector type, and that
+ /// for each type U in \p Elem, U is a scalar type.
+ /// 2. Ensure that for each (scalar) type U in \p Elem, there exists a
+ /// (vector) type T in \p Vec, such that U is the element type of T.
+ bool EnforceVectorEltTypeIs(TypeSetByHwMode &Vec, TypeSetByHwMode &Elem);
+ bool EnforceVectorEltTypeIs(TypeSetByHwMode &Vec,
+ const ValueTypeByHwMode &VVT);
+ /// Ensure that for each type T in \p Sub, T is a vector type, and there
+ /// exists a type U in \p Vec such that U is a vector type with the same
+ /// element type as T and at least as many elements as T.
+ bool EnforceVectorSubVectorTypeIs(TypeSetByHwMode &Vec,
+ TypeSetByHwMode &Sub);
+ /// 1. Ensure that \p V has a scalar type iff \p W has a scalar type.
+ /// 2. Ensure that for each vector type T in \p V, there exists a vector
+ /// type U in \p W, such that T and U have the same number of elements.
+ /// 3. Ensure that for each vector type U in \p W, there exists a vector
+ /// type T in \p V, such that T and U have the same number of elements
+ /// (reverse of 2).
+ bool EnforceSameNumElts(TypeSetByHwMode &V, TypeSetByHwMode &W);
+ /// 1. Ensure that for each type T in \p A, there exists a type U in \p B,
+ /// such that T and U have equal size in bits.
+ /// 2. Ensure that for each type U in \p B, there exists a type T in \p A
+ /// such that T and U have equal size in bits (reverse of 1).
+ bool EnforceSameSize(TypeSetByHwMode &A, TypeSetByHwMode &B);
+
+ /// For each overloaded type (i.e. of form *Any), replace it with the
+ /// corresponding subset of legal, specific types.
+ void expandOverloads(TypeSetByHwMode &VTS);
+ void expandOverloads(TypeSetByHwMode::SetType &Out,
+ const TypeSetByHwMode::SetType &Legal);
+
+ struct ValidateOnExit {
+ ValidateOnExit(TypeSetByHwMode &T, TypeInfer &TI) : Infer(TI), VTS(T) {}
+ #ifndef NDEBUG
+ ~ValidateOnExit();
+ #else
+ ~ValidateOnExit() {} // Empty destructor with NDEBUG.
+ #endif
+ TypeInfer &Infer;
+ TypeSetByHwMode &VTS;
+ };
+
+ struct SuppressValidation {
+ SuppressValidation(TypeInfer &TI) : Infer(TI), SavedValidate(TI.Validate) {
+ Infer.Validate = false;
+ }
+ ~SuppressValidation() {
+ Infer.Validate = SavedValidate;
+ }
+ TypeInfer &Infer;
+ bool SavedValidate;
+ };
+
+ TreePattern &TP;
+ unsigned ForceMode; // Mode to use when set.
+ bool CodeGen = false; // Set during generation of matcher code.
+ bool Validate = true; // Indicate whether to validate types.
+
+private:
+ const TypeSetByHwMode &getLegalTypes();
+
+ /// Cached legal types (in default mode).
+ bool LegalTypesCached = false;
+ TypeSetByHwMode LegalCache;
+};
+
+/// Set type used to track multiply used variables in patterns
+typedef StringSet<> MultipleUseVarSet;
+
+/// SDTypeConstraint - This is a discriminated union of constraints,
+/// corresponding to the SDTypeConstraint tablegen class in Target.td.
+struct SDTypeConstraint {
+ SDTypeConstraint(Record *R, const CodeGenHwModes &CGH);
+
+ unsigned OperandNo; // The operand # this constraint applies to.
+ enum {
+ SDTCisVT, SDTCisPtrTy, SDTCisInt, SDTCisFP, SDTCisVec, SDTCisSameAs,
+ SDTCisVTSmallerThanOp, SDTCisOpSmallerThanOp, SDTCisEltOfVec,
+ SDTCisSubVecOfVec, SDTCVecEltisVT, SDTCisSameNumEltsAs, SDTCisSameSizeAs
+ } ConstraintType;
+
+ union { // The discriminated union.
+ struct {
+ unsigned OtherOperandNum;
+ } SDTCisSameAs_Info;
+ struct {
+ unsigned OtherOperandNum;
+ } SDTCisVTSmallerThanOp_Info;
+ struct {
+ unsigned BigOperandNum;
+ } SDTCisOpSmallerThanOp_Info;
+ struct {
+ unsigned OtherOperandNum;
+ } SDTCisEltOfVec_Info;
+ struct {
+ unsigned OtherOperandNum;
+ } SDTCisSubVecOfVec_Info;
+ struct {
+ unsigned OtherOperandNum;
+ } SDTCisSameNumEltsAs_Info;
+ struct {
+ unsigned OtherOperandNum;
+ } SDTCisSameSizeAs_Info;
+ } x;
+
+ // The VT for SDTCisVT and SDTCVecEltisVT.
+ // Must not be in the union because it has a non-trivial destructor.
+ ValueTypeByHwMode VVT;
+
+ /// ApplyTypeConstraint - Given a node in a pattern, apply this type
+ /// constraint to the nodes operands. This returns true if it makes a
+ /// change, false otherwise. If a type contradiction is found, an error
+ /// is flagged.
+ bool ApplyTypeConstraint(TreePatternNode *N, const SDNodeInfo &NodeInfo,
+ TreePattern &TP) const;
+};
+
+/// ScopedName - A name of a node associated with a "scope" that indicates
+/// the context (e.g. instance of Pattern or PatFrag) in which the name was
+/// used. This enables substitution of pattern fragments while keeping track
+/// of what name(s) were originally given to various nodes in the tree.
+class ScopedName {
+ unsigned Scope;
+ std::string Identifier;
+public:
+ ScopedName(unsigned Scope, StringRef Identifier)
+ : Scope(Scope), Identifier(std::string(Identifier)) {
+ assert(Scope != 0 &&
+ "Scope == 0 is used to indicate predicates without arguments");
+ }
+
+ unsigned getScope() const { return Scope; }
+ const std::string &getIdentifier() const { return Identifier; }
+
+ bool operator==(const ScopedName &o) const;
+ bool operator!=(const ScopedName &o) const;
+};
+
+/// SDNodeInfo - One of these records is created for each SDNode instance in
+/// the target .td file. This represents the various dag nodes we will be
+/// processing.
+class SDNodeInfo {
+ Record *Def;
+ StringRef EnumName;
+ StringRef SDClassName;
+ unsigned Properties;
+ unsigned NumResults;
+ int NumOperands;
+ std::vector<SDTypeConstraint> TypeConstraints;
+public:
+ // Parse the specified record.
+ SDNodeInfo(Record *R, const CodeGenHwModes &CGH);
+
+ unsigned getNumResults() const { return NumResults; }
+
+ /// getNumOperands - This is the number of operands required or -1 if
+ /// variadic.
+ int getNumOperands() const { return NumOperands; }
+ Record *getRecord() const { return Def; }
+ StringRef getEnumName() const { return EnumName; }
+ StringRef getSDClassName() const { return SDClassName; }
+
+ const std::vector<SDTypeConstraint> &getTypeConstraints() const {
+ return TypeConstraints;
+ }
+
+ /// getKnownType - If the type constraints on this node imply a fixed type
+ /// (e.g. all stores return void, etc), then return it as an
+ /// MVT::SimpleValueType. Otherwise, return MVT::Other.
+ MVT::SimpleValueType getKnownType(unsigned ResNo) const;
+
+ /// hasProperty - Return true if this node has the specified property.
+ ///
+ bool hasProperty(enum SDNP Prop) const { return Properties & (1 << Prop); }
+
+ /// ApplyTypeConstraints - Given a node in a pattern, apply the type
+ /// constraints for this node to the operands of the node. This returns
+ /// true if it makes a change, false otherwise. If a type contradiction is
+ /// found, an error is flagged.
+ bool ApplyTypeConstraints(TreePatternNode *N, TreePattern &TP) const;
+};
+
+/// TreePredicateFn - This is an abstraction that represents the predicates on
+/// a PatFrag node. This is a simple one-word wrapper around a pointer to
+/// provide nice accessors.
+class TreePredicateFn {
+ /// PatFragRec - This is the TreePattern for the PatFrag that we
+ /// originally came from.
+ TreePattern *PatFragRec;
+public:
+ /// TreePredicateFn constructor. Here 'N' is a subclass of PatFrag.
+ TreePredicateFn(TreePattern *N);
+
+
+ TreePattern *getOrigPatFragRecord() const { return PatFragRec; }
+
+ /// isAlwaysTrue - Return true if this is a noop predicate.
+ bool isAlwaysTrue() const;
+
+ bool isImmediatePattern() const { return hasImmCode(); }
+
+ /// getImmediatePredicateCode - Return the code that evaluates this pattern if
+ /// this is an immediate predicate. It is an error to call this on a
+ /// non-immediate pattern.
+ std::string getImmediatePredicateCode() const {
+ std::string Result = getImmCode();
+ assert(!Result.empty() && "Isn't an immediate pattern!");
+ return Result;
+ }
+
+ bool operator==(const TreePredicateFn &RHS) const {
+ return PatFragRec == RHS.PatFragRec;
+ }
+
+ bool operator!=(const TreePredicateFn &RHS) const { return !(*this == RHS); }
+
+ /// Return the name to use in the generated code to reference this, this is
+ /// "Predicate_foo" if from a pattern fragment "foo".
+ std::string getFnName() const;
+
+ /// getCodeToRunOnSDNode - Return the code for the function body that
+ /// evaluates this predicate. The argument is expected to be in "Node",
+ /// not N. This handles casting and conversion to a concrete node type as
+ /// appropriate.
+ std::string getCodeToRunOnSDNode() const;
+
+ /// Get the data type of the argument to getImmediatePredicateCode().
+ StringRef getImmType() const;
+
+ /// Get a string that describes the type returned by getImmType() but is
+ /// usable as part of an identifier.
+ StringRef getImmTypeIdentifier() const;
+
+ // Predicate code uses the PatFrag's captured operands.
+ bool usesOperands() const;
+
+ // Check if the HasNoUse predicate is set.
+ bool hasNoUse() const;
+
+ // Is the desired predefined predicate for a load?
+ bool isLoad() const;
+ // Is the desired predefined predicate for a store?
+ bool isStore() const;
+ // Is the desired predefined predicate for an atomic?
+ bool isAtomic() const;
+
+ /// Is this predicate the predefined unindexed load predicate?
+ /// Is this predicate the predefined unindexed store predicate?
+ bool isUnindexed() const;
+ /// Is this predicate the predefined non-extending load predicate?
+ bool isNonExtLoad() const;
+ /// Is this predicate the predefined any-extend load predicate?
+ bool isAnyExtLoad() const;
+ /// Is this predicate the predefined sign-extend load predicate?
+ bool isSignExtLoad() const;
+ /// Is this predicate the predefined zero-extend load predicate?
+ bool isZeroExtLoad() const;
+ /// Is this predicate the predefined non-truncating store predicate?
+ bool isNonTruncStore() const;
+ /// Is this predicate the predefined truncating store predicate?
+ bool isTruncStore() const;
+
+ /// Is this predicate the predefined monotonic atomic predicate?
+ bool isAtomicOrderingMonotonic() const;
+ /// Is this predicate the predefined acquire atomic predicate?
+ bool isAtomicOrderingAcquire() const;
+ /// Is this predicate the predefined release atomic predicate?
+ bool isAtomicOrderingRelease() const;
+ /// Is this predicate the predefined acquire-release atomic predicate?
+ bool isAtomicOrderingAcquireRelease() const;
+ /// Is this predicate the predefined sequentially consistent atomic predicate?
+ bool isAtomicOrderingSequentiallyConsistent() const;
+
+ /// Is this predicate the predefined acquire-or-stronger atomic predicate?
+ bool isAtomicOrderingAcquireOrStronger() const;
+ /// Is this predicate the predefined weaker-than-acquire atomic predicate?
+ bool isAtomicOrderingWeakerThanAcquire() const;
+
+ /// Is this predicate the predefined release-or-stronger atomic predicate?
+ bool isAtomicOrderingReleaseOrStronger() const;
+ /// Is this predicate the predefined weaker-than-release atomic predicate?
+ bool isAtomicOrderingWeakerThanRelease() const;
+
+ /// If non-null, indicates that this predicate is a predefined memory VT
+ /// predicate for a load/store and returns the ValueType record for the memory VT.
+ Record *getMemoryVT() const;
+ /// If non-null, indicates that this predicate is a predefined memory VT
+ /// predicate (checking only the scalar type) for load/store and returns the
+ /// ValueType record for the memory VT.
+ Record *getScalarMemoryVT() const;
+
+ ListInit *getAddressSpaces() const;
+ int64_t getMinAlignment() const;
+
+ // If true, indicates that GlobalISel-based C++ code was supplied.
+ bool hasGISelPredicateCode() const;
+ std::string getGISelPredicateCode() const;
+
+private:
+ bool hasPredCode() const;
+ bool hasImmCode() const;
+ std::string getPredCode() const;
+ std::string getImmCode() const;
+ bool immCodeUsesAPInt() const;
+ bool immCodeUsesAPFloat() const;
+
+ bool isPredefinedPredicateEqualTo(StringRef Field, bool Value) const;
+};
+
+struct TreePredicateCall {
+ TreePredicateFn Fn;
+
+ // Scope -- unique identifier for retrieving named arguments. 0 is used when
+ // the predicate does not use named arguments.
+ unsigned Scope;
+
+ TreePredicateCall(const TreePredicateFn &Fn, unsigned Scope)
+ : Fn(Fn), Scope(Scope) {}
+
+ bool operator==(const TreePredicateCall &o) const {
+ return Fn == o.Fn && Scope == o.Scope;
+ }
+ bool operator!=(const TreePredicateCall &o) const {
+ return !(*this == o);
+ }
+};
+
+class TreePatternNode {
+ /// The type of each node result. Before and during type inference, each
+ /// result may be a set of possible types. After (successful) type inference,
+ /// each is a single concrete type.
+ std::vector<TypeSetByHwMode> Types;
+
+ /// The index of each result in results of the pattern.
+ std::vector<unsigned> ResultPerm;
+
+ /// Operator - The Record for the operator if this is an interior node (not
+ /// a leaf).
+ Record *Operator;
+
+ /// Val - The init value (e.g. the "GPRC" record, or "7") for a leaf.
+ ///
+ Init *Val;
+
+ /// Name - The name given to this node with the :$foo notation.
+ ///
+ std::string Name;
+
+ std::vector<ScopedName> NamesAsPredicateArg;
+
+ /// PredicateCalls - The predicate functions to execute on this node to check
+ /// for a match. If this list is empty, no predicate is involved.
+ std::vector<TreePredicateCall> PredicateCalls;
+
+ /// TransformFn - The transformation function to execute on this node before
+ /// it can be substituted into the resulting instruction on a pattern match.
+ Record *TransformFn;
+
+ std::vector<TreePatternNodePtr> Children;
+
+public:
+ TreePatternNode(Record *Op, std::vector<TreePatternNodePtr> Ch,
+ unsigned NumResults)
+ : Operator(Op), Val(nullptr), TransformFn(nullptr),
+ Children(std::move(Ch)) {
+ Types.resize(NumResults);
+ ResultPerm.resize(NumResults);
+ std::iota(ResultPerm.begin(), ResultPerm.end(), 0);
+ }
+ TreePatternNode(Init *val, unsigned NumResults) // leaf ctor
+ : Operator(nullptr), Val(val), TransformFn(nullptr) {
+ Types.resize(NumResults);
+ ResultPerm.resize(NumResults);
+ std::iota(ResultPerm.begin(), ResultPerm.end(), 0);
+ }
+
+ bool hasName() const { return !Name.empty(); }
+ const std::string &getName() const { return Name; }
+ void setName(StringRef N) { Name.assign(N.begin(), N.end()); }
+
+ const std::vector<ScopedName> &getNamesAsPredicateArg() const {
+ return NamesAsPredicateArg;
+ }
+ void setNamesAsPredicateArg(const std::vector<ScopedName>& Names) {
+ NamesAsPredicateArg = Names;
+ }
+ void addNameAsPredicateArg(const ScopedName &N) {
+ NamesAsPredicateArg.push_back(N);
+ }
+
+ bool isLeaf() const { return Val != nullptr; }
+
+ // Type accessors.
+ unsigned getNumTypes() const { return Types.size(); }
+ ValueTypeByHwMode getType(unsigned ResNo) const {
+ return Types[ResNo].getValueTypeByHwMode();
+ }
+ const std::vector<TypeSetByHwMode> &getExtTypes() const { return Types; }
+ const TypeSetByHwMode &getExtType(unsigned ResNo) const {
+ return Types[ResNo];
+ }
+ TypeSetByHwMode &getExtType(unsigned ResNo) { return Types[ResNo]; }
+ void setType(unsigned ResNo, const TypeSetByHwMode &T) { Types[ResNo] = T; }
+ MVT::SimpleValueType getSimpleType(unsigned ResNo) const {
+ return Types[ResNo].getMachineValueType().SimpleTy;
+ }
+
+ bool hasConcreteType(unsigned ResNo) const {
+ return Types[ResNo].isValueTypeByHwMode(false);
+ }
+ bool isTypeCompletelyUnknown(unsigned ResNo, TreePattern &TP) const {
+ return Types[ResNo].empty();
+ }
+
+ unsigned getNumResults() const { return ResultPerm.size(); }
+ unsigned getResultIndex(unsigned ResNo) const { return ResultPerm[ResNo]; }
+ void setResultIndex(unsigned ResNo, unsigned RI) { ResultPerm[ResNo] = RI; }
+
+ Init *getLeafValue() const { assert(isLeaf()); return Val; }
+ Record *getOperator() const { assert(!isLeaf()); return Operator; }
+
+ unsigned getNumChildren() const { return Children.size(); }
+ TreePatternNode *getChild(unsigned N) const { return Children[N].get(); }
+ const TreePatternNodePtr &getChildShared(unsigned N) const {
+ return Children[N];
+ }
+ void setChild(unsigned i, TreePatternNodePtr N) { Children[i] = N; }
+
+ /// hasChild - Return true if N is any of our children.
+ bool hasChild(const TreePatternNode *N) const {
+ for (unsigned i = 0, e = Children.size(); i != e; ++i)
+ if (Children[i].get() == N)
+ return true;
+ return false;
+ }
+
+ bool hasProperTypeByHwMode() const;
+ bool hasPossibleType() const;
+ bool setDefaultMode(unsigned Mode);
+
+ bool hasAnyPredicate() const { return !PredicateCalls.empty(); }
+
+ const std::vector<TreePredicateCall> &getPredicateCalls() const {
+ return PredicateCalls;
+ }
+ void clearPredicateCalls() { PredicateCalls.clear(); }
+ void setPredicateCalls(const std::vector<TreePredicateCall> &Calls) {
+ assert(PredicateCalls.empty() && "Overwriting non-empty predicate list!");
+ PredicateCalls = Calls;
+ }
+ void addPredicateCall(const TreePredicateCall &Call) {
+ assert(!Call.Fn.isAlwaysTrue() && "Empty predicate string!");
+ assert(!is_contained(PredicateCalls, Call) && "predicate applied recursively");
+ PredicateCalls.push_back(Call);
+ }
+ void addPredicateCall(const TreePredicateFn &Fn, unsigned Scope) {
+ assert((Scope != 0) == Fn.usesOperands());
+ addPredicateCall(TreePredicateCall(Fn, Scope));
+ }
+
+ Record *getTransformFn() const { return TransformFn; }
+ void setTransformFn(Record *Fn) { TransformFn = Fn; }
+
+ /// getIntrinsicInfo - If this node corresponds to an intrinsic, return the
+ /// CodeGenIntrinsic information for it, otherwise return a null pointer.
+ const CodeGenIntrinsic *getIntrinsicInfo(const CodeGenDAGPatterns &CDP) const;
+
+ /// getComplexPatternInfo - If this node corresponds to a ComplexPattern,
+ /// return the ComplexPattern information, otherwise return null.
+ const ComplexPattern *
+ getComplexPatternInfo(const CodeGenDAGPatterns &CGP) const;
+
+ /// Returns the number of MachineInstr operands that would be produced by this
+ /// node if it mapped directly to an output Instruction's
+ /// operand. ComplexPattern specifies this explicitly; MIOperandInfo gives it
+ /// for Operands; otherwise 1.
+ unsigned getNumMIResults(const CodeGenDAGPatterns &CGP) const;
+
+ /// NodeHasProperty - Return true if this node has the specified property.
+ bool NodeHasProperty(SDNP Property, const CodeGenDAGPatterns &CGP) const;
+
+ /// TreeHasProperty - Return true if any node in this tree has the specified
+ /// property.
+ bool TreeHasProperty(SDNP Property, const CodeGenDAGPatterns &CGP) const;
+
+ /// isCommutativeIntrinsic - Return true if the node is an intrinsic which is
+ /// marked isCommutative.
+ bool isCommutativeIntrinsic(const CodeGenDAGPatterns &CDP) const;
+
+ void print(raw_ostream &OS) const;
+ void dump() const;
+
+public: // Higher level manipulation routines.
+
+ /// clone - Return a new copy of this tree.
+ ///
+ TreePatternNodePtr clone() const;
+
+ /// RemoveAllTypes - Recursively strip all the types of this tree.
+ void RemoveAllTypes();
+
+ /// isIsomorphicTo - Return true if this node is recursively isomorphic to
+ /// the specified node. For this comparison, all of the state of the node
+ /// is considered, except for the assigned name. Nodes with differing names
+ /// that are otherwise identical are considered isomorphic.
+ bool isIsomorphicTo(const TreePatternNode *N,
+ const MultipleUseVarSet &DepVars) const;
+
+ /// SubstituteFormalArguments - Replace the formal arguments in this tree
+ /// with actual values specified by ArgMap.
+ void
+ SubstituteFormalArguments(std::map<std::string, TreePatternNodePtr> &ArgMap);
+
+ /// InlinePatternFragments - If this pattern refers to any pattern
+ /// fragments, return the set of inlined versions (this can be more than
+ /// one if a PatFrags record has multiple alternatives).
+ void InlinePatternFragments(TreePatternNodePtr T,
+ TreePattern &TP,
+ std::vector<TreePatternNodePtr> &OutAlternatives);
+
+ /// ApplyTypeConstraints - Apply all of the type constraints relevant to
+ /// this node and its children in the tree. This returns true if it makes a
+ /// change, false otherwise. If a type contradiction is found, flag an error.
+ bool ApplyTypeConstraints(TreePattern &TP, bool NotRegisters);
+
+ /// UpdateNodeType - Set the node type of N to VT if VT contains
+ /// information. If N already contains a conflicting type, then flag an
+ /// error. This returns true if any information was updated.
+ ///
+ bool UpdateNodeType(unsigned ResNo, const TypeSetByHwMode &InTy,
+ TreePattern &TP);
+ bool UpdateNodeType(unsigned ResNo, MVT::SimpleValueType InTy,
+ TreePattern &TP);
+ bool UpdateNodeType(unsigned ResNo, ValueTypeByHwMode InTy,
+ TreePattern &TP);
+
+ // Update node type with types inferred from an instruction operand or result
+ // def from the ins/outs lists.
+ // Return true if the type changed.
+ bool UpdateNodeTypeFromInst(unsigned ResNo, Record *Operand, TreePattern &TP);
+
+ /// ContainsUnresolvedType - Return true if this tree contains any
+ /// unresolved types.
+ bool ContainsUnresolvedType(TreePattern &TP) const;
+
+ /// canPatternMatch - If it is impossible for this pattern to match on this
+ /// target, fill in Reason and return false. Otherwise, return true.
+ bool canPatternMatch(std::string &Reason, const CodeGenDAGPatterns &CDP);
+};
+
+inline raw_ostream &operator<<(raw_ostream &OS, const TreePatternNode &TPN) {
+ TPN.print(OS);
+ return OS;
+}
+
+
+/// TreePattern - Represent a pattern, used for instructions, pattern
+/// fragments, etc.
+///
+class TreePattern {
+ /// Trees - The list of pattern trees which corresponds to this pattern.
+ /// Note that PatFrag's only have a single tree.
+ ///
+ std::vector<TreePatternNodePtr> Trees;
+
+ /// NamedNodes - This is all of the nodes that have names in the trees in this
+ /// pattern.
+ StringMap<SmallVector<TreePatternNode *, 1>> NamedNodes;
+
+ /// TheRecord - The actual TableGen record corresponding to this pattern.
+ ///
+ Record *TheRecord;
+
+ /// Args - This is a list of all of the arguments to this pattern (for
+ /// PatFrag patterns), which are the 'node' markers in this pattern.
+ std::vector<std::string> Args;
+
+ /// CDP - the top-level object coordinating this madness.
+ ///
+ CodeGenDAGPatterns &CDP;
+
+ /// isInputPattern - True if this is an input pattern, something to match.
+ /// False if this is an output pattern, something to emit.
+ bool isInputPattern;
+
+  /// HasError - True if the currently processed nodes have unresolvable types
+  /// or other non-fatal errors.
+ bool HasError;
+
+ /// It's important that the usage of operands in ComplexPatterns is
+ /// consistent: each named operand can be defined by at most one
+ /// ComplexPattern. This records the ComplexPattern instance and the operand
+ /// number for each operand encountered in a ComplexPattern to aid in that
+ /// check.
+ StringMap<std::pair<Record *, unsigned>> ComplexPatternOperands;
+
+ TypeInfer Infer;
+
+public:
+
+ /// TreePattern constructor - Parse the specified DagInits into the
+ /// current record.
+ TreePattern(Record *TheRec, ListInit *RawPat, bool isInput,
+ CodeGenDAGPatterns &ise);
+ TreePattern(Record *TheRec, DagInit *Pat, bool isInput,
+ CodeGenDAGPatterns &ise);
+ TreePattern(Record *TheRec, TreePatternNodePtr Pat, bool isInput,
+ CodeGenDAGPatterns &ise);
+
+ /// getTrees - Return the tree patterns which corresponds to this pattern.
+ ///
+ const std::vector<TreePatternNodePtr> &getTrees() const { return Trees; }
+ unsigned getNumTrees() const { return Trees.size(); }
+ const TreePatternNodePtr &getTree(unsigned i) const { return Trees[i]; }
+ void setTree(unsigned i, TreePatternNodePtr Tree) { Trees[i] = Tree; }
+ const TreePatternNodePtr &getOnlyTree() const {
+ assert(Trees.size() == 1 && "Doesn't have exactly one pattern!");
+ return Trees[0];
+ }
+
+ const StringMap<SmallVector<TreePatternNode *, 1>> &getNamedNodesMap() {
+ if (NamedNodes.empty())
+ ComputeNamedNodes();
+ return NamedNodes;
+ }
+
+ /// getRecord - Return the actual TableGen record corresponding to this
+ /// pattern.
+ ///
+ Record *getRecord() const { return TheRecord; }
+
+ unsigned getNumArgs() const { return Args.size(); }
+ const std::string &getArgName(unsigned i) const {
+ assert(i < Args.size() && "Argument reference out of range!");
+ return Args[i];
+ }
+ std::vector<std::string> &getArgList() { return Args; }
+
+ CodeGenDAGPatterns &getDAGPatterns() const { return CDP; }
+
+ /// InlinePatternFragments - If this pattern refers to any pattern
+ /// fragments, inline them into place, giving us a pattern without any
+ /// PatFrags references. This may increase the number of trees in the
+ /// pattern if a PatFrags has multiple alternatives.
+ void InlinePatternFragments() {
+ std::vector<TreePatternNodePtr> Copy = Trees;
+ Trees.clear();
+ for (unsigned i = 0, e = Copy.size(); i != e; ++i)
+ Copy[i]->InlinePatternFragments(Copy[i], *this, Trees);
+ }
+
+ /// InferAllTypes - Infer/propagate as many types throughout the expression
+ /// patterns as possible. Return true if all types are inferred, false
+ /// otherwise. Bail out if a type contradiction is found.
+ bool InferAllTypes(
+ const StringMap<SmallVector<TreePatternNode *, 1>> *NamedTypes = nullptr);
+
+ /// error - If this is the first error in the current resolution step,
+ /// print it and set the error flag. Otherwise, continue silently.
+ void error(const Twine &Msg);
+ bool hasError() const {
+ return HasError;
+ }
+ void resetError() {
+ HasError = false;
+ }
+
+ TypeInfer &getInfer() { return Infer; }
+
+ void print(raw_ostream &OS) const;
+ void dump() const;
+
+private:
+ TreePatternNodePtr ParseTreePattern(Init *DI, StringRef OpName);
+ void ComputeNamedNodes();
+ void ComputeNamedNodes(TreePatternNode *N);
+};
+
+
+inline bool TreePatternNode::UpdateNodeType(unsigned ResNo,
+ const TypeSetByHwMode &InTy,
+ TreePattern &TP) {
+ TypeSetByHwMode VTS(InTy);
+ TP.getInfer().expandOverloads(VTS);
+ return TP.getInfer().MergeInTypeInfo(Types[ResNo], VTS);
+}
+
+inline bool TreePatternNode::UpdateNodeType(unsigned ResNo,
+ MVT::SimpleValueType InTy,
+ TreePattern &TP) {
+ TypeSetByHwMode VTS(InTy);
+ TP.getInfer().expandOverloads(VTS);
+ return TP.getInfer().MergeInTypeInfo(Types[ResNo], VTS);
+}
+
+inline bool TreePatternNode::UpdateNodeType(unsigned ResNo,
+ ValueTypeByHwMode InTy,
+ TreePattern &TP) {
+ TypeSetByHwMode VTS(InTy);
+ TP.getInfer().expandOverloads(VTS);
+ return TP.getInfer().MergeInTypeInfo(Types[ResNo], VTS);
+}
+
+
+/// DAGDefaultOperand - One of these is created for each OperandWithDefaultOps
+/// that has a set ExecuteAlways / DefaultOps field.
+struct DAGDefaultOperand {
+ std::vector<TreePatternNodePtr> DefaultOps;
+};
+
+class DAGInstruction {
+ std::vector<Record*> Results;
+ std::vector<Record*> Operands;
+ std::vector<Record*> ImpResults;
+ TreePatternNodePtr SrcPattern;
+ TreePatternNodePtr ResultPattern;
+
+public:
+ DAGInstruction(const std::vector<Record*> &results,
+ const std::vector<Record*> &operands,
+ const std::vector<Record*> &impresults,
+ TreePatternNodePtr srcpattern = nullptr,
+ TreePatternNodePtr resultpattern = nullptr)
+ : Results(results), Operands(operands), ImpResults(impresults),
+ SrcPattern(srcpattern), ResultPattern(resultpattern) {}
+
+ unsigned getNumResults() const { return Results.size(); }
+ unsigned getNumOperands() const { return Operands.size(); }
+ unsigned getNumImpResults() const { return ImpResults.size(); }
+ const std::vector<Record*>& getImpResults() const { return ImpResults; }
+
+ Record *getResult(unsigned RN) const {
+ assert(RN < Results.size());
+ return Results[RN];
+ }
+
+ Record *getOperand(unsigned ON) const {
+ assert(ON < Operands.size());
+ return Operands[ON];
+ }
+
+ Record *getImpResult(unsigned RN) const {
+ assert(RN < ImpResults.size());
+ return ImpResults[RN];
+ }
+
+ TreePatternNodePtr getSrcPattern() const { return SrcPattern; }
+ TreePatternNodePtr getResultPattern() const { return ResultPattern; }
+};
+
+/// PatternToMatch - Used by CodeGenDAGPatterns to keep tab of patterns
+/// processed to produce isel.
+class PatternToMatch {
+ Record *SrcRecord; // Originating Record for the pattern.
+ ListInit *Predicates; // Top level predicate conditions to match.
+ TreePatternNodePtr SrcPattern; // Source pattern to match.
+ TreePatternNodePtr DstPattern; // Resulting pattern.
+ std::vector<Record*> Dstregs; // Physical register defs being matched.
+ std::string HwModeFeatures;
+ int AddedComplexity; // Add to matching pattern complexity.
+ unsigned ID; // Unique ID for the record.
+ unsigned ForceMode; // Force this mode in type inference when set.
+
+public:
+ PatternToMatch(Record *srcrecord, ListInit *preds, TreePatternNodePtr src,
+ TreePatternNodePtr dst, std::vector<Record *> dstregs,
+ int complexity, unsigned uid, unsigned setmode = 0,
+ const Twine &hwmodefeatures = "")
+ : SrcRecord(srcrecord), Predicates(preds), SrcPattern(src),
+ DstPattern(dst), Dstregs(std::move(dstregs)),
+ HwModeFeatures(hwmodefeatures.str()), AddedComplexity(complexity),
+ ID(uid), ForceMode(setmode) {}
+
+ Record *getSrcRecord() const { return SrcRecord; }
+ ListInit *getPredicates() const { return Predicates; }
+ TreePatternNode *getSrcPattern() const { return SrcPattern.get(); }
+ TreePatternNodePtr getSrcPatternShared() const { return SrcPattern; }
+ TreePatternNode *getDstPattern() const { return DstPattern.get(); }
+ TreePatternNodePtr getDstPatternShared() const { return DstPattern; }
+ const std::vector<Record*> &getDstRegs() const { return Dstregs; }
+ StringRef getHwModeFeatures() const { return HwModeFeatures; }
+ int getAddedComplexity() const { return AddedComplexity; }
+ unsigned getID() const { return ID; }
+ unsigned getForceMode() const { return ForceMode; }
+
+ std::string getPredicateCheck() const;
+ void getPredicateRecords(SmallVectorImpl<Record *> &PredicateRecs) const;
+
+ /// Compute the complexity metric for the input pattern. This roughly
+ /// corresponds to the number of nodes that are covered.
+ int getPatternComplexity(const CodeGenDAGPatterns &CGP) const;
+};
+
+class CodeGenDAGPatterns {
+ RecordKeeper &Records;
+ CodeGenTarget Target;
+ CodeGenIntrinsicTable Intrinsics;
+
+ std::map<Record*, SDNodeInfo, LessRecordByID> SDNodes;
+ std::map<Record*, std::pair<Record*, std::string>, LessRecordByID>
+ SDNodeXForms;
+ std::map<Record*, ComplexPattern, LessRecordByID> ComplexPatterns;
+ std::map<Record *, std::unique_ptr<TreePattern>, LessRecordByID>
+ PatternFragments;
+ std::map<Record*, DAGDefaultOperand, LessRecordByID> DefaultOperands;
+ std::map<Record*, DAGInstruction, LessRecordByID> Instructions;
+
+ // Specific SDNode definitions:
+ Record *intrinsic_void_sdnode;
+ Record *intrinsic_w_chain_sdnode, *intrinsic_wo_chain_sdnode;
+
+ /// PatternsToMatch - All of the things we are matching on the DAG. The first
+ /// value is the pattern to match, the second pattern is the result to
+ /// emit.
+ std::vector<PatternToMatch> PatternsToMatch;
+
+ TypeSetByHwMode LegalVTS;
+
+ using PatternRewriterFn = std::function<void (TreePattern *)>;
+ PatternRewriterFn PatternRewriter;
+
+ unsigned NumScopes = 0;
+
+public:
+ CodeGenDAGPatterns(RecordKeeper &R,
+ PatternRewriterFn PatternRewriter = nullptr);
+
+ CodeGenTarget &getTargetInfo() { return Target; }
+ const CodeGenTarget &getTargetInfo() const { return Target; }
+ const TypeSetByHwMode &getLegalTypes() const { return LegalVTS; }
+
+ Record *getSDNodeNamed(StringRef Name) const;
+
+ const SDNodeInfo &getSDNodeInfo(Record *R) const {
+ auto F = SDNodes.find(R);
+ assert(F != SDNodes.end() && "Unknown node!");
+ return F->second;
+ }
+
+ // Node transformation lookups.
+ typedef std::pair<Record*, std::string> NodeXForm;
+ const NodeXForm &getSDNodeTransform(Record *R) const {
+ auto F = SDNodeXForms.find(R);
+ assert(F != SDNodeXForms.end() && "Invalid transform!");
+ return F->second;
+ }
+
+ const ComplexPattern &getComplexPattern(Record *R) const {
+ auto F = ComplexPatterns.find(R);
+ assert(F != ComplexPatterns.end() && "Unknown addressing mode!");
+ return F->second;
+ }
+
+ const CodeGenIntrinsic &getIntrinsic(Record *R) const {
+ for (unsigned i = 0, e = Intrinsics.size(); i != e; ++i)
+ if (Intrinsics[i].TheDef == R) return Intrinsics[i];
+ llvm_unreachable("Unknown intrinsic!");
+ }
+
+ const CodeGenIntrinsic &getIntrinsicInfo(unsigned IID) const {
+ if (IID-1 < Intrinsics.size())
+ return Intrinsics[IID-1];
+ llvm_unreachable("Bad intrinsic ID!");
+ }
+
+ unsigned getIntrinsicID(Record *R) const {
+ for (unsigned i = 0, e = Intrinsics.size(); i != e; ++i)
+ if (Intrinsics[i].TheDef == R) return i;
+ llvm_unreachable("Unknown intrinsic!");
+ }
+
+ const DAGDefaultOperand &getDefaultOperand(Record *R) const {
+ auto F = DefaultOperands.find(R);
+ assert(F != DefaultOperands.end() &&"Isn't an analyzed default operand!");
+ return F->second;
+ }
+
+ // Pattern Fragment information.
+ TreePattern *getPatternFragment(Record *R) const {
+ auto F = PatternFragments.find(R);
+ assert(F != PatternFragments.end() && "Invalid pattern fragment request!");
+ return F->second.get();
+ }
+ TreePattern *getPatternFragmentIfRead(Record *R) const {
+ auto F = PatternFragments.find(R);
+ if (F == PatternFragments.end())
+ return nullptr;
+ return F->second.get();
+ }
+
+ typedef std::map<Record *, std::unique_ptr<TreePattern>,
+ LessRecordByID>::const_iterator pf_iterator;
+ pf_iterator pf_begin() const { return PatternFragments.begin(); }
+ pf_iterator pf_end() const { return PatternFragments.end(); }
+ iterator_range<pf_iterator> ptfs() const { return PatternFragments; }
+
+ // Patterns to match information.
+ typedef std::vector<PatternToMatch>::const_iterator ptm_iterator;
+ ptm_iterator ptm_begin() const { return PatternsToMatch.begin(); }
+ ptm_iterator ptm_end() const { return PatternsToMatch.end(); }
+ iterator_range<ptm_iterator> ptms() const { return PatternsToMatch; }
+
+ /// Parse the Pattern for an instruction, and insert the result in DAGInsts.
+ typedef std::map<Record*, DAGInstruction, LessRecordByID> DAGInstMap;
+ void parseInstructionPattern(
+ CodeGenInstruction &CGI, ListInit *Pattern,
+ DAGInstMap &DAGInsts);
+
+ const DAGInstruction &getInstruction(Record *R) const {
+ auto F = Instructions.find(R);
+ assert(F != Instructions.end() && "Unknown instruction!");
+ return F->second;
+ }
+
+ Record *get_intrinsic_void_sdnode() const {
+ return intrinsic_void_sdnode;
+ }
+ Record *get_intrinsic_w_chain_sdnode() const {
+ return intrinsic_w_chain_sdnode;
+ }
+ Record *get_intrinsic_wo_chain_sdnode() const {
+ return intrinsic_wo_chain_sdnode;
+ }
+
+ unsigned allocateScope() { return ++NumScopes; }
+
+ bool operandHasDefault(Record *Op) const {
+ return Op->isSubClassOf("OperandWithDefaultOps") &&
+ !getDefaultOperand(Op).DefaultOps.empty();
+ }
+
+private:
+ void ParseNodeInfo();
+ void ParseNodeTransforms();
+ void ParseComplexPatterns();
+ void ParsePatternFragments(bool OutFrags = false);
+ void ParseDefaultOperands();
+ void ParseInstructions();
+ void ParsePatterns();
+ void ExpandHwModeBasedTypes();
+ void InferInstructionFlags();
+ void GenerateVariants();
+ void VerifyInstructionFlags();
+
+ void ParseOnePattern(Record *TheDef,
+ TreePattern &Pattern, TreePattern &Result,
+ const std::vector<Record *> &InstImpResults);
+ void AddPatternToMatch(TreePattern *Pattern, PatternToMatch &&PTM);
+ void FindPatternInputsAndOutputs(
+ TreePattern &I, TreePatternNodePtr Pat,
+ std::map<std::string, TreePatternNodePtr> &InstInputs,
+ MapVector<std::string, TreePatternNodePtr,
+ std::map<std::string, unsigned>> &InstResults,
+ std::vector<Record *> &InstImpResults);
+};
+
+
+inline bool SDNodeInfo::ApplyTypeConstraints(TreePatternNode *N,
+ TreePattern &TP) const {
+ bool MadeChange = false;
+ for (unsigned i = 0, e = TypeConstraints.size(); i != e; ++i)
+ MadeChange |= TypeConstraints[i].ApplyTypeConstraint(N, *this, TP);
+ return MadeChange;
+ }
+
+} // end namespace llvm
+
+#endif
diff --git a/contrib/libs/llvm16/utils/TableGen/CodeGenHwModes.cpp b/contrib/libs/llvm16/utils/TableGen/CodeGenHwModes.cpp
new file mode 100644
index 0000000000..2fec46c441
--- /dev/null
+++ b/contrib/libs/llvm16/utils/TableGen/CodeGenHwModes.cpp
@@ -0,0 +1,113 @@
+//===--- CodeGenHwModes.cpp -----------------------------------------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+// Classes to parse and store HW mode information for instruction selection.
+//===----------------------------------------------------------------------===//
+
+#include "CodeGenHwModes.h"
+#include "llvm/Support/Debug.h"
+#include "llvm/Support/raw_ostream.h"
+#include "llvm/TableGen/Error.h"
+#include "llvm/TableGen/Record.h"
+
+using namespace llvm;
+
+StringRef CodeGenHwModes::DefaultModeName = "DefaultMode";
+
+HwMode::HwMode(Record *R) {
+ Name = R->getName();
+ Features = std::string(R->getValueAsString("Features"));
+}
+
+LLVM_DUMP_METHOD
+void HwMode::dump() const {
+ dbgs() << Name << ": " << Features << '\n';
+}
+
+HwModeSelect::HwModeSelect(Record *R, CodeGenHwModes &CGH) {
+ std::vector<Record*> Modes = R->getValueAsListOfDefs("Modes");
+ std::vector<Record*> Objects = R->getValueAsListOfDefs("Objects");
+ if (Modes.size() != Objects.size()) {
+ PrintError(R->getLoc(), "in record " + R->getName() +
+ " derived from HwModeSelect: the lists Modes and Objects should "
+ "have the same size");
+ report_fatal_error("error in target description.");
+ }
+ for (unsigned i = 0, e = Modes.size(); i != e; ++i) {
+ unsigned ModeId = CGH.getHwModeId(Modes[i]->getName());
+ Items.push_back(std::make_pair(ModeId, Objects[i]));
+ }
+}
+
+LLVM_DUMP_METHOD
+void HwModeSelect::dump() const {
+ dbgs() << '{';
+ for (const PairType &P : Items)
+ dbgs() << " (" << P.first << ',' << P.second->getName() << ')';
+ dbgs() << " }\n";
+}
+
+CodeGenHwModes::CodeGenHwModes(RecordKeeper &RK) : Records(RK) {
+ std::vector<Record*> MRs = Records.getAllDerivedDefinitions("HwMode");
+ // The default mode needs a definition in the .td sources for TableGen
+ // to accept references to it. We need to ignore the definition here.
+ for (auto I = MRs.begin(), E = MRs.end(); I != E; ++I) {
+ if ((*I)->getName() != DefaultModeName)
+ continue;
+ MRs.erase(I);
+ break;
+ }
+
+ for (Record *R : MRs) {
+ Modes.emplace_back(R);
+ unsigned NewId = Modes.size();
+ ModeIds.insert(std::make_pair(Modes[NewId-1].Name, NewId));
+ }
+
+ std::vector<Record*> MSs = Records.getAllDerivedDefinitions("HwModeSelect");
+ for (Record *R : MSs) {
+ auto P = ModeSelects.emplace(std::make_pair(R, HwModeSelect(R, *this)));
+ assert(P.second);
+ (void)P;
+ }
+}
+
+unsigned CodeGenHwModes::getHwModeId(StringRef Name) const {
+ if (Name == DefaultModeName)
+ return DefaultMode;
+ auto F = ModeIds.find(Name);
+ assert(F != ModeIds.end() && "Unknown mode name");
+ return F->second;
+}
+
+const HwModeSelect &CodeGenHwModes::getHwModeSelect(Record *R) const {
+ auto F = ModeSelects.find(R);
+ assert(F != ModeSelects.end() && "Record is not a \"mode select\"");
+ return F->second;
+}
+
+LLVM_DUMP_METHOD
+void CodeGenHwModes::dump() const {
+ dbgs() << "Modes: {\n";
+ for (const HwMode &M : Modes) {
+ dbgs() << " ";
+ M.dump();
+ }
+ dbgs() << "}\n";
+
+ dbgs() << "ModeIds: {\n";
+ for (const auto &P : ModeIds)
+ dbgs() << " " << P.first() << " -> " << P.second << '\n';
+ dbgs() << "}\n";
+
+ dbgs() << "ModeSelects: {\n";
+ for (const auto &P : ModeSelects) {
+ dbgs() << " " << P.first->getName() << " -> ";
+ P.second.dump();
+ }
+ dbgs() << "}\n";
+}
diff --git a/contrib/libs/llvm16/utils/TableGen/CodeGenHwModes.h b/contrib/libs/llvm16/utils/TableGen/CodeGenHwModes.h
new file mode 100644
index 0000000000..55507cbca3
--- /dev/null
+++ b/contrib/libs/llvm16/utils/TableGen/CodeGenHwModes.h
@@ -0,0 +1,64 @@
+//===--- CodeGenHwModes.h ---------------------------------------*- C++ -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+// Classes to parse and store HW mode information for instruction selection.
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_UTILS_TABLEGEN_CODEGENHWMODES_H
+#define LLVM_UTILS_TABLEGEN_CODEGENHWMODES_H
+
+#include "llvm/ADT/StringMap.h"
+#include <cassert>
+#include <map>
+#include <string>
+#include <vector>
+
+// HwModeId -> list of predicates (definition)
+
+namespace llvm {
+ class Record;
+ class RecordKeeper;
+
+ struct CodeGenHwModes;
+
+ struct HwMode {
+ HwMode(Record *R);
+ StringRef Name;
+ std::string Features;
+ void dump() const;
+ };
+
+ struct HwModeSelect {
+ HwModeSelect(Record *R, CodeGenHwModes &CGH);
+ typedef std::pair<unsigned, Record*> PairType;
+ std::vector<PairType> Items;
+ void dump() const;
+ };
+
+ struct CodeGenHwModes {
+ enum : unsigned { DefaultMode = 0 };
+ static StringRef DefaultModeName;
+
+ CodeGenHwModes(RecordKeeper &R);
+ unsigned getHwModeId(StringRef Name) const;
+ const HwMode &getMode(unsigned Id) const {
+ assert(Id != 0 && "Mode id of 0 is reserved for the default mode");
+ return Modes[Id-1];
+ }
+ const HwModeSelect &getHwModeSelect(Record *R) const;
+ unsigned getNumModeIds() const { return Modes.size()+1; }
+ void dump() const;
+
+ private:
+ RecordKeeper &Records;
+ StringMap<unsigned> ModeIds; // HwMode (string) -> HwModeId
+ std::vector<HwMode> Modes;
+ std::map<Record*,HwModeSelect> ModeSelects;
+ };
+}
+
+#endif // LLVM_UTILS_TABLEGEN_CODEGENHWMODES_H
diff --git a/contrib/libs/llvm16/utils/TableGen/CodeGenInstruction.cpp b/contrib/libs/llvm16/utils/TableGen/CodeGenInstruction.cpp
new file mode 100644
index 0000000000..238c6a1b6b
--- /dev/null
+++ b/contrib/libs/llvm16/utils/TableGen/CodeGenInstruction.cpp
@@ -0,0 +1,857 @@
+//===- CodeGenInstruction.cpp - CodeGen Instruction Class Wrapper ---------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// This file implements the CodeGenInstruction class.
+//
+//===----------------------------------------------------------------------===//
+
+#include "CodeGenInstruction.h"
+#include "CodeGenTarget.h"
+#include "llvm/ADT/StringExtras.h"
+#include "llvm/ADT/StringMap.h"
+#include "llvm/TableGen/Error.h"
+#include "llvm/TableGen/Record.h"
+#include <set>
+using namespace llvm;
+
+//===----------------------------------------------------------------------===//
+// CGIOperandList Implementation
+//===----------------------------------------------------------------------===//
+
+CGIOperandList::CGIOperandList(Record *R) : TheDef(R) {
+ isPredicable = false;
+ hasOptionalDef = false;
+ isVariadic = false;
+
+ DagInit *OutDI = R->getValueAsDag("OutOperandList");
+
+ if (DefInit *Init = dyn_cast<DefInit>(OutDI->getOperator())) {
+ if (Init->getDef()->getName() != "outs")
+ PrintFatalError(R->getLoc(),
+ R->getName() +
+ ": invalid def name for output list: use 'outs'");
+ } else
+ PrintFatalError(R->getLoc(),
+ R->getName() + ": invalid output list: use 'outs'");
+
+ NumDefs = OutDI->getNumArgs();
+
+ DagInit *InDI = R->getValueAsDag("InOperandList");
+ if (DefInit *Init = dyn_cast<DefInit>(InDI->getOperator())) {
+ if (Init->getDef()->getName() != "ins")
+ PrintFatalError(R->getLoc(),
+ R->getName() +
+ ": invalid def name for input list: use 'ins'");
+ } else
+ PrintFatalError(R->getLoc(),
+ R->getName() + ": invalid input list: use 'ins'");
+
+ unsigned MIOperandNo = 0;
+ std::set<std::string> OperandNames;
+ unsigned e = InDI->getNumArgs() + OutDI->getNumArgs();
+ OperandList.reserve(e);
+ bool VariadicOuts = false;
+ for (unsigned i = 0; i != e; ++i){
+ Init *ArgInit;
+ StringRef ArgName;
+ if (i < NumDefs) {
+ ArgInit = OutDI->getArg(i);
+ ArgName = OutDI->getArgNameStr(i);
+ } else {
+ ArgInit = InDI->getArg(i-NumDefs);
+ ArgName = InDI->getArgNameStr(i-NumDefs);
+ }
+
+ DagInit *SubArgDag = dyn_cast<DagInit>(ArgInit);
+ if (SubArgDag)
+ ArgInit = SubArgDag->getOperator();
+
+ DefInit *Arg = dyn_cast<DefInit>(ArgInit);
+ if (!Arg)
+ PrintFatalError(R->getLoc(), "Illegal operand for the '" + R->getName() +
+ "' instruction!");
+
+ Record *Rec = Arg->getDef();
+ std::string PrintMethod = "printOperand";
+ std::string EncoderMethod;
+ std::string OperandType = "OPERAND_UNKNOWN";
+ std::string OperandNamespace = "MCOI";
+ unsigned NumOps = 1;
+ DagInit *MIOpInfo = nullptr;
+ if (Rec->isSubClassOf("RegisterOperand")) {
+ PrintMethod = std::string(Rec->getValueAsString("PrintMethod"));
+ OperandType = std::string(Rec->getValueAsString("OperandType"));
+ OperandNamespace = std::string(Rec->getValueAsString("OperandNamespace"));
+ EncoderMethod = std::string(Rec->getValueAsString("EncoderMethod"));
+ } else if (Rec->isSubClassOf("Operand")) {
+ PrintMethod = std::string(Rec->getValueAsString("PrintMethod"));
+ OperandType = std::string(Rec->getValueAsString("OperandType"));
+ OperandNamespace = std::string(Rec->getValueAsString("OperandNamespace"));
+ // If there is an explicit encoder method, use it.
+ EncoderMethod = std::string(Rec->getValueAsString("EncoderMethod"));
+ MIOpInfo = Rec->getValueAsDag("MIOperandInfo");
+
+ // Verify that MIOpInfo has an 'ops' root value.
+ if (!isa<DefInit>(MIOpInfo->getOperator()) ||
+ cast<DefInit>(MIOpInfo->getOperator())->getDef()->getName() != "ops")
+ PrintFatalError(R->getLoc(),
+ "Bad value for MIOperandInfo in operand '" +
+ Rec->getName() + "'\n");
+
+ // If we have MIOpInfo, then we have #operands equal to number of entries
+ // in MIOperandInfo.
+ if (unsigned NumArgs = MIOpInfo->getNumArgs())
+ NumOps = NumArgs;
+
+ if (Rec->isSubClassOf("PredicateOp"))
+ isPredicable = true;
+ else if (Rec->isSubClassOf("OptionalDefOperand"))
+ hasOptionalDef = true;
+ } else if (Rec->getName() == "variable_ops") {
+ if (i < NumDefs)
+ VariadicOuts = true;
+ isVariadic = true;
+ continue;
+ } else if (Rec->isSubClassOf("RegisterClass")) {
+ OperandType = "OPERAND_REGISTER";
+ } else if (!Rec->isSubClassOf("PointerLikeRegClass") &&
+ !Rec->isSubClassOf("unknown_class")) {
+ PrintFatalError(R->getLoc(), "Unknown operand class '" + Rec->getName() +
+ "' in '" + R->getName() +
+ "' instruction!");
+ }
+
+ // Check that the operand has a name and that it's unique.
+ if (ArgName.empty())
+ PrintFatalError(R->getLoc(), "In instruction '" + R->getName() +
+ "', operand #" + Twine(i) +
+ " has no name!");
+ if (!OperandNames.insert(std::string(ArgName)).second)
+ PrintFatalError(R->getLoc(),
+ "In instruction '" + R->getName() + "', operand #" +
+ Twine(i) +
+ " has the same name as a previous operand!");
+
+ OperandInfo &OpInfo = OperandList.emplace_back(
+ Rec, std::string(ArgName), std::string(PrintMethod),
+ OperandNamespace + "::" + OperandType, MIOperandNo, NumOps, MIOpInfo);
+
+ if (SubArgDag) {
+ if (SubArgDag->getNumArgs() != NumOps) {
+ PrintFatalError(R->getLoc(), "In instruction '" + R->getName() +
+ "', operand #" + Twine(i) + " has " +
+ Twine(SubArgDag->getNumArgs()) +
+ " sub-arg names, expected " +
+ Twine(NumOps) + ".");
+ }
+
+ for (unsigned j = 0; j < NumOps; ++j) {
+ if (!isa<UnsetInit>(SubArgDag->getArg(j)))
+ PrintFatalError(R->getLoc(),
+ "In instruction '" + R->getName() + "', operand #" +
+ Twine(i) + " sub-arg #" + Twine(j) +
+ " has unexpected operand (expected only $name).");
+
+ StringRef SubArgName = SubArgDag->getArgNameStr(j);
+ if (SubArgName.empty())
+ PrintFatalError(R->getLoc(), "In instruction '" + R->getName() +
+ "', operand #" + Twine(i) +
+ " has no name!");
+ if (!OperandNames.insert(std::string(SubArgName)).second)
+ PrintFatalError(R->getLoc(),
+ "In instruction '" + R->getName() + "', operand #" +
+ Twine(i) + " sub-arg #" + Twine(j) +
+ " has the same name as a previous operand!");
+
+ if (auto MaybeEncoderMethod =
+ cast<DefInit>(MIOpInfo->getArg(j))
+ ->getDef()
+ ->getValueAsOptionalString("EncoderMethod")) {
+ OpInfo.EncoderMethodNames[j] = *MaybeEncoderMethod;
+ }
+
+ OpInfo.SubOpNames[j] = SubArgName;
+ SubOpAliases[SubArgName] = std::make_pair(MIOperandNo, j);
+ }
+ } else if (!EncoderMethod.empty()) {
+      // If we have no explicit sub-op dag, but have a top-level encoder
+      // method, the single encoder will encode multiple sub-ops itself.
+ OpInfo.EncoderMethodNames[0] = EncoderMethod;
+ for (unsigned j = 1; j < NumOps; ++j)
+ OpInfo.DoNotEncode[j] = true;
+ }
+
+ MIOperandNo += NumOps;
+ }
+
+ if (VariadicOuts)
+ --NumDefs;
+}
+
+
+/// getOperandNamed - Return the index of the operand with the specified
+/// non-empty name. If the instruction does not have an operand with the
+/// specified name, abort.
+///
+unsigned CGIOperandList::getOperandNamed(StringRef Name) const {
+ unsigned OpIdx;
+ if (hasOperandNamed(Name, OpIdx))
+ return OpIdx;
+ PrintFatalError(TheDef->getLoc(), "'" + TheDef->getName() +
+ "' does not have an operand named '$" +
+ Name + "'!");
+}
+
+/// hasOperandNamed - Query whether the instruction has an operand of the
+/// given name. If so, return true and set OpIdx to the index of the
+/// operand. Otherwise, return false.
+bool CGIOperandList::hasOperandNamed(StringRef Name, unsigned &OpIdx) const {
+ assert(!Name.empty() && "Cannot search for operand with no name!");
+ for (unsigned i = 0, e = OperandList.size(); i != e; ++i)
+ if (OperandList[i].Name == Name) {
+ OpIdx = i;
+ return true;
+ }
+ return false;
+}
+
+bool CGIOperandList::hasSubOperandAlias(
+ StringRef Name, std::pair<unsigned, unsigned> &SubOp) const {
+ assert(!Name.empty() && "Cannot search for operand with no name!");
+ auto SubOpIter = SubOpAliases.find(Name);
+ if (SubOpIter != SubOpAliases.end()) {
+ SubOp = SubOpIter->second;
+ return true;
+ }
+ return false;
+}
+
+std::pair<unsigned,unsigned>
+CGIOperandList::ParseOperandName(StringRef Op, bool AllowWholeOp) {
+ if (Op.empty() || Op[0] != '$')
+ PrintFatalError(TheDef->getLoc(),
+ TheDef->getName() + ": Illegal operand name: '" + Op + "'");
+
+ StringRef OpName = Op.substr(1);
+ StringRef SubOpName;
+
+ // Check to see if this is $foo.bar.
+ StringRef::size_type DotIdx = OpName.find_first_of('.');
+ if (DotIdx != StringRef::npos) {
+ SubOpName = OpName.substr(DotIdx+1);
+ if (SubOpName.empty())
+ PrintFatalError(TheDef->getLoc(),
+ TheDef->getName() +
+ ": illegal empty suboperand name in '" + Op + "'");
+ OpName = OpName.substr(0, DotIdx);
+ }
+
+ unsigned OpIdx;
+
+ if (std::pair<unsigned, unsigned> SubOp; hasSubOperandAlias(OpName, SubOp)) {
+ // Found a name for a piece of an operand, just return it directly.
+ if (!SubOpName.empty()) {
+ PrintFatalError(
+ TheDef->getLoc(),
+ TheDef->getName() +
+ ": Cannot use dotted suboperand name within suboperand '" +
+ OpName + "'");
+ }
+ return SubOp;
+ }
+
+ OpIdx = getOperandNamed(OpName);
+
+ if (SubOpName.empty()) { // If no suboperand name was specified:
+ // If one was needed, throw.
+ if (OperandList[OpIdx].MINumOperands > 1 && !AllowWholeOp &&
+ SubOpName.empty())
+ PrintFatalError(TheDef->getLoc(),
+ TheDef->getName() +
+ ": Illegal to refer to"
+ " whole operand part of complex operand '" +
+ Op + "'");
+
+ // Otherwise, return the operand.
+ return std::make_pair(OpIdx, 0U);
+ }
+
+ // Find the suboperand number involved.
+ DagInit *MIOpInfo = OperandList[OpIdx].MIOperandInfo;
+ if (!MIOpInfo)
+ PrintFatalError(TheDef->getLoc(), TheDef->getName() +
+ ": unknown suboperand name in '" +
+ Op + "'");
+
+ // Find the operand with the right name.
+ for (unsigned i = 0, e = MIOpInfo->getNumArgs(); i != e; ++i)
+ if (MIOpInfo->getArgNameStr(i) == SubOpName)
+ return std::make_pair(OpIdx, i);
+
+ // Otherwise, didn't find it!
+ PrintFatalError(TheDef->getLoc(), TheDef->getName() +
+ ": unknown suboperand name in '" + Op +
+ "'");
+ return std::make_pair(0U, 0U);
+}
+
+static void ParseConstraint(StringRef CStr, CGIOperandList &Ops,
+ Record *Rec) {
+ // EARLY_CLOBBER: @early $reg
+ StringRef::size_type wpos = CStr.find_first_of(" \t");
+ StringRef::size_type start = CStr.find_first_not_of(" \t");
+ StringRef Tok = CStr.substr(start, wpos - start);
+ if (Tok == "@earlyclobber") {
+ StringRef Name = CStr.substr(wpos+1);
+ wpos = Name.find_first_not_of(" \t");
+ if (wpos == StringRef::npos)
+ PrintFatalError(
+ Rec->getLoc(), "Illegal format for @earlyclobber constraint in '" +
+ Rec->getName() + "': '" + CStr + "'");
+ Name = Name.substr(wpos);
+ std::pair<unsigned,unsigned> Op = Ops.ParseOperandName(Name, false);
+
+ // Build the string for the operand
+ if (!Ops[Op.first].Constraints[Op.second].isNone())
+ PrintFatalError(
+ Rec->getLoc(), "Operand '" + Name + "' of '" + Rec->getName() +
+ "' cannot have multiple constraints!");
+ Ops[Op.first].Constraints[Op.second] =
+ CGIOperandList::ConstraintInfo::getEarlyClobber();
+ return;
+ }
+
+ // Only other constraint is "TIED_TO" for now.
+ StringRef::size_type pos = CStr.find_first_of('=');
+ if (pos == StringRef::npos)
+ PrintFatalError(
+ Rec->getLoc(), "Unrecognized constraint '" + CStr +
+ "' in '" + Rec->getName() + "'");
+ start = CStr.find_first_not_of(" \t");
+
+ // TIED_TO: $src1 = $dst
+ wpos = CStr.find_first_of(" \t", start);
+ if (wpos == StringRef::npos || wpos > pos)
+ PrintFatalError(
+ Rec->getLoc(), "Illegal format for tied-to constraint in '" +
+ Rec->getName() + "': '" + CStr + "'");
+ StringRef LHSOpName = CStr.substr(start, wpos - start);
+ std::pair<unsigned,unsigned> LHSOp = Ops.ParseOperandName(LHSOpName, false);
+
+ wpos = CStr.find_first_not_of(" \t", pos + 1);
+ if (wpos == StringRef::npos)
+ PrintFatalError(
+ Rec->getLoc(), "Illegal format for tied-to constraint: '" + CStr + "'");
+
+ StringRef RHSOpName = CStr.substr(wpos);
+ std::pair<unsigned,unsigned> RHSOp = Ops.ParseOperandName(RHSOpName, false);
+
+ // Sort the operands into order, which should put the output one
+ // first. But keep the original order, for use in diagnostics.
+ bool FirstIsDest = (LHSOp < RHSOp);
+ std::pair<unsigned,unsigned> DestOp = (FirstIsDest ? LHSOp : RHSOp);
+ StringRef DestOpName = (FirstIsDest ? LHSOpName : RHSOpName);
+ std::pair<unsigned,unsigned> SrcOp = (FirstIsDest ? RHSOp : LHSOp);
+ StringRef SrcOpName = (FirstIsDest ? RHSOpName : LHSOpName);
+
+ // Ensure one operand is a def and the other is a use.
+ if (DestOp.first >= Ops.NumDefs)
+ PrintFatalError(
+ Rec->getLoc(), "Input operands '" + LHSOpName + "' and '" + RHSOpName +
+ "' of '" + Rec->getName() + "' cannot be tied!");
+ if (SrcOp.first < Ops.NumDefs)
+ PrintFatalError(
+ Rec->getLoc(), "Output operands '" + LHSOpName + "' and '" + RHSOpName +
+ "' of '" + Rec->getName() + "' cannot be tied!");
+
+ // The constraint has to go on the operand with higher index, i.e.
+ // the source one. Check there isn't another constraint there
+ // already.
+ if (!Ops[SrcOp.first].Constraints[SrcOp.second].isNone())
+ PrintFatalError(
+ Rec->getLoc(), "Operand '" + SrcOpName + "' of '" + Rec->getName() +
+ "' cannot have multiple constraints!");
+
+ unsigned DestFlatOpNo = Ops.getFlattenedOperandNumber(DestOp);
+ auto NewConstraint = CGIOperandList::ConstraintInfo::getTied(DestFlatOpNo);
+
+ // Check that the earlier operand is not the target of another tie
+ // before making it the target of this one.
+ for (const CGIOperandList::OperandInfo &Op : Ops) {
+ for (unsigned i = 0; i < Op.MINumOperands; i++)
+ if (Op.Constraints[i] == NewConstraint)
+ PrintFatalError(
+ Rec->getLoc(), "Operand '" + DestOpName + "' of '" + Rec->getName() +
+ "' cannot have multiple operands tied to it!");
+ }
+
+ Ops[SrcOp.first].Constraints[SrcOp.second] = NewConstraint;
+}
+
+/// Split an instruction's comma-separated "Constraints" string and hand
+/// each non-empty piece to ParseConstraint.  Empty pieces (consecutive or
+/// leading/trailing commas) are silently skipped.
+static void ParseConstraints(StringRef CStr, CGIOperandList &Ops, Record *Rec) {
+  if (CStr.empty())
+    return;
+
+  const StringRef Separators(",");
+  // Walk the string one comma-delimited piece at a time, skipping runs of
+  // separators between pieces.
+  StringRef::size_type Begin = CStr.find_first_not_of(Separators);
+  while (Begin != StringRef::npos) {
+    StringRef::size_type End = CStr.find_first_of(Separators, Begin);
+    if (End == StringRef::npos)
+      End = CStr.size();
+
+    ParseConstraint(CStr.substr(Begin, End - Begin), Ops, Rec);
+    Begin = CStr.find_first_not_of(Separators, End);
+  }
+}
+
+/// Parse the DisableEncoding field (whitespace/comma-separated "$op"
+/// tokens) and flag each named (sub-)operand so the code emitter skips it.
+void CGIOperandList::ProcessDisableEncoding(StringRef DisableEncoding) {
+  for (;;) {
+    StringRef Tok;
+    std::tie(Tok, DisableEncoding) = getToken(DisableEncoding, " ,\t");
+    if (Tok.empty())
+      return;
+
+    // Resolve the token to an (operand, suboperand) pair and mark it as
+    // not-to-be encoded.
+    std::pair<unsigned, unsigned> Idx = ParseOperandName(Tok, false);
+    OperandList[Idx.first].DoNotEncode[Idx.second] = true;
+  }
+}
+
+//===----------------------------------------------------------------------===//
+// CodeGenInstruction Implementation
+//===----------------------------------------------------------------------===//
+
+/// Construct the wrapper for an 'Instruction' TableGen record: copy out
+/// all flag bits, parse the operand constraints and the DisableEncoding
+/// list, and record any deprecation information.
+CodeGenInstruction::CodeGenInstruction(Record *R)
+  : TheDef(R), Operands(R), InferredFrom(nullptr) {
+  Namespace = R->getValueAsString("Namespace");
+  AsmString = std::string(R->getValueAsString("AsmString"));
+
+  // Straight copies of the record's boolean flags.
+  isPreISelOpcode = R->getValueAsBit("isPreISelOpcode");
+  isReturn     = R->getValueAsBit("isReturn");
+  isEHScopeReturn = R->getValueAsBit("isEHScopeReturn");
+  isBranch     = R->getValueAsBit("isBranch");
+  isIndirectBranch = R->getValueAsBit("isIndirectBranch");
+  isCompare    = R->getValueAsBit("isCompare");
+  isMoveImm    = R->getValueAsBit("isMoveImm");
+  isMoveReg    = R->getValueAsBit("isMoveReg");
+  isBitcast    = R->getValueAsBit("isBitcast");
+  isSelect     = R->getValueAsBit("isSelect");
+  isBarrier    = R->getValueAsBit("isBarrier");
+  isCall       = R->getValueAsBit("isCall");
+  isAdd        = R->getValueAsBit("isAdd");
+  isTrap       = R->getValueAsBit("isTrap");
+  canFoldAsLoad = R->getValueAsBit("canFoldAsLoad");
+  // Predicability may come from the record itself or from a predicate
+  // operand, but an explicit isUnpredicable overrides both.
+  isPredicable = !R->getValueAsBit("isUnpredicable") && (
+    Operands.isPredicable || R->getValueAsBit("isPredicable"));
+  isConvertibleToThreeAddress = R->getValueAsBit("isConvertibleToThreeAddress");
+  isCommutable = R->getValueAsBit("isCommutable");
+  isTerminator = R->getValueAsBit("isTerminator");
+  isReMaterializable = R->getValueAsBit("isReMaterializable");
+  hasDelaySlot = R->getValueAsBit("hasDelaySlot");
+  usesCustomInserter = R->getValueAsBit("usesCustomInserter");
+  hasPostISelHook = R->getValueAsBit("hasPostISelHook");
+  hasCtrlDep   = R->getValueAsBit("hasCtrlDep");
+  isNotDuplicable = R->getValueAsBit("isNotDuplicable");
+  isRegSequence = R->getValueAsBit("isRegSequence");
+  isExtractSubreg = R->getValueAsBit("isExtractSubreg");
+  isInsertSubreg = R->getValueAsBit("isInsertSubreg");
+  isConvergent = R->getValueAsBit("isConvergent");
+  hasNoSchedulingInfo = R->getValueAsBit("hasNoSchedulingInfo");
+  FastISelShouldIgnore = R->getValueAsBit("FastISelShouldIgnore");
+  variadicOpsAreDefs = R->getValueAsBit("variadicOpsAreDefs");
+  isAuthenticated = R->getValueAsBit("isAuthenticated");
+
+  // mayLoad/mayStore/hasSideEffects may be left unset in the .td file;
+  // remember that separately so flag inference can fill them in later.
+  bool Unset;
+  mayLoad      = R->getValueAsBitOrUnset("mayLoad", Unset);
+  mayLoad_Unset = Unset;
+  mayStore     = R->getValueAsBitOrUnset("mayStore", Unset);
+  mayStore_Unset = Unset;
+  mayRaiseFPException = R->getValueAsBit("mayRaiseFPException");
+  hasSideEffects = R->getValueAsBitOrUnset("hasSideEffects", Unset);
+  hasSideEffects_Unset = Unset;
+
+  isAsCheapAsAMove = R->getValueAsBit("isAsCheapAsAMove");
+  hasExtraSrcRegAllocReq = R->getValueAsBit("hasExtraSrcRegAllocReq");
+  hasExtraDefRegAllocReq = R->getValueAsBit("hasExtraDefRegAllocReq");
+  isCodeGenOnly = R->getValueAsBit("isCodeGenOnly");
+  isPseudo = R->getValueAsBit("isPseudo");
+  isMeta = R->getValueAsBit("isMeta");
+  ImplicitDefs = R->getValueAsListOfDefs("Defs");
+  ImplicitUses = R->getValueAsListOfDefs("Uses");
+
+  // This flag is only inferred from the pattern.
+  hasChain = false;
+  hasChain_Inferred = false;
+
+  // Parse Constraints.
+  ParseConstraints(R->getValueAsString("Constraints"), Operands, R);
+
+  // Parse the DisableEncoding field.
+  Operands.ProcessDisableEncoding(
+      R->getValueAsString("DisableEncoding"));
+
+  // First check for a ComplexDeprecationPredicate.
+  if (R->getValue("ComplexDeprecationPredicate")) {
+    HasComplexDeprecationPredicate = true;
+    DeprecatedReason =
+        std::string(R->getValueAsString("ComplexDeprecationPredicate"));
+  } else if (RecordVal *Dep = R->getValue("DeprecatedFeatureMask")) {
+    // Check if we have a Subtarget feature mask.
+    HasComplexDeprecationPredicate = false;
+    DeprecatedReason = Dep->getValue()->getAsString();
+  } else {
+    // This instruction isn't deprecated.
+    HasComplexDeprecationPredicate = false;
+    DeprecatedReason = "";
+  }
+}
+
+/// HasOneImplicitDefWithKnownVT - If the instruction has at least one
+/// implicit def and it has a known VT, return the VT, otherwise return
+/// MVT::Other.
+MVT::SimpleValueType CodeGenInstruction::HasOneImplicitDefWithKnownVT(
+    const CodeGenTarget &TargetInfo) const {
+  // No implicit defs at all -> nothing to report.
+  if (ImplicitDefs.empty())
+    return MVT::Other;
+
+  // Only the first implicit def is inspected; it must be a register.
+  Record *Def = ImplicitDefs.front();
+  assert(Def->isSubClassOf("Register"));
+
+  // Report the VT only when the register has exactly one, simple, type.
+  const std::vector<ValueTypeByHwMode> &VTs = TargetInfo.getRegisterVTs(Def);
+  bool HasSingleSimpleVT = VTs.size() == 1 && VTs[0].isSimple();
+  return HasSingleSimpleVT ? VTs[0].getSimple().SimpleTy : MVT::Other;
+}
+
+
+/// FlattenAsmStringVariants - Flatten the specified AsmString to only
+/// include text from the specified variant, returning the new string.
+std::string CodeGenInstruction::
+FlattenAsmStringVariants(StringRef Cur, unsigned Variant) {
+  std::string Res;
+
+  // Asm strings may contain variant groups of the form "{a|b|c}"; each
+  // iteration copies the text before the next group and then selects
+  // alternative #Variant from the group.  A '{' preceded by '$' or '\\'
+  // is an escape, not a group start.
+  for (;;) {
+    // Find the start of the next variant string.
+    size_t VariantsStart = 0;
+    for (size_t e = Cur.size(); VariantsStart != e; ++VariantsStart)
+      if (Cur[VariantsStart] == '{' &&
+          (VariantsStart == 0 || (Cur[VariantsStart-1] != '$' &&
+                                  Cur[VariantsStart-1] != '\\')))
+        break;
+
+    // Add the prefix to the result.
+    Res += Cur.slice(0, VariantsStart);
+    if (VariantsStart == Cur.size())
+      break;
+
+    ++VariantsStart; // Skip the '{'.
+
+    // Scan to the end of the variants string, honoring nested and
+    // escaped braces.
+    size_t VariantsEnd = VariantsStart;
+    unsigned NestedBraces = 1;
+    for (size_t e = Cur.size(); VariantsEnd != e; ++VariantsEnd) {
+      if (Cur[VariantsEnd] == '}' && Cur[VariantsEnd-1] != '\\') {
+        if (--NestedBraces == 0)
+          break;
+      } else if (Cur[VariantsEnd] == '{')
+        ++NestedBraces;
+    }
+
+    // Select the Nth variant (or empty).  Splitting past the last '|'
+    // yields empty strings, so a missing alternative flattens to "".
+    StringRef Selection = Cur.slice(VariantsStart, VariantsEnd);
+    for (unsigned i = 0; i != Variant; ++i)
+      Selection = Selection.split('|').second;
+    Res += Selection.split('|').first;
+
+    assert(VariantsEnd != Cur.size() &&
+           "Unterminated variants in assembly string!");
+    Cur = Cur.substr(VariantsEnd + 1);
+  }
+
+  return Res;
+}
+
+/// Shared helper for the operand-property queries: return true iff operand
+/// \p i of the dag field \p OpListName is a TypedOperand record with bit
+/// \p PropertyName set.
+bool CodeGenInstruction::isOperandImpl(StringRef OpListName, unsigned i,
+                                       StringRef PropertyName) const {
+  // Look up the i'th entry of the requested operand list, if present.
+  DagInit *OpList = TheDef->getValueAsDag(OpListName);
+  if (!OpList || i >= OpList->getNumArgs())
+    return false;
+
+  DefInit *OpDef = dyn_cast<DefInit>(OpList->getArg(i));
+  if (!OpDef)
+    return false;
+
+  // Only TypedOperand records carry the queried property bits.
+  Record *OpRec = OpDef->getDef();
+  if (!OpRec->isSubClassOf("TypedOperand"))
+    return false;
+  return OpRec->getValueAsBit(PropertyName);
+}
+
+//===----------------------------------------------------------------------===//
+/// CodeGenInstAlias Implementation
+//===----------------------------------------------------------------------===//
+
+/// tryAliasOpMatch - This is a helper function for the CodeGenInstAlias
+/// constructor. It checks if an argument in an InstAlias pattern matches
+/// the corresponding operand of the instruction. It returns true on a
+/// successful match, with ResOp set to the result operand to be used.
+bool CodeGenInstAlias::tryAliasOpMatch(DagInit *Result, unsigned AliasOpNo,
+                                       Record *InstOpRec, bool hasSubOps,
+                                       ArrayRef<SMLoc> Loc, CodeGenTarget &T,
+                                       ResultOperand &ResOp) {
+  // The argument may be a record ($rc:$name, a fixed register, zero_reg)
+  // or a literal (int / bits); each case below handles one form.
+  Init *Arg = Result->getArg(AliasOpNo);
+  DefInit *ADI = dyn_cast<DefInit>(Arg);
+  Record *ResultRecord = ADI ? ADI->getDef() : nullptr;
+
+  if (ADI && ADI->getDef() == InstOpRec) {
+    // If the operand is a record, it must have a name, and the record type
+    // must match up with the instruction's argument type.
+    if (!Result->getArgName(AliasOpNo))
+      PrintFatalError(Loc, "result argument #" + Twine(AliasOpNo) +
+                      " must have a name!");
+    ResOp = ResultOperand(std::string(Result->getArgNameStr(AliasOpNo)),
+                          ResultRecord);
+    return true;
+  }
+
+  // For register operands, the source register class can be a subclass
+  // of the instruction register class, not just an exact match.
+  if (InstOpRec->isSubClassOf("RegisterOperand"))
+    InstOpRec = InstOpRec->getValueAsDef("RegClass");
+
+  if (ADI && ADI->getDef()->isSubClassOf("RegisterOperand"))
+    ADI = ADI->getDef()->getValueAsDef("RegClass")->getDefInit();
+
+  if (ADI && ADI->getDef()->isSubClassOf("RegisterClass")) {
+    // Register-class argument: accepted when it is a subclass of the
+    // instruction's register class.
+    if (!InstOpRec->isSubClassOf("RegisterClass"))
+      return false;
+    if (!T.getRegisterClass(InstOpRec)
+          .hasSubClass(&T.getRegisterClass(ADI->getDef())))
+      return false;
+    ResOp = ResultOperand(std::string(Result->getArgNameStr(AliasOpNo)),
+                          ResultRecord);
+    return true;
+  }
+
+  // Handle explicit registers.
+  if (ADI && ADI->getDef()->isSubClassOf("Register")) {
+    if (InstOpRec->isSubClassOf("OptionalDefOperand")) {
+      DagInit *DI = InstOpRec->getValueAsDag("MIOperandInfo");
+      // The operand info should only have a single (register) entry. We
+      // want the register class of it.
+      InstOpRec = cast<DefInit>(DI->getArg(0))->getDef();
+    }
+
+    if (!InstOpRec->isSubClassOf("RegisterClass"))
+      return false;
+
+    // A fixed register must actually belong to the operand's class.
+    if (!T.getRegisterClass(InstOpRec)
+          .contains(T.getRegBank().getReg(ADI->getDef())))
+      PrintFatalError(Loc, "fixed register " + ADI->getDef()->getName() +
+                      " is not a member of the " + InstOpRec->getName() +
+                      " register class!");
+
+    if (Result->getArgName(AliasOpNo))
+      PrintFatalError(Loc, "result fixed register argument must "
+                      "not have a name!");
+
+    ResOp = ResultOperand(ResultRecord);
+    return true;
+  }
+
+  // Handle "zero_reg" for optional def operands.
+  if (ADI && ADI->getDef()->getName() == "zero_reg") {
+
+    // Check if this is an optional def.
+    // Tied operands where the source is a sub-operand of a complex operand
+    // need to represent both operands in the alias destination instruction.
+    // Allow zero_reg for the tied portion. This can and should go away once
+    // the MC representation of things doesn't use tied operands at all.
+    //if (!InstOpRec->isSubClassOf("OptionalDefOperand"))
+    //  throw TGError(Loc, "reg0 used for result that is not an "
+    //                "OptionalDefOperand!");
+
+    // A null register record stands for "no register".
+    ResOp = ResultOperand(static_cast<Record*>(nullptr));
+    return true;
+  }
+
+  // Literal integers.
+  if (IntInit *II = dyn_cast<IntInit>(Arg)) {
+    if (hasSubOps || !InstOpRec->isSubClassOf("Operand"))
+      return false;
+    // Integer arguments can't have names.
+    if (Result->getArgName(AliasOpNo))
+      PrintFatalError(Loc, "result argument #" + Twine(AliasOpNo) +
+                      " must not have a name!");
+    ResOp = ResultOperand(II->getValue());
+    return true;
+  }
+
+  // Bits<n> (also used for 0bxx literals)
+  if (BitsInit *BI = dyn_cast<BitsInit>(Arg)) {
+    if (hasSubOps || !InstOpRec->isSubClassOf("Operand"))
+      return false;
+    if (!BI->isComplete())
+      return false;
+    // Convert the bits init to an integer and use that for the result.
+    IntInit *II = dyn_cast_or_null<IntInit>(
+        BI->convertInitializerTo(IntRecTy::get(BI->getRecordKeeper())));
+    if (!II)
+      return false;
+    ResOp = ResultOperand(II->getValue());
+    return true;
+  }
+
+  // If both are Operands with the same MVT, allow the conversion. It's
+  // up to the user to make sure the values are appropriate, just like
+  // for isel Pat's.
+  if (InstOpRec->isSubClassOf("Operand") && ADI &&
+      ADI->getDef()->isSubClassOf("Operand")) {
+    // FIXME: What other attributes should we check here? Identical
+    // MIOperandInfo perhaps?
+    if (InstOpRec->getValueInit("Type") != ADI->getDef()->getValueInit("Type"))
+      return false;
+    ResOp = ResultOperand(std::string(Result->getArgNameStr(AliasOpNo)),
+                          ADI->getDef());
+    return true;
+  }
+
+  return false;
+}
+
+/// Return how many MachineInstr operands this result operand expands to.
+/// Immediates and fixed registers are always a single MI operand; Operand
+/// records may expand per their MIOperandInfo dag (empty means one).
+unsigned CodeGenInstAlias::ResultOperand::getMINumOperands() const {
+  if (!isRecord())
+    return 1;
+
+  Record *OpRec = getRecord();
+  if (!OpRec->isSubClassOf("Operand"))
+    return 1;
+
+  // An empty MIOperandInfo dag means the default of one MI operand.
+  unsigned NumSubOps = OpRec->getValueAsDag("MIOperandInfo")->getNumArgs();
+  return NumSubOps ? NumSubOps : 1;
+}
+
+/// Decode an InstAlias record: resolve the result instruction, verify that
+/// repeated argument names agree on their class, then match every alias
+/// argument against the corresponding instruction operand (or its
+/// suboperands), filling ResultOperands / ResultInstOperandIndex.
+CodeGenInstAlias::CodeGenInstAlias(Record *R, CodeGenTarget &T)
+    : TheDef(R) {
+  Result = R->getValueAsDag("ResultInst");
+  AsmString = std::string(R->getValueAsString("AsmString"));
+
+  // Verify that the root of the result is an instruction.
+  DefInit *DI = dyn_cast<DefInit>(Result->getOperator());
+  if (!DI || !DI->getDef()->isSubClassOf("Instruction"))
+    PrintFatalError(R->getLoc(),
+                    "result of inst alias should be an instruction");
+
+  ResultInst = &T.getInstruction(DI->getDef());
+
+  // NameClass - If argument names are repeated, we need to verify they have
+  // the same class.
+  StringMap<Record*> NameClass;
+  for (unsigned i = 0, e = Result->getNumArgs(); i != e; ++i) {
+    DefInit *ADI = dyn_cast<DefInit>(Result->getArg(i));
+    if (!ADI || !Result->getArgName(i))
+      continue;
+    // Verify we don't have something like: (someinst GR16:$foo, GR32:$foo)
+    // $foo can exist multiple times in the result list, but it must have the
+    // same type.
+    Record *&Entry = NameClass[Result->getArgNameStr(i)];
+    if (Entry && Entry != ADI->getDef())
+      PrintFatalError(R->getLoc(), "result value $" + Result->getArgNameStr(i) +
+                      " is both " + Entry->getName() + " and " +
+                      ADI->getDef()->getName() + "!");
+    Entry = ADI->getDef();
+  }
+
+  // Decode and validate the arguments of the result.
+  // AliasOpNo walks the alias's argument list while i walks the result
+  // instruction's operand list; the two advance at different rates.
+  unsigned AliasOpNo = 0;
+  for (unsigned i = 0, e = ResultInst->Operands.size(); i != e; ++i) {
+
+    // Tied registers don't have an entry in the result dag unless they're part
+    // of a complex operand, in which case we include them anyways, as we
+    // don't have any other way to specify the whole operand.
+    if (ResultInst->Operands[i].MINumOperands == 1 &&
+        ResultInst->Operands[i].getTiedRegister() != -1) {
+      // Tied operands of different RegisterClass should be explicit within an
+      // instruction's syntax and so cannot be skipped.
+      int TiedOpNum = ResultInst->Operands[i].getTiedRegister();
+      if (ResultInst->Operands[i].Rec->getName() ==
+          ResultInst->Operands[TiedOpNum].Rec->getName())
+        continue;
+    }
+
+    if (AliasOpNo >= Result->getNumArgs())
+      PrintFatalError(R->getLoc(), "not enough arguments for instruction!");
+
+    Record *InstOpRec = ResultInst->Operands[i].Rec;
+    unsigned NumSubOps = ResultInst->Operands[i].MINumOperands;
+    ResultOperand ResOp(static_cast<int64_t>(0));
+    if (tryAliasOpMatch(Result, AliasOpNo, InstOpRec, (NumSubOps > 1),
+                        R->getLoc(), T, ResOp)) {
+      // If this is a simple operand, or a complex operand with a custom match
+      // class, then we can match is verbatim.
+      if (NumSubOps == 1 ||
+          (InstOpRec->getValue("ParserMatchClass") &&
+           InstOpRec->getValueAsDef("ParserMatchClass")
+               ->getValueAsString("Name") != "Imm")) {
+        ResultOperands.push_back(ResOp);
+        // Suboperand index -1 means "whole operand matched".
+        ResultInstOperandIndex.push_back(std::make_pair(i, -1));
+        ++AliasOpNo;
+
+        // Otherwise, we need to match each of the suboperands individually.
+      } else {
+        DagInit *MIOI = ResultInst->Operands[i].MIOperandInfo;
+        for (unsigned SubOp = 0; SubOp != NumSubOps; ++SubOp) {
+          Record *SubRec = cast<DefInit>(MIOI->getArg(SubOp))->getDef();
+
+          // Take care to instantiate each of the suboperands with the correct
+          // nomenclature: $foo.bar
+          ResultOperands.emplace_back(
+              Result->getArgName(AliasOpNo)->getAsUnquotedString() + "." +
+                  MIOI->getArgName(SubOp)->getAsUnquotedString(), SubRec);
+          ResultInstOperandIndex.push_back(std::make_pair(i, SubOp));
+        }
+        ++AliasOpNo;
+      }
+      continue;
+    }
+
+    // If the argument did not match the instruction operand, and the operand
+    // is composed of multiple suboperands, try matching the suboperands.
+    if (NumSubOps > 1) {
+      DagInit *MIOI = ResultInst->Operands[i].MIOperandInfo;
+      for (unsigned SubOp = 0; SubOp != NumSubOps; ++SubOp) {
+        if (AliasOpNo >= Result->getNumArgs())
+          PrintFatalError(R->getLoc(), "not enough arguments for instruction!");
+        Record *SubRec = cast<DefInit>(MIOI->getArg(SubOp))->getDef();
+        if (tryAliasOpMatch(Result, AliasOpNo, SubRec, false,
+                            R->getLoc(), T, ResOp)) {
+          ResultOperands.push_back(ResOp);
+          ResultInstOperandIndex.push_back(std::make_pair(i, SubOp));
+          ++AliasOpNo;
+        } else {
+          PrintFatalError(R->getLoc(), "result argument #" + Twine(AliasOpNo) +
+                          " does not match instruction operand class " +
+                          (SubOp == 0 ? InstOpRec->getName() :SubRec->getName()));
+        }
+      }
+      continue;
+    }
+    PrintFatalError(R->getLoc(), "result argument #" + Twine(AliasOpNo) +
+                    " does not match instruction operand class " +
+                    InstOpRec->getName());
+  }
+
+  if (AliasOpNo != Result->getNumArgs())
+    PrintFatalError(R->getLoc(), "too many operands for instruction!");
+}
diff --git a/contrib/libs/llvm16/utils/TableGen/CodeGenInstruction.h b/contrib/libs/llvm16/utils/TableGen/CodeGenInstruction.h
new file mode 100644
index 0000000000..72626caada
--- /dev/null
+++ b/contrib/libs/llvm16/utils/TableGen/CodeGenInstruction.h
@@ -0,0 +1,410 @@
+//===- CodeGenInstruction.h - Instruction Class Wrapper ---------*- C++ -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// This file defines a wrapper class for the 'Instruction' TableGen class.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_UTILS_TABLEGEN_CODEGENINSTRUCTION_H
+#define LLVM_UTILS_TABLEGEN_CODEGENINSTRUCTION_H
+
+#include "llvm/ADT/BitVector.h"
+#include "llvm/ADT/StringMap.h"
+#include "llvm/ADT/StringRef.h"
+#include "llvm/Support/MachineValueType.h"
+#include <cassert>
+#include <string>
+#include <utility>
+#include <vector>
+
+namespace llvm {
+class SMLoc;
+template <typename T> class ArrayRef;
+ class Record;
+ class DagInit;
+ class CodeGenTarget;
+
+  class CGIOperandList {
+  public:
+    /// ConstraintInfo - A constraint attached to one (sub-)operand: either
+    /// no constraint, an early-clobber marker, or a tie to another
+    /// (flattened) MI operand number.
+    class ConstraintInfo {
+      enum { None, EarlyClobber, Tied } Kind = None;
+      unsigned OtherTiedOperand = 0;
+
+    public:
+      ConstraintInfo() = default;
+
+      static ConstraintInfo getEarlyClobber() {
+        ConstraintInfo I;
+        I.Kind = EarlyClobber;
+        I.OtherTiedOperand = 0;
+        return I;
+      }
+
+      static ConstraintInfo getTied(unsigned Op) {
+        ConstraintInfo I;
+        I.Kind = Tied;
+        I.OtherTiedOperand = Op;
+        return I;
+      }
+
+      bool isNone() const { return Kind == None; }
+      bool isEarlyClobber() const { return Kind == EarlyClobber; }
+      bool isTied() const { return Kind == Tied; }
+
+      /// getTiedOperand - Return the flattened MI operand number this
+      /// operand is tied to.  Only valid when isTied().
+      unsigned getTiedOperand() const {
+        assert(isTied());
+        return OtherTiedOperand;
+      }
+
+      bool operator==(const ConstraintInfo &RHS) const {
+        if (Kind != RHS.Kind)
+          return false;
+        if (Kind == Tied && OtherTiedOperand != RHS.OtherTiedOperand)
+          return false;
+        return true;
+      }
+      bool operator!=(const ConstraintInfo &RHS) const {
+        return !(*this == RHS);
+      }
+    };
+
+    /// OperandInfo - The information we keep track of for each operand in the
+    /// operand list for a tablegen instruction.
+    struct OperandInfo {
+      /// Rec - The definition this operand is declared as.
+      ///
+      Record *Rec;
+
+      /// Name - If this operand was assigned a symbolic name, this is it,
+      /// otherwise, it's empty.
+      std::string Name;
+
+      /// The names of sub-operands, if given, otherwise empty.
+      std::vector<std::string> SubOpNames;
+
+      /// PrinterMethodName - The method used to print operands of this type in
+      /// the asmprinter.
+      std::string PrinterMethodName;
+
+      /// The method used to get the machine operand value for binary
+      /// encoding, per sub-operand. If empty, uses "getMachineOpValue".
+      std::vector<std::string> EncoderMethodNames;
+
+      /// OperandType - A value from MCOI::OperandType representing the type of
+      /// the operand.
+      std::string OperandType;
+
+      /// MIOperandNo - Currently (this is meant to be phased out), some logical
+      /// operands correspond to multiple MachineInstr operands. In the X86
+      /// target for example, one address operand is represented as 4
+      /// MachineOperands. Because of this, the operand number in the
+      /// OperandList may not match the MachineInstr operand num. Until it
+      /// does, this contains the MI operand index of this operand.
+      unsigned MIOperandNo;
+      unsigned MINumOperands; // The number of MI operands this expands to.
+
+      /// DoNotEncode - Bools are set to true in this vector for each operand in
+      /// the DisableEncoding list. These should not be emitted by the code
+      /// emitter.
+      BitVector DoNotEncode;
+
+      /// MIOperandInfo - Default MI operand type. Note an operand may be made
+      /// up of multiple MI operands.
+      DagInit *MIOperandInfo;
+
+      /// Constraint info for this operand. This operand can have pieces, so we
+      /// track constraint info for each.
+      std::vector<ConstraintInfo> Constraints;
+
+      OperandInfo(Record *R, const std::string &N, const std::string &PMN,
+                  const std::string &OT, unsigned MION, unsigned MINO,
+                  DagInit *MIOI)
+          : Rec(R), Name(N), SubOpNames(MINO), PrinterMethodName(PMN),
+            EncoderMethodNames(MINO), OperandType(OT), MIOperandNo(MION),
+            MINumOperands(MINO), DoNotEncode(MINO), MIOperandInfo(MIOI),
+            Constraints(MINO) {}
+
+      /// getTiedRegister - If this operand is tied to another one, return the
+      /// other operand number. Otherwise, return -1.
+      int getTiedRegister() const {
+        for (unsigned j = 0, e = Constraints.size(); j != e; ++j) {
+          const CGIOperandList::ConstraintInfo &CI = Constraints[j];
+          if (CI.isTied()) return CI.getTiedOperand();
+        }
+        return -1;
+      }
+    };
+
+    CGIOperandList(Record *D);
+
+    Record *TheDef;            // The actual record containing this OperandList.
+
+    /// NumDefs - Number of def operands declared, this is the number of
+    /// elements in the instruction's (outs) list.
+    ///
+    unsigned NumDefs;
+
+    /// OperandList - The list of declared operands, along with their declared
+    /// type (which is a record).
+    std::vector<OperandInfo> OperandList;
+
+    /// SubOpAliases - List of alias names for suboperands.
+    StringMap<std::pair<unsigned, unsigned>> SubOpAliases;
+
+    // Information gleaned from the operand list.
+    bool isPredicable;
+    bool hasOptionalDef;
+    bool isVariadic;
+
+    // Provide transparent accessors to the operand list.
+    bool empty() const { return OperandList.empty(); }
+    unsigned size() const { return OperandList.size(); }
+    const OperandInfo &operator[](unsigned i) const { return OperandList[i]; }
+    OperandInfo &operator[](unsigned i) { return OperandList[i]; }
+    OperandInfo &back() { return OperandList.back(); }
+    const OperandInfo &back() const { return OperandList.back(); }
+
+    typedef std::vector<OperandInfo>::iterator iterator;
+    typedef std::vector<OperandInfo>::const_iterator const_iterator;
+    iterator begin() { return OperandList.begin(); }
+    const_iterator begin() const { return OperandList.begin(); }
+    iterator end() { return OperandList.end(); }
+    const_iterator end() const { return OperandList.end(); }
+
+    /// getOperandNamed - Return the index of the operand with the specified
+    /// non-empty name. If the instruction does not have an operand with the
+    /// specified name, abort.
+    unsigned getOperandNamed(StringRef Name) const;
+
+    /// hasOperandNamed - Query whether the instruction has an operand of the
+    /// given name. If so, return true and set OpIdx to the index of the
+    /// operand. Otherwise, return false.
+    bool hasOperandNamed(StringRef Name, unsigned &OpIdx) const;
+
+    /// hasSubOperandAlias - Query whether Name is an alias for a suboperand;
+    /// if so, set SubOp to the (operand, suboperand) pair it refers to.
+    bool hasSubOperandAlias(StringRef Name,
+                            std::pair<unsigned, unsigned> &SubOp) const;
+
+    /// ParseOperandName - Parse an operand name like "$foo" or "$foo.bar",
+    /// where $foo is a whole operand and $foo.bar refers to a suboperand.
+    /// This aborts if the name is invalid. If AllowWholeOp is true, references
+    /// to operands with suboperands are allowed, otherwise not.
+    std::pair<unsigned,unsigned> ParseOperandName(StringRef Op,
+                                                  bool AllowWholeOp = true);
+
+    /// getFlattenedOperandNumber - Flatten a operand/suboperand pair into a
+    /// flat machineinstr operand #.
+    unsigned getFlattenedOperandNumber(std::pair<unsigned,unsigned> Op) const {
+      return OperandList[Op.first].MIOperandNo + Op.second;
+    }
+
+    /// getSubOperandNumber - Unflatten a operand number into an
+    /// operand/suboperand pair.
+    std::pair<unsigned,unsigned> getSubOperandNumber(unsigned Op) const {
+      for (unsigned i = 0; ; ++i) {
+        assert(i < OperandList.size() && "Invalid flat operand #");
+        if (OperandList[i].MIOperandNo+OperandList[i].MINumOperands > Op)
+          return std::make_pair(i, Op-OperandList[i].MIOperandNo);
+      }
+    }
+
+
+    /// isFlatOperandNotEmitted - Return true if the specified flat operand #
+    /// should not be emitted with the code emitter.
+    bool isFlatOperandNotEmitted(unsigned FlatOpNo) const {
+      std::pair<unsigned,unsigned> Op = getSubOperandNumber(FlatOpNo);
+      if (OperandList[Op.first].DoNotEncode.size() > Op.second)
+        return OperandList[Op.first].DoNotEncode[Op.second];
+      return false;
+    }
+
+    /// ProcessDisableEncoding - Parse the DisableEncoding string and mark
+    /// the named operands in DoNotEncode.
+    void ProcessDisableEncoding(StringRef Value);
+  };
+
+
+  /// CodeGenInstruction - A wrapper around an 'Instruction' TableGen record
+  /// exposing its operand list, flags, and asm string to the backends.
+  class CodeGenInstruction {
+  public:
+    Record *TheDef;            // The actual record defining this instruction.
+    StringRef Namespace;       // The namespace the instruction is in.
+
+    /// AsmString - The format string used to emit a .s file for the
+    /// instruction.
+    std::string AsmString;
+
+    /// Operands - This is information about the (ins) and (outs) list specified
+    /// to the instruction.
+    CGIOperandList Operands;
+
+    /// ImplicitDefs/ImplicitUses - These are lists of registers that are
+    /// implicitly defined and used by the instruction.
+    std::vector<Record*> ImplicitDefs, ImplicitUses;
+
+    // Various boolean values we track for the instruction.
+    bool isPreISelOpcode : 1;
+    bool isReturn : 1;
+    bool isEHScopeReturn : 1;
+    bool isBranch : 1;
+    bool isIndirectBranch : 1;
+    bool isCompare : 1;
+    bool isMoveImm : 1;
+    bool isMoveReg : 1;
+    bool isBitcast : 1;
+    bool isSelect : 1;
+    bool isBarrier : 1;
+    bool isCall : 1;
+    bool isAdd : 1;
+    bool isTrap : 1;
+    bool canFoldAsLoad : 1;
+    bool mayLoad : 1;
+    bool mayLoad_Unset : 1;    // True if mayLoad was left unset in the .td.
+    bool mayStore : 1;
+    bool mayStore_Unset : 1;   // True if mayStore was left unset in the .td.
+    bool mayRaiseFPException : 1;
+    bool isPredicable : 1;
+    bool isConvertibleToThreeAddress : 1;
+    bool isCommutable : 1;
+    bool isTerminator : 1;
+    bool isReMaterializable : 1;
+    bool hasDelaySlot : 1;
+    bool usesCustomInserter : 1;
+    bool hasPostISelHook : 1;
+    bool hasCtrlDep : 1;
+    bool isNotDuplicable : 1;
+    bool hasSideEffects : 1;
+    bool hasSideEffects_Unset : 1;
+    bool isAsCheapAsAMove : 1;
+    bool hasExtraSrcRegAllocReq : 1;
+    bool hasExtraDefRegAllocReq : 1;
+    bool isCodeGenOnly : 1;
+    bool isPseudo : 1;
+    bool isMeta : 1;
+    bool isRegSequence : 1;
+    bool isExtractSubreg : 1;
+    bool isInsertSubreg : 1;
+    bool isConvergent : 1;
+    bool hasNoSchedulingInfo : 1;
+    bool FastISelShouldIgnore : 1;
+    bool hasChain : 1;
+    bool hasChain_Inferred : 1;
+    bool variadicOpsAreDefs : 1;
+    bool isAuthenticated : 1;
+
+    /// DeprecatedReason - If the instruction is deprecated, the predicate
+    /// string or feature-mask description explaining why; otherwise empty.
+    std::string DeprecatedReason;
+    bool HasComplexDeprecationPredicate;
+
+    /// Are there any undefined flags?
+    bool hasUndefFlags() const {
+      return mayLoad_Unset || mayStore_Unset || hasSideEffects_Unset;
+    }
+
+    // The record used to infer instruction flags, or NULL if no flag values
+    // have been inferred.
+    Record *InferredFrom;
+
+    CodeGenInstruction(Record *R);
+
+    /// HasOneImplicitDefWithKnownVT - If the instruction has at least one
+    /// implicit def and it has a known VT, return the VT, otherwise return
+    /// MVT::Other.
+    MVT::SimpleValueType
+    HasOneImplicitDefWithKnownVT(const CodeGenTarget &TargetInfo) const;
+
+
+    /// FlattenAsmStringVariants - Flatten the specified AsmString to only
+    /// include text from the specified variant, returning the new string.
+    static std::string FlattenAsmStringVariants(StringRef AsmString,
+                                                unsigned Variant);
+
+    // Is the specified operand in a generic instruction implicitly a pointer.
+    // This can be used on instructions that use typeN or ptypeN to identify
+    // operands that should be considered as pointers even though SelectionDAG
+    // didn't make a distinction between integer and pointers.
+    bool isInOperandAPointer(unsigned i) const {
+      return isOperandImpl("InOperandList", i, "IsPointer");
+    }
+
+    bool isOutOperandAPointer(unsigned i) const {
+      return isOperandImpl("OutOperandList", i, "IsPointer");
+    }
+
+    /// Check if the operand is required to be an immediate.
+    bool isInOperandImmArg(unsigned i) const {
+      return isOperandImpl("InOperandList", i, "IsImmediate");
+    }
+
+  private:
+    /// isOperandImpl - Shared helper for the queries above: true iff operand
+    /// \p i of the dag field \p OpListName is a TypedOperand record with bit
+    /// \p PropertyName set.
+    bool isOperandImpl(StringRef OpListName, unsigned i,
+                       StringRef PropertyName) const;
+  };
+
+
+ /// CodeGenInstAlias - This represents an InstAlias definition.
+  /// CodeGenInstAlias - This represents an InstAlias definition.
+  class CodeGenInstAlias {
+  public:
+    Record *TheDef;            // The actual record defining this InstAlias.
+
+    /// AsmString - The format string used to emit a .s file for the
+    /// instruction.
+    std::string AsmString;
+
+    /// Result - The result instruction.
+    DagInit *Result;
+
+    /// ResultInst - The instruction generated by the alias (decoded from
+    /// Result).
+    CodeGenInstruction *ResultInst;
+
+
+    /// ResultOperand - One decoded operand of the alias result: either a
+    /// named record operand, a literal immediate, or a fixed register
+    /// (a null register record means "no register", e.g. zero_reg).
+    struct ResultOperand {
+    private:
+      std::string Name;
+      Record *R = nullptr;
+      int64_t Imm = 0;
+
+    public:
+      enum {
+        K_Record,
+        K_Imm,
+        K_Reg
+      } Kind;
+
+      ResultOperand(std::string N, Record *r)
+          : Name(std::move(N)), R(r), Kind(K_Record) {}
+      ResultOperand(int64_t I) : Imm(I), Kind(K_Imm) {}
+      ResultOperand(Record *r) : R(r), Kind(K_Reg) {}
+
+      bool isRecord() const { return Kind == K_Record; }
+      bool isImm() const { return Kind == K_Imm; }
+      bool isReg() const { return Kind == K_Reg; }
+
+      StringRef getName() const { assert(isRecord()); return Name; }
+      Record *getRecord() const { assert(isRecord()); return R; }
+      int64_t getImm() const { assert(isImm()); return Imm; }
+      Record *getRegister() const { assert(isReg()); return R; }
+
+      /// getMINumOperands - Number of MachineInstr operands this result
+      /// operand expands to (per its MIOperandInfo).
+      unsigned getMINumOperands() const;
+    };
+
+    /// ResultOperands - The decoded operands for the result instruction.
+    std::vector<ResultOperand> ResultOperands;
+
+    /// ResultInstOperandIndex - For each operand, this vector holds a pair of
+    /// indices to identify the corresponding operand in the result
+    /// instruction. The first index specifies the operand and the second
+    /// index specifies the suboperand. If there are no suboperands or if all
+    /// of them are matched by the operand, the second value should be -1.
+    std::vector<std::pair<unsigned, int> > ResultInstOperandIndex;
+
+    CodeGenInstAlias(Record *R, CodeGenTarget &T);
+
+    /// tryAliasOpMatch - Check whether result argument #AliasOpNo matches
+    /// the instruction operand described by InstOpRec; on success fill in
+    /// ResOp and return true.
+    bool tryAliasOpMatch(DagInit *Result, unsigned AliasOpNo,
+                         Record *InstOpRec, bool hasSubOps, ArrayRef<SMLoc> Loc,
+                         CodeGenTarget &T, ResultOperand &ResOp);
+  };
+}
+
+#endif
diff --git a/contrib/libs/llvm16/utils/TableGen/CodeGenIntrinsics.h b/contrib/libs/llvm16/utils/TableGen/CodeGenIntrinsics.h
new file mode 100644
index 0000000000..0558918b30
--- /dev/null
+++ b/contrib/libs/llvm16/utils/TableGen/CodeGenIntrinsics.h
@@ -0,0 +1,189 @@
+//===- CodeGenIntrinsic.h - Intrinsic Class Wrapper ------------*- C++ -*--===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// This file defines a wrapper class for the 'Intrinsic' TableGen class.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_UTILS_TABLEGEN_CODEGENINTRINSICS_H
+#define LLVM_UTILS_TABLEGEN_CODEGENINTRINSICS_H
+
+#include "SDNodeProperties.h"
+#include "llvm/ADT/SmallVector.h"
+#include "llvm/Support/MachineValueType.h"
+#include "llvm/Support/ModRef.h"
+#include <string>
+#include <vector>
+
+namespace llvm {
+class Record;
+class RecordKeeper;
+
/// CodeGenIntrinsic - Wrapper around a TableGen 'Intrinsic' record that
/// exposes its decoded name, type signature, memory effects and attributes.
struct CodeGenIntrinsic {
  Record *TheDef; // The actual record defining this intrinsic.
  std::string Name; // The name of the LLVM function "llvm.bswap.i32"
  std::string EnumName; // The name of the enum "bswap_i32"
  std::string ClangBuiltinName; // Name of the corresponding Clang/GCC builtin, or "".
  std::string MSBuiltinName; // Name of the corresponding MS builtin, or "".
  std::string TargetPrefix; // Target prefix, e.g. "ppc" for t-s intrinsics.

  /// This structure holds the return values and parameter values of an
  /// intrinsic. If the number of return values is > 1, then the intrinsic
  /// implicitly returns a first-class aggregate. The numbering of the types
  /// starts at 0 with the first return value and continues from there through
  /// the parameter list. This is useful for "matching" types.
  struct IntrinsicSignature {
    /// The MVT::SimpleValueType for each return type. Note that this list is
    /// only populated when in the context of a target .td file. When building
    /// Intrinsics.td, this isn't available, because we don't know the target
    /// pointer size.
    std::vector<MVT::SimpleValueType> RetVTs;

    /// The records for each return type.
    std::vector<Record *> RetTypeDefs;

    /// The MVT::SimpleValueType for each parameter type. Note that this list is
    /// only populated when in the context of a target .td file. When building
    /// Intrinsics.td, this isn't available, because we don't know the target
    /// pointer size.
    std::vector<MVT::SimpleValueType> ParamVTs;

    /// The records for each parameter type.
    std::vector<Record *> ParamTypeDefs;
  };

  /// The decoded return/parameter signature of this intrinsic.
  IntrinsicSignature IS;

  /// Memory effects of the intrinsic.
  MemoryEffects ME = MemoryEffects::unknown();

  /// SDPatternOperator Properties applied to the intrinsic.
  /// A bitmask indexed by the SDNP enumerators; see hasProperty().
  unsigned Properties;

  /// This is set to true if the intrinsic is overloaded by its argument
  /// types.
  bool isOverloaded;

  /// True if the intrinsic is commutative.
  bool isCommutative;

  /// True if the intrinsic can throw.
  bool canThrow;

  /// True if the intrinsic is marked as noduplicate.
  bool isNoDuplicate;

  /// True if the intrinsic is marked as nomerge.
  bool isNoMerge;

  /// True if the intrinsic is no-return.
  bool isNoReturn;

  /// True if the intrinsic is no-callback.
  bool isNoCallback;

  /// True if the intrinsic is no-sync.
  bool isNoSync;

  /// True if the intrinsic is no-free.
  bool isNoFree;

  /// True if the intrinsic is will-return.
  bool isWillReturn;

  /// True if the intrinsic is cold.
  bool isCold;

  /// True if the intrinsic is marked as convergent.
  bool isConvergent;

  /// True if the intrinsic has side effects that aren't captured by any
  /// of the other flags.
  bool hasSideEffects;

  // True if the intrinsic is marked as speculatable.
  bool isSpeculatable;

  /// Attribute kinds that may be attached to an individual argument or
  /// return value of an intrinsic.
  enum ArgAttrKind {
    NoCapture,
    NoAlias,
    NoUndef,
    NonNull,
    Returned,
    ReadOnly,
    WriteOnly,
    ReadNone,
    ImmArg,
    Alignment
  };

  /// A single (kind, value) argument attribute. Value is an optional integer
  /// payload (0 when the kind carries none).
  struct ArgAttribute {
    ArgAttrKind Kind;
    uint64_t Value;

    ArgAttribute(ArgAttrKind K, uint64_t V) : Kind(K), Value(V) {}

    // Lexicographic order so attribute lists can be kept sorted.
    bool operator<(const ArgAttribute &Other) const {
      return std::tie(Kind, Value) < std::tie(Other.Kind, Other.Value);
    }
  };

  /// Vector of attributes for each argument.
  SmallVector<SmallVector<ArgAttribute, 0>> ArgumentAttributes;

  /// Record attribute \p AK (with optional payload \p V) for argument \p Idx.
  void addArgAttribute(unsigned Idx, ArgAttrKind AK, uint64_t V = 0);

  /// Returns true if SDPatternOperator property \p Prop is set
  /// (bit test against the Properties mask).
  bool hasProperty(enum SDNP Prop) const {
    return Properties & (1 << Prop);
  }

  /// Goes through all IntrProperties that have IsDefault
  /// value set and sets the property.
  void setDefaultProperties(Record *R, std::vector<Record *> DefaultProperties);

  /// Helper function to set property \p Name to true;
  void setProperty(Record *R);

  /// Returns true if the parameter at \p ParamIdx is a pointer type. Returns
  /// false if the parameter is not a pointer, or \p ParamIdx is greater than
  /// the size of \p IS.ParamVTs.
  ///
  /// Note that this requires that \p IS.ParamVTs is available.
  bool isParamAPointer(unsigned ParamIdx) const;

  // Presumably tests whether parameter \p ParamIdx carries the ImmArg
  // attribute — confirm against the out-of-line implementation.
  bool isParamImmArg(unsigned ParamIdx) const;

  /// Decode record \p R, applying \p DefaultProperties first.
  CodeGenIntrinsic(Record *R, std::vector<Record *> DefaultProperties);
};
+
/// CodeGenIntrinsicTable - A flat list of all intrinsics gathered from a
/// RecordKeeper, partitioned into contiguous per-target ranges.
class CodeGenIntrinsicTable {
  std::vector<CodeGenIntrinsic> Intrinsics;

public:
  /// TargetSet - Describes the half-open slice [Offset, Offset + Count) of
  /// Intrinsics that belongs to the target named Name.
  struct TargetSet {
    std::string Name;
    size_t Offset;
    size_t Count;
  };
  std::vector<TargetSet> Targets;

  explicit CodeGenIntrinsicTable(const RecordKeeper &RC);
  CodeGenIntrinsicTable() = default;

  // Minimal container-style interface over the flat intrinsic list.
  bool empty() const { return Intrinsics.empty(); }
  size_t size() const { return Intrinsics.size(); }
  auto begin() const { return Intrinsics.begin(); }
  auto end() const { return Intrinsics.end(); }
  CodeGenIntrinsic &operator[](size_t Pos) { return Intrinsics[Pos]; }
  const CodeGenIntrinsic &operator[](size_t Pos) const {
    return Intrinsics[Pos];
  }
};
+}
+
+#endif
diff --git a/contrib/libs/llvm16/utils/TableGen/CodeGenMapTable.cpp b/contrib/libs/llvm16/utils/TableGen/CodeGenMapTable.cpp
new file mode 100644
index 0000000000..02695942f5
--- /dev/null
+++ b/contrib/libs/llvm16/utils/TableGen/CodeGenMapTable.cpp
@@ -0,0 +1,605 @@
+//===- CodeGenMapTable.cpp - Instruction Mapping Table Generator ----------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+// CodeGenMapTable provides functionality for the TableGen to create
+// relation mapping between instructions. Relation models are defined using
+// InstrMapping as a base class. This file implements the functionality which
+// parses these definitions and generates relation maps using the information
+// specified there. These maps are emitted as tables in the XXXGenInstrInfo.inc
+// file along with the functions to query them.
+//
+// A relationship model to relate non-predicate instructions with their
+// predicated true/false forms can be defined as follows:
+//
+// def getPredOpcode : InstrMapping {
+// let FilterClass = "PredRel";
+// let RowFields = ["BaseOpcode"];
+// let ColFields = ["PredSense"];
+// let KeyCol = ["none"];
+// let ValueCols = [["true"], ["false"]]; }
+//
+// CodeGenMapTable parses this map and generates a table in XXXGenInstrInfo.inc
+// file that contains the instructions modeling this relationship. This table
+// is defined in the function
+// "int getPredOpcode(uint16_t Opcode, enum PredSense inPredSense)"
+// that can be used to retrieve the predicated form of the instruction by
+// passing its opcode value and the predicate sense (true/false) of the desired
+// instruction as arguments.
+//
+// Short description of the algorithm:
+//
+// 1) Iterate through all the records that derive from "InstrMapping" class.
+// 2) For each record, filter out instructions based on the FilterClass value.
+// 3) Iterate through this set of instructions and insert them into
+// RowInstrMap map based on their RowFields values. RowInstrMap is keyed by the
+// vector of RowFields values and contains vectors of Records (instructions) as
+// values. RowFields is a list of fields that are required to have the same
+// values for all the instructions appearing in the same row of the relation
+// table. All the instructions in a given row of the relation table have some
+// sort of relationship with the key instruction defined by the corresponding
+// relationship model.
+//
+// Ex: RowInstrMap(RowVal1, RowVal2, ...) -> [Instr1, Instr2, Instr3, ... ]
+// Here Instr1, Instr2, Instr3 have same values (RowVal1, RowVal2) for
+// RowFields. These groups of instructions are later matched against ValueCols
+// to determine the column they belong to, if any.
+//
+// While building the RowInstrMap map, collect all the key instructions in
+// KeyInstrVec. These are the instructions having the same values as KeyCol
+// for all the fields listed in ColFields.
+//
+// For Example:
+//
+// Relate non-predicate instructions with their predicated true/false forms.
+//
+// def getPredOpcode : InstrMapping {
+// let FilterClass = "PredRel";
+// let RowFields = ["BaseOpcode"];
+// let ColFields = ["PredSense"];
+// let KeyCol = ["none"];
+// let ValueCols = [["true"], ["false"]]; }
+//
+// Here, only instructions that have "none" as PredSense will be selected as key
+// instructions.
+//
+// 4) For each key instruction, get the group of instructions that share the
+// same key-value as the key instruction from RowInstrMap. Iterate over the list
+// of columns in ValueCols (it is defined as a list<list<string> >. Therefore,
+// it can specify multi-column relationships). For each column, find the
+// instruction from the group that matches all the values for the column.
+// Multiple matches are not allowed.
+//
+//===----------------------------------------------------------------------===//
+
+#include "CodeGenInstruction.h"
+#include "CodeGenTarget.h"
+#include "llvm/TableGen/Error.h"
+using namespace llvm;
+typedef std::map<std::string, std::vector<Record*> > InstrRelMapTy;
+
+typedef std::map<std::vector<Init*>, std::vector<Record*> > RowInstrMapTy;
+
+namespace {
+
+//===----------------------------------------------------------------------===//
+// This class is used to represent InstrMapping class defined in Target.td file.
// InstrMap - In-memory form of one InstrMapping record: the filter class,
// the row/column field lists, the key column, and the value columns.
class InstrMap {
private:
  std::string Name;        // Name of the InstrMapping record.
  std::string FilterClass; // Only instructions deriving from this class apply.
  ListInit *RowFields;     // Fields that must agree across a row.
  ListInit *ColFields;     // Fields whose values identify a column.
  ListInit *KeyCol;        // ColFields values that select key instructions.
  std::vector<ListInit*> ValueCols; // ColFields values of each result column.

public:
  // Decode MapRec; aborts with a fatal error on malformed maps (empty
  // ValueCols, or a column whose arity differs from ColFields).
  InstrMap(Record* MapRec) {
    Name = std::string(MapRec->getName());

    // FilterClass - It's used to reduce the search space only to the
    // instructions that define the kind of relationship modeled by
    // this InstrMapping object/record.
    const RecordVal *Filter = MapRec->getValue("FilterClass");
    FilterClass = Filter->getValue()->getAsUnquotedString();

    // List of fields/attributes that need to be same across all the
    // instructions in a row of the relation table.
    RowFields = MapRec->getValueAsListInit("RowFields");

    // List of fields/attributes that are constant across all the instruction
    // in a column of the relation table. Ex: ColFields = 'predSense'
    ColFields = MapRec->getValueAsListInit("ColFields");

    // Values for the fields/attributes listed in 'ColFields'.
    // Ex: KeyCol = 'noPred' -- key instruction is non-predicated
    KeyCol = MapRec->getValueAsListInit("KeyCol");

    // List of values for the fields/attributes listed in 'ColFields', one for
    // each column in the relation table.
    //
    // Ex: ValueCols = [['true'],['false']] -- it results two columns in the
    // table. First column requires all the instructions to have predSense
    // set to 'true' and second column requires it to be 'false'.
    ListInit *ColValList = MapRec->getValueAsListInit("ValueCols");

    // Each instruction map must specify at least one column for it to be valid.
    if (ColValList->empty())
      PrintFatalError(MapRec->getLoc(), "InstrMapping record `" +
        MapRec->getName() + "' has empty " + "`ValueCols' field!");

    for (Init *I : ColValList->getValues()) {
      auto *ColI = cast<ListInit>(I);

      // Make sure that all the sub-lists in 'ValueCols' have same number of
      // elements as the fields in 'ColFields'.
      if (ColI->size() != ColFields->size())
        PrintFatalError(MapRec->getLoc(), "Record `" + MapRec->getName() +
          "', field `ValueCols' entries don't match with " +
          " the entries in 'ColFields'!");
      ValueCols.push_back(ColI);
    }
  }

  // Trivial accessors for the decoded fields.
  const std::string &getName() const { return Name; }

  const std::string &getFilterClass() const { return FilterClass; }

  ListInit *getRowFields() const { return RowFields; }

  ListInit *getColFields() const { return ColFields; }

  ListInit *getKeyCol() const { return KeyCol; }

  const std::vector<ListInit*> &getValueCols() const {
    return ValueCols;
  }
};
+} // end anonymous namespace
+
+
+//===----------------------------------------------------------------------===//
+// class MapTableEmitter : It builds the instruction relation maps using
+// the information provided in InstrMapping records. It outputs these
+// relationship maps as tables into XXXGenInstrInfo.inc file along with the
+// functions to query them.
+
+namespace {
// MapTableEmitter - Builds one instruction relation map (from a single
// InstrMapping record) and emits it as a sorted table plus a binary-search
// query function.
class MapTableEmitter {
private:
//  std::string TargetName;
  const CodeGenTarget &Target;
  // InstrMapDesc - InstrMapping record to be processed.
  InstrMap InstrMapDesc;

  // InstrDefs - list of instructions filtered using FilterClass defined
  // in InstrMapDesc.
  std::vector<Record*> InstrDefs;

  // RowInstrMap - maps RowFields values to the instructions. It's keyed by the
  // values of the row fields and contains vector of records as values.
  RowInstrMapTy RowInstrMap;

  // KeyInstrVec - list of key instructions.
  std::vector<Record*> KeyInstrVec;
  // MapTable - maps each key instruction to its column instructions,
  // indexed by column number (nullptr where no column match was found).
  DenseMap<Record*, std::vector<Record*> > MapTable;

public:
  MapTableEmitter(CodeGenTarget &Target, RecordKeeper &Records, Record *IMRec):
    Target(Target), InstrMapDesc(IMRec) {
    const std::string &FilterClass = InstrMapDesc.getFilterClass();
    InstrDefs = Records.getAllDerivedDefinitions(FilterClass);
  }

  // Populate RowInstrMap (and KeyInstrVec) from the filtered instructions.
  void buildRowInstrMap();

  // Returns true if an instruction is a key instruction, i.e., its ColFields
  // have same values as KeyCol.
  bool isKeyColInstr(Record* CurInstr);

  // Find column instruction corresponding to a key instruction based on the
  // constraints for that column.
  Record *getInstrForColumn(Record *KeyInstr, ListInit *CurValueCol);

  // Find column instructions for each key instruction based
  // on ValueCols and store them into MapTable.
  void buildMapTable();

  // Emit the binary-search driver for a table of TableSize rows.
  void emitBinSearch(raw_ostream &OS, unsigned TableSize);
  // Emit the relation table followed by its query function.
  void emitTablesWithFunc(raw_ostream &OS);
  // Emit the sorted relation table; returns the number of rows emitted.
  unsigned emitBinSearchTable(raw_ostream &OS);

  // Lookup functions to query binary search tables.
  void emitMapFuncBody(raw_ostream &OS, unsigned TableSize);

};
+} // end anonymous namespace
+
+
+//===----------------------------------------------------------------------===//
+// Process all the instructions that model this relation (already present in
+// InstrDefs) and insert them into RowInstrMap which is keyed by the values of
+// the fields listed as RowFields. It stores vectors of records as values.
+// All the related instructions have the same values for the RowFields thus are
+// part of the same key-value pair.
+//===----------------------------------------------------------------------===//
+
+void MapTableEmitter::buildRowInstrMap() {
+ for (Record *CurInstr : InstrDefs) {
+ std::vector<Init*> KeyValue;
+ ListInit *RowFields = InstrMapDesc.getRowFields();
+ for (Init *RowField : RowFields->getValues()) {
+ RecordVal *RecVal = CurInstr->getValue(RowField);
+ if (RecVal == nullptr)
+ PrintFatalError(CurInstr->getLoc(), "No value " +
+ RowField->getAsString() + " found in \"" +
+ CurInstr->getName() + "\" instruction description.");
+ Init *CurInstrVal = RecVal->getValue();
+ KeyValue.push_back(CurInstrVal);
+ }
+
+ // Collect key instructions into KeyInstrVec. Later, these instructions are
+ // processed to assign column position to the instructions sharing
+ // their KeyValue in RowInstrMap.
+ if (isKeyColInstr(CurInstr))
+ KeyInstrVec.push_back(CurInstr);
+
+ RowInstrMap[KeyValue].push_back(CurInstr);
+ }
+}
+
+//===----------------------------------------------------------------------===//
+// Return true if an instruction is a KeyCol instruction.
+//===----------------------------------------------------------------------===//
+
+bool MapTableEmitter::isKeyColInstr(Record* CurInstr) {
+ ListInit *ColFields = InstrMapDesc.getColFields();
+ ListInit *KeyCol = InstrMapDesc.getKeyCol();
+
+ // Check if the instruction is a KeyCol instruction.
+ bool MatchFound = true;
+ for (unsigned j = 0, endCF = ColFields->size();
+ (j < endCF) && MatchFound; j++) {
+ RecordVal *ColFieldName = CurInstr->getValue(ColFields->getElement(j));
+ std::string CurInstrVal = ColFieldName->getValue()->getAsUnquotedString();
+ std::string KeyColValue = KeyCol->getElement(j)->getAsUnquotedString();
+ MatchFound = (CurInstrVal == KeyColValue);
+ }
+ return MatchFound;
+}
+
+//===----------------------------------------------------------------------===//
+// Build a map to link key instructions with the column instructions arranged
+// according to their column positions.
+//===----------------------------------------------------------------------===//
+
+void MapTableEmitter::buildMapTable() {
+ // Find column instructions for a given key based on the ColField
+ // constraints.
+ const std::vector<ListInit*> &ValueCols = InstrMapDesc.getValueCols();
+ unsigned NumOfCols = ValueCols.size();
+ for (Record *CurKeyInstr : KeyInstrVec) {
+ std::vector<Record*> ColInstrVec(NumOfCols);
+
+ // Find the column instruction based on the constraints for the column.
+ for (unsigned ColIdx = 0; ColIdx < NumOfCols; ColIdx++) {
+ ListInit *CurValueCol = ValueCols[ColIdx];
+ Record *ColInstr = getInstrForColumn(CurKeyInstr, CurValueCol);
+ ColInstrVec[ColIdx] = ColInstr;
+ }
+ MapTable[CurKeyInstr] = ColInstrVec;
+ }
+}
+
+//===----------------------------------------------------------------------===//
+// Find column instruction based on the constraints for that column.
+//===----------------------------------------------------------------------===//
+
+Record *MapTableEmitter::getInstrForColumn(Record *KeyInstr,
+ ListInit *CurValueCol) {
+ ListInit *RowFields = InstrMapDesc.getRowFields();
+ std::vector<Init*> KeyValue;
+
+ // Construct KeyValue using KeyInstr's values for RowFields.
+ for (Init *RowField : RowFields->getValues()) {
+ Init *KeyInstrVal = KeyInstr->getValue(RowField)->getValue();
+ KeyValue.push_back(KeyInstrVal);
+ }
+
+ // Get all the instructions that share the same KeyValue as the KeyInstr
+ // in RowInstrMap. We search through these instructions to find a match
+ // for the current column, i.e., the instruction which has the same values
+ // as CurValueCol for all the fields in ColFields.
+ const std::vector<Record*> &RelatedInstrVec = RowInstrMap[KeyValue];
+
+ ListInit *ColFields = InstrMapDesc.getColFields();
+ Record *MatchInstr = nullptr;
+
+ for (llvm::Record *CurInstr : RelatedInstrVec) {
+ bool MatchFound = true;
+ for (unsigned j = 0, endCF = ColFields->size();
+ (j < endCF) && MatchFound; j++) {
+ Init *ColFieldJ = ColFields->getElement(j);
+ Init *CurInstrInit = CurInstr->getValue(ColFieldJ)->getValue();
+ std::string CurInstrVal = CurInstrInit->getAsUnquotedString();
+ Init *ColFieldJVallue = CurValueCol->getElement(j);
+ MatchFound = (CurInstrVal == ColFieldJVallue->getAsUnquotedString());
+ }
+
+ if (MatchFound) {
+ if (MatchInstr) {
+ // Already had a match
+ // Error if multiple matches are found for a column.
+ std::string KeyValueStr;
+ for (Init *Value : KeyValue) {
+ if (!KeyValueStr.empty())
+ KeyValueStr += ", ";
+ KeyValueStr += Value->getAsString();
+ }
+
+ PrintFatalError("Multiple matches found for `" + KeyInstr->getName() +
+ "', for the relation `" + InstrMapDesc.getName() +
+ "', row fields [" + KeyValueStr + "], column `" +
+ CurValueCol->getAsString() + "'");
+ }
+ MatchInstr = CurInstr;
+ }
+ }
+ return MatchInstr;
+}
+
+//===----------------------------------------------------------------------===//
+// Emit one table per relation. Only instructions with a valid relation of a
+// given type are included in the table sorted by their enum values (opcodes).
+// Binary search is used for locating instructions in the table.
+//===----------------------------------------------------------------------===//
+
// Emit the relation table for this map: one row per instruction that has at
// least one valid column entry. Returns the number of rows emitted so the
// caller can size the generated binary search.
unsigned MapTableEmitter::emitBinSearchTable(raw_ostream &OS) {

  ArrayRef<const CodeGenInstruction*> NumberedInstructions =
                                        Target.getInstructionsByEnumValue();
  StringRef Namespace = Target.getInstNamespace();
  const std::vector<ListInit*> &ValueCols = InstrMapDesc.getValueCols();
  unsigned NumCol = ValueCols.size();
  unsigned TotalNumInstr = NumberedInstructions.size();
  unsigned TableSize = 0;

  OS << "static const uint16_t "<<InstrMapDesc.getName();
  // Number of columns in the table are NumCol+1 because key instructions are
  // emitted as first column.
  OS << "Table[]["<< NumCol+1 << "] = {\n";
  // Walk instructions in enum order so the emitted rows are sorted by opcode,
  // which the generated binary search (emitBinSearch) relies on.
  for (unsigned i = 0; i < TotalNumInstr; i++) {
    Record *CurInstr = NumberedInstructions[i]->TheDef;
    std::vector<Record*> ColInstrs = MapTable[CurInstr];
    std::string OutStr;
    unsigned RelExists = 0; // Set when at least one column has a valid entry.
    if (!ColInstrs.empty()) {
      for (unsigned j = 0; j < NumCol; j++) {
        if (ColInstrs[j] != nullptr) {
          RelExists = 1;
          OutStr += ", ";
          OutStr += Namespace;
          OutStr += "::";
          OutStr += ColInstrs[j]->getName();
        } else { OutStr += ", (uint16_t)-1U";} // No relation in this column.
      }

      // Rows whose columns are all empty are omitted from the table.
      if (RelExists) {
        OS << "  { " << Namespace << "::" << CurInstr->getName();
        OS << OutStr <<" },\n";
        TableSize++;
      }
    }
  }
  // Avoid emitting an empty (ill-formed) initializer list: emit one sentinel
  // row. Only two entries are written; for NumCol > 1 the remaining elements
  // are zero-initialized by the generated aggregate initializer.
  if (!TableSize) {
    OS << "  { " << Namespace << "::" << "INSTRUCTION_LIST_END, ";
    OS << Namespace << "::" << "INSTRUCTION_LIST_END }";
  }
  OS << "}; // End of " << InstrMapDesc.getName() << "Table\n\n";
  return TableSize;
}
+
+//===----------------------------------------------------------------------===//
+// Emit binary search algorithm as part of the functions used to query
+// relation tables.
+//===----------------------------------------------------------------------===//
+
+void MapTableEmitter::emitBinSearch(raw_ostream &OS, unsigned TableSize) {
+ OS << " unsigned mid;\n";
+ OS << " unsigned start = 0;\n";
+ OS << " unsigned end = " << TableSize << ";\n";
+ OS << " while (start < end) {\n";
+ OS << " mid = start + (end - start) / 2;\n";
+ OS << " if (Opcode == " << InstrMapDesc.getName() << "Table[mid][0]) {\n";
+ OS << " break;\n";
+ OS << " }\n";
+ OS << " if (Opcode < " << InstrMapDesc.getName() << "Table[mid][0])\n";
+ OS << " end = mid;\n";
+ OS << " else\n";
+ OS << " start = mid + 1;\n";
+ OS << " }\n";
+ OS << " if (start == end)\n";
+ OS << " return -1; // Instruction doesn't exist in this table.\n\n";
+}
+
+//===----------------------------------------------------------------------===//
+// Emit functions to query relation tables.
+//===----------------------------------------------------------------------===//
+
// Emit the body of the query function: the binary search followed by the
// return of the requested column from the located row.
void MapTableEmitter::emitMapFuncBody(raw_ostream &OS,
                                      unsigned TableSize) {

  ListInit *ColFields = InstrMapDesc.getColFields();
  const std::vector<ListInit*> &ValueCols = InstrMapDesc.getValueCols();

  // Emit binary search algorithm to locate instructions in the
  // relation table. If found, return opcode value from the appropriate column
  // of the table.
  emitBinSearch(OS, TableSize);

  if (ValueCols.size() > 1) {
    // Multiple columns: generate one `if` per column that compares each
    // in-parameter against that column's enum values, and return the
    // corresponding table column (offset by 1 to skip the key column).
    for (unsigned i = 0, e = ValueCols.size(); i < e; i++) {
      ListInit *ColumnI = ValueCols[i];
      OS << "  if (";
      for (unsigned j = 0, ColSize = ColumnI->size(); j < ColSize; ++j) {
        std::string ColName = ColFields->getElement(j)->getAsUnquotedString();
        OS << "in" << ColName;
        OS << " == ";
        OS << ColName << "_" << ColumnI->getElement(j)->getAsUnquotedString();
        if (j < ColumnI->size() - 1)
          OS << " && ";
      }
      OS << ")\n";
      OS << "    return " << InstrMapDesc.getName();
      OS << "Table[mid]["<<i+1<<"];\n";
    }
    OS << "  return -1;";
  }
  else
    // Single value column: column 1 is the only possible result.
    OS << "  return " << InstrMapDesc.getName() << "Table[mid][1];\n";

  OS <<"}\n\n";
}
+
+//===----------------------------------------------------------------------===//
+// Emit relation tables and the functions to query them.
+//===----------------------------------------------------------------------===//
+
+void MapTableEmitter::emitTablesWithFunc(raw_ostream &OS) {
+
+ // Emit function name and the input parameters : mostly opcode value of the
+ // current instruction. However, if a table has multiple columns (more than 2
+ // since first column is used for the key instructions), then we also need
+ // to pass another input to indicate the column to be selected.
+
+ ListInit *ColFields = InstrMapDesc.getColFields();
+ const std::vector<ListInit*> &ValueCols = InstrMapDesc.getValueCols();
+ OS << "// "<< InstrMapDesc.getName() << "\nLLVM_READONLY\n";
+ OS << "int "<< InstrMapDesc.getName() << "(uint16_t Opcode";
+ if (ValueCols.size() > 1) {
+ for (Init *CF : ColFields->getValues()) {
+ std::string ColName = CF->getAsUnquotedString();
+ OS << ", enum " << ColName << " in" << ColName;
+ }
+ }
+ OS << ") {\n";
+
+ // Emit map table.
+ unsigned TableSize = emitBinSearchTable(OS);
+
+ // Emit rest of the function body.
+ emitMapFuncBody(OS, TableSize);
+}
+
+//===----------------------------------------------------------------------===//
+// Emit enums for the column fields across all the instruction maps.
+//===----------------------------------------------------------------------===//
+
+static void emitEnums(raw_ostream &OS, RecordKeeper &Records) {
+
+ std::vector<Record*> InstrMapVec;
+ InstrMapVec = Records.getAllDerivedDefinitions("InstrMapping");
+ std::map<std::string, std::vector<Init*> > ColFieldValueMap;
+
+ // Iterate over all InstrMapping records and create a map between column
+ // fields and their possible values across all records.
+ for (Record *CurMap : InstrMapVec) {
+ ListInit *ColFields;
+ ColFields = CurMap->getValueAsListInit("ColFields");
+ ListInit *List = CurMap->getValueAsListInit("ValueCols");
+ std::vector<ListInit*> ValueCols;
+ unsigned ListSize = List->size();
+
+ for (unsigned j = 0; j < ListSize; j++) {
+ auto *ListJ = cast<ListInit>(List->getElement(j));
+
+ if (ListJ->size() != ColFields->size())
+ PrintFatalError("Record `" + CurMap->getName() + "', field "
+ "`ValueCols' entries don't match with the entries in 'ColFields' !");
+ ValueCols.push_back(ListJ);
+ }
+
+ for (unsigned j = 0, endCF = ColFields->size(); j < endCF; j++) {
+ for (unsigned k = 0; k < ListSize; k++){
+ std::string ColName = ColFields->getElement(j)->getAsUnquotedString();
+ ColFieldValueMap[ColName].push_back((ValueCols[k])->getElement(j));
+ }
+ }
+ }
+
+ for (auto &Entry : ColFieldValueMap) {
+ std::vector<Init*> FieldValues = Entry.second;
+
+ // Delete duplicate entries from ColFieldValueMap
+ for (unsigned i = 0; i < FieldValues.size() - 1; i++) {
+ Init *CurVal = FieldValues[i];
+ for (unsigned j = i+1; j < FieldValues.size(); j++) {
+ if (CurVal == FieldValues[j]) {
+ FieldValues.erase(FieldValues.begin()+j);
+ --j;
+ }
+ }
+ }
+
+ // Emit enumerated values for the column fields.
+ OS << "enum " << Entry.first << " {\n";
+ for (unsigned i = 0, endFV = FieldValues.size(); i < endFV; i++) {
+ OS << "\t" << Entry.first << "_" << FieldValues[i]->getAsUnquotedString();
+ if (i != endFV - 1)
+ OS << ",\n";
+ else
+ OS << "\n};\n\n";
+ }
+ }
+}
+
+namespace llvm {
+//===----------------------------------------------------------------------===//
+// Parse 'InstrMapping' records and use the information to form relationship
+// between instructions. These relations are emitted as a tables along with the
+// functions to query them.
+//===----------------------------------------------------------------------===//
+void EmitMapTable(RecordKeeper &Records, raw_ostream &OS) {
+ CodeGenTarget Target(Records);
+ StringRef NameSpace = Target.getInstNamespace();
+ std::vector<Record*> InstrMapVec;
+ InstrMapVec = Records.getAllDerivedDefinitions("InstrMapping");
+
+ if (InstrMapVec.empty())
+ return;
+
+ OS << "#ifdef GET_INSTRMAP_INFO\n";
+ OS << "#undef GET_INSTRMAP_INFO\n";
+ OS << "namespace llvm {\n\n";
+ OS << "namespace " << NameSpace << " {\n\n";
+
+ // Emit coulumn field names and their values as enums.
+ emitEnums(OS, Records);
+
+ // Iterate over all instruction mapping records and construct relationship
+ // maps based on the information specified there.
+ //
+ for (Record *CurMap : InstrMapVec) {
+ MapTableEmitter IMap(Target, Records, CurMap);
+
+ // Build RowInstrMap to group instructions based on their values for
+ // RowFields. In the process, also collect key instructions into
+ // KeyInstrVec.
+ IMap.buildRowInstrMap();
+
+ // Build MapTable to map key instructions with the corresponding column
+ // instructions.
+ IMap.buildMapTable();
+
+ // Emit map tables and the functions to query them.
+ IMap.emitTablesWithFunc(OS);
+ }
+ OS << "} // end namespace " << NameSpace << "\n";
+ OS << "} // end namespace llvm\n";
+ OS << "#endif // GET_INSTRMAP_INFO\n\n";
+}
+
+} // End llvm namespace
diff --git a/contrib/libs/llvm16/utils/TableGen/CodeGenRegisters.cpp b/contrib/libs/llvm16/utils/TableGen/CodeGenRegisters.cpp
new file mode 100644
index 0000000000..8ad8a7a5bc
--- /dev/null
+++ b/contrib/libs/llvm16/utils/TableGen/CodeGenRegisters.cpp
@@ -0,0 +1,2503 @@
+//===- CodeGenRegisters.cpp - Register and RegisterClass Info -------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// This file defines structures to encapsulate information gleaned from the
+// target register and register class definitions.
+//
+//===----------------------------------------------------------------------===//
+
+#include "CodeGenRegisters.h"
+#include "llvm/ADT/ArrayRef.h"
+#include "llvm/ADT/BitVector.h"
+#include "llvm/ADT/DenseMap.h"
+#include "llvm/ADT/IntEqClasses.h"
+#include "llvm/ADT/STLExtras.h"
+#include "llvm/ADT/SetVector.h"
+#include "llvm/ADT/SmallPtrSet.h"
+#include "llvm/ADT/SmallSet.h"
+#include "llvm/ADT/SmallVector.h"
+#include "llvm/ADT/StringRef.h"
+#include "llvm/ADT/Twine.h"
+#include "llvm/Support/Debug.h"
+#include "llvm/Support/raw_ostream.h"
+#include "llvm/TableGen/Error.h"
+#include "llvm/TableGen/Record.h"
+#include <algorithm>
+#include <cassert>
+#include <cstdint>
+#include <iterator>
+#include <map>
+#include <queue>
+#include <set>
+#include <string>
+#include <tuple>
+#include <utility>
+#include <vector>
+
+using namespace llvm;
+
+#define DEBUG_TYPE "regalloc-emitter"
+
+//===----------------------------------------------------------------------===//
+// CodeGenSubRegIndex
+//===----------------------------------------------------------------------===//
+
+// Construct a sub-register index from its TableGen record, reading the name,
+// optional namespace, and the Size/Offset bit geometry from the def.
+CodeGenSubRegIndex::CodeGenSubRegIndex(Record *R, unsigned Enum)
+    : TheDef(R), EnumValue(Enum), AllSuperRegsCovered(true), Artificial(true) {
+  Name = std::string(R->getName());
+  if (R->getValue("Namespace"))
+    Namespace = std::string(R->getValueAsString("Namespace"));
+  Size = R->getValueAsInt("Size");
+  Offset = R->getValueAsInt("Offset");
+}
+
+// Construct a synthesized sub-register index that has no TableGen record;
+// Size and Offset are unknown (-1).
+CodeGenSubRegIndex::CodeGenSubRegIndex(StringRef N, StringRef Nspace,
+                                       unsigned Enum)
+    : TheDef(nullptr), Name(std::string(N)), Namespace(std::string(Nspace)),
+      Size(-1), Offset(-1), EnumValue(Enum), AllSuperRegsCovered(true),
+      Artificial(true) {}
+
+// Return "Namespace::Name", or just "Name" when no namespace is set.
+std::string CodeGenSubRegIndex::getQualifiedName() const {
+  std::string N = getNamespace();
+  if (!N.empty())
+    N += "::";
+  N += getName();
+  return N;
+}
+
+// Read the ComposedOf and CoveringSubRegIndices fields of this index's record
+// and register the corresponding composition/concatenation relations with
+// RegBank. No-op for synthesized indices (no TheDef).
+void CodeGenSubRegIndex::updateComponents(CodeGenRegBank &RegBank) {
+  if (!TheDef)
+    return;
+
+  std::vector<Record*> Comps = TheDef->getValueAsListOfDefs("ComposedOf");
+  if (!Comps.empty()) {
+    if (Comps.size() != 2)
+      PrintFatalError(TheDef->getLoc(),
+                      "ComposedOf must have exactly two entries");
+    CodeGenSubRegIndex *A = RegBank.getSubRegIdx(Comps[0]);
+    CodeGenSubRegIndex *B = RegBank.getSubRegIdx(Comps[1]);
+    // Record A+B -> this; a non-null result means a conflicting entry existed.
+    CodeGenSubRegIndex *X = A->addComposite(B, this);
+    if (X)
+      PrintFatalError(TheDef->getLoc(), "Ambiguous ComposedOf entries");
+  }
+
+  std::vector<Record*> Parts =
+      TheDef->getValueAsListOfDefs("CoveringSubRegIndices");
+  if (!Parts.empty()) {
+    if (Parts.size() < 2)
+      PrintFatalError(TheDef->getLoc(),
+                      "CoveredBySubRegs must have two or more entries");
+    SmallVector<CodeGenSubRegIndex*, 8> IdxParts;
+    for (Record *Part : Parts)
+      IdxParts.push_back(RegBank.getSubRegIdx(Part));
+    setConcatenationOf(IdxParts);
+  }
+}
+
+// Compute (and cache in LaneMask) the lane mask of this index as the union of
+// the lane masks of all indices it composes with.
+LaneBitmask CodeGenSubRegIndex::computeLaneMask() const {
+  // Already computed?
+  if (LaneMask.any())
+    return LaneMask;
+
+  // Recursion guard, shouldn't be required.
+  LaneMask = LaneBitmask::getAll();
+
+  // The lane mask is simply the union of all sub-indices.
+  LaneBitmask M;
+  for (const auto &C : Composed)
+    M |= C.second->computeLaneMask();
+  assert(M.any() && "Missing lane mask, sub-register cycle?");
+  LaneMask = M;
+  return LaneMask;
+}
+
+// Record that this index is a concatenation of Parts. If a concatenation was
+// already recorded, it must match (asserted in debug builds).
+void CodeGenSubRegIndex::setConcatenationOf(
+    ArrayRef<CodeGenSubRegIndex*> Parts) {
+  if (ConcatenationOf.empty())
+    ConcatenationOf.assign(Parts.begin(), Parts.end());
+  else
+    assert(std::equal(Parts.begin(), Parts.end(),
+                      ConcatenationOf.begin()) && "parts consistent");
+}
+
+// Flatten ConcatenationOf so that no member is itself a concatenation:
+// recursively expand each part in place, splicing its parts into this list.
+void CodeGenSubRegIndex::computeConcatTransitiveClosure() {
+  for (SmallVectorImpl<CodeGenSubRegIndex*>::iterator
+       I = ConcatenationOf.begin(); I != ConcatenationOf.end(); /*empty*/) {
+    CodeGenSubRegIndex *SubIdx = *I;
+    SubIdx->computeConcatTransitiveClosure();
+#ifndef NDEBUG
+    for (CodeGenSubRegIndex *SRI : SubIdx->ConcatenationOf)
+      assert(SRI->ConcatenationOf.empty() && "No transitive closure?");
+#endif
+
+    if (SubIdx->ConcatenationOf.empty()) {
+      ++I;
+    } else {
+      // Replace the composite entry with its already-flattened parts.
+      I = ConcatenationOf.erase(I);
+      I = ConcatenationOf.insert(I, SubIdx->ConcatenationOf.begin(),
+                                 SubIdx->ConcatenationOf.end());
+      I += SubIdx->ConcatenationOf.size();
+    }
+  }
+}
+
+//===----------------------------------------------------------------------===//
+// CodeGenRegister
+//===----------------------------------------------------------------------===//
+
+// Construct a register from its TableGen record; sub-register structure is
+// filled in later by buildObjectGraph()/computeSubRegs().
+CodeGenRegister::CodeGenRegister(Record *R, unsigned Enum)
+    : TheDef(R), EnumValue(Enum),
+      CostPerUse(R->getValueAsListOfInts("CostPerUse")),
+      CoveredBySubRegs(R->getValueAsBit("CoveredBySubRegs")),
+      HasDisjunctSubRegs(false), Constant(R->getValueAsBit("isConstant")),
+      SubRegsComplete(false), SuperRegsComplete(false), TopoSig(~0u) {
+  Artificial = R->getValueAsBit("isArtificial");
+}
+
+// Populate the explicit sub-register lists, leading super-register links, and
+// symmetric ad hoc alias edges from this register's record.
+void CodeGenRegister::buildObjectGraph(CodeGenRegBank &RegBank) {
+  std::vector<Record*> SRIs = TheDef->getValueAsListOfDefs("SubRegIndices");
+  std::vector<Record*> SRs = TheDef->getValueAsListOfDefs("SubRegs");
+
+  if (SRIs.size() != SRs.size())
+    PrintFatalError(TheDef->getLoc(),
+                    "SubRegs and SubRegIndices must have the same size");
+
+  for (unsigned i = 0, e = SRIs.size(); i != e; ++i) {
+    ExplicitSubRegIndices.push_back(RegBank.getSubRegIdx(SRIs[i]));
+    ExplicitSubRegs.push_back(RegBank.getReg(SRs[i]));
+  }
+
+  // Also compute leading super-registers. Each register has a list of
+  // covered-by-subregs super-registers where it appears as the first explicit
+  // sub-register.
+  //
+  // This is used by computeSecondarySubRegs() to find candidates.
+  if (CoveredBySubRegs && !ExplicitSubRegs.empty())
+    ExplicitSubRegs.front()->LeadingSuperRegs.push_back(this);
+
+  // Add ad hoc alias links. This is a symmetric relationship between two
+  // registers, so build a symmetric graph by adding links in both ends.
+  std::vector<Record*> Aliases = TheDef->getValueAsListOfDefs("Aliases");
+  for (Record *Alias : Aliases) {
+    CodeGenRegister *Reg = RegBank.getReg(Alias);
+    ExplicitAliases.push_back(Reg);
+    Reg->ExplicitAliases.push_back(this);
+  }
+}
+
+// Return the register's name as given by its TableGen record.
+StringRef CodeGenRegister::getName() const {
+  assert(TheDef && "no def");
+  return TheDef->getName();
+}
+
+namespace {
+
+// Iterate over all register units in a set of registers.
+class RegUnitIterator {
+  CodeGenRegister::Vec::const_iterator RegI, RegE;
+  CodeGenRegister::RegUnitList::iterator UnitI, UnitE;
+  // Empty unit list used to initialize the iterators when Regs is empty.
+  static CodeGenRegister::RegUnitList Sentinel;
+
+public:
+  RegUnitIterator(const CodeGenRegister::Vec &Regs):
+    RegI(Regs.begin()), RegE(Regs.end()) {
+
+    if (RegI == RegE) {
+      UnitI = Sentinel.end();
+      UnitE = Sentinel.end();
+    } else {
+      UnitI = (*RegI)->getRegUnits().begin();
+      UnitE = (*RegI)->getRegUnits().end();
+      advance();
+    }
+  }
+
+  bool isValid() const { return UnitI != UnitE; }
+
+  // Current register unit.
+  unsigned operator* () const { assert(isValid()); return *UnitI; }
+
+  // Register owning the current unit.
+  const CodeGenRegister *getReg() const { assert(isValid()); return *RegI; }
+
+  /// Preincrement. Move to the next unit.
+  void operator++() {
+    assert(isValid() && "Cannot advance beyond the last operand");
+    ++UnitI;
+    advance();
+  }
+
+protected:
+  // Skip forward to the next register that has units, or to the end.
+  void advance() {
+    while (UnitI == UnitE) {
+      if (++RegI == RegE)
+        break;
+      UnitI = (*RegI)->getRegUnits().begin();
+      UnitE = (*RegI)->getRegUnits().end();
+    }
+  }
+};
+
+CodeGenRegister::RegUnitList RegUnitIterator::Sentinel;
+
+} // end anonymous namespace
+
+// Return true if this unit appears in RegUnits.
+static bool hasRegUnit(CodeGenRegister::RegUnitList &RegUnits, unsigned Unit) {
+  return RegUnits.test(Unit);
+}
+
+// Inherit register units from subregisters.
+// Return true if the RegUnits changed.
+bool CodeGenRegister::inheritRegUnits(CodeGenRegBank &RegBank) {
+  bool changed = false;
+  for (const auto &SubReg : SubRegs) {
+    CodeGenRegister *SR = SubReg.second;
+    // Merge the subregister's units into this register's RegUnits.
+    changed |= (RegUnits |= SR->RegUnits);
+  }
+
+  return changed;
+}
+
+// Compute and cache this register's complete sub-register map, including
+// inherited and inferred sub-registers. Called recursively, so the register
+// hierarchy is processed in postorder; also initializes the native RegUnits.
+const CodeGenRegister::SubRegMap &
+CodeGenRegister::computeSubRegs(CodeGenRegBank &RegBank) {
+  // Only compute this map once.
+  if (SubRegsComplete)
+    return SubRegs;
+  SubRegsComplete = true;
+
+  HasDisjunctSubRegs = ExplicitSubRegs.size() > 1;
+
+  // First insert the explicit subregs and make sure they are fully indexed.
+  for (unsigned i = 0, e = ExplicitSubRegs.size(); i != e; ++i) {
+    CodeGenRegister *SR = ExplicitSubRegs[i];
+    CodeGenSubRegIndex *Idx = ExplicitSubRegIndices[i];
+    if (!SR->Artificial)
+      Idx->Artificial = false;
+    if (!SubRegs.insert(std::make_pair(Idx, SR)).second)
+      PrintFatalError(TheDef->getLoc(), "SubRegIndex " + Idx->getName() +
+                      " appears twice in Register " + getName());
+    // Map explicit sub-registers first, so the names take precedence.
+    // The inherited sub-registers are mapped below.
+    SubReg2Idx.insert(std::make_pair(SR, Idx));
+  }
+
+  // Keep track of inherited subregs and how they can be reached.
+  SmallPtrSet<CodeGenRegister*, 8> Orphans;
+
+  // Clone inherited subregs and place duplicate entries in Orphans.
+  // Here the order is important - earlier subregs take precedence.
+  for (CodeGenRegister *ESR : ExplicitSubRegs) {
+    const SubRegMap &Map = ESR->computeSubRegs(RegBank);
+    HasDisjunctSubRegs |= ESR->HasDisjunctSubRegs;
+
+    for (const auto &SR : Map) {
+      if (!SubRegs.insert(SR).second)
+        Orphans.insert(SR.second);
+    }
+  }
+
+  // Expand any composed subreg indices.
+  // If dsub_2 has ComposedOf = [qsub_1, dsub_0], and this register has a
+  // qsub_1 subreg, add a dsub_2 subreg. Keep growing Indices and process
+  // expanded subreg indices recursively.
+  SmallVector<CodeGenSubRegIndex*, 8> Indices = ExplicitSubRegIndices;
+  for (unsigned i = 0; i != Indices.size(); ++i) {
+    CodeGenSubRegIndex *Idx = Indices[i];
+    const CodeGenSubRegIndex::CompMap &Comps = Idx->getComposites();
+    CodeGenRegister *SR = SubRegs[Idx];
+    const SubRegMap &Map = SR->computeSubRegs(RegBank);
+
+    // Look at the possible compositions of Idx.
+    // They may not all be supported by SR.
+    for (auto Comp : Comps) {
+      SubRegMap::const_iterator SRI = Map.find(Comp.first);
+      if (SRI == Map.end())
+        continue; // Idx + I->first doesn't exist in SR.
+      // Add I->second as a name for the subreg SRI->second, assuming it is
+      // orphaned, and the name isn't already used for something else.
+      if (SubRegs.count(Comp.second) || !Orphans.erase(SRI->second))
+        continue;
+      // We found a new name for the orphaned sub-register.
+      SubRegs.insert(std::make_pair(Comp.second, SRI->second));
+      Indices.push_back(Comp.second);
+    }
+  }
+
+  // Now Orphans contains the inherited subregisters without a direct index.
+  // Create inferred indexes for all missing entries.
+  // Work backwards in the Indices vector in order to compose subregs bottom-up.
+  // Consider this subreg sequence:
+  //
+  //   qsub_1 -> dsub_0 -> ssub_0
+  //
+  // The qsub_1 -> dsub_0 composition becomes dsub_2, so the ssub_0 register
+  // can be reached in two different ways:
+  //
+  //   qsub_1 -> ssub_0
+  //   dsub_2 -> ssub_0
+  //
+  // We pick the latter composition because another register may have [dsub_0,
+  // dsub_1, dsub_2] subregs without necessarily having a qsub_1 subreg. The
+  // dsub_2 -> ssub_0 composition can be shared.
+  while (!Indices.empty() && !Orphans.empty()) {
+    CodeGenSubRegIndex *Idx = Indices.pop_back_val();
+    CodeGenRegister *SR = SubRegs[Idx];
+    const SubRegMap &Map = SR->computeSubRegs(RegBank);
+    for (const auto &SubReg : Map)
+      if (Orphans.erase(SubReg.second))
+        SubRegs[RegBank.getCompositeSubRegIndex(Idx, SubReg.first)] = SubReg.second;
+  }
+
+  // Compute the inverse SubReg -> Idx map.
+  for (const auto &SubReg : SubRegs) {
+    if (SubReg.second == this) {
+      ArrayRef<SMLoc> Loc;
+      if (TheDef)
+        Loc = TheDef->getLoc();
+      PrintFatalError(Loc, "Register " + getName() +
+                      " has itself as a sub-register");
+    }
+
+    // Compute AllSuperRegsCovered.
+    if (!CoveredBySubRegs)
+      SubReg.first->AllSuperRegsCovered = false;
+
+    // Ensure that every sub-register has a unique name.
+    DenseMap<const CodeGenRegister*, CodeGenSubRegIndex*>::iterator Ins =
+        SubReg2Idx.insert(std::make_pair(SubReg.second, SubReg.first)).first;
+    if (Ins->second == SubReg.first)
+      continue;
+    // Trouble: Two different names for SubReg.second.
+    ArrayRef<SMLoc> Loc;
+    if (TheDef)
+      Loc = TheDef->getLoc();
+    PrintFatalError(Loc, "Sub-register can't have two names: " +
+                    SubReg.second->getName() + " available as " +
+                    SubReg.first->getName() + " and " + Ins->second->getName());
+  }
+
+  // Derive possible names for sub-register concatenations from any explicit
+  // sub-registers. By doing this before computeSecondarySubRegs(), we ensure
+  // that getConcatSubRegIndex() won't invent any concatenated indices that the
+  // user already specified.
+  for (unsigned i = 0, e = ExplicitSubRegs.size(); i != e; ++i) {
+    CodeGenRegister *SR = ExplicitSubRegs[i];
+    if (!SR->CoveredBySubRegs || SR->ExplicitSubRegs.size() <= 1 ||
+        SR->Artificial)
+      continue;
+
+    // SR is composed of multiple sub-regs. Find their names in this register.
+    SmallVector<CodeGenSubRegIndex*, 8> Parts;
+    for (unsigned j = 0, e = SR->ExplicitSubRegs.size(); j != e; ++j) {
+      CodeGenSubRegIndex &I = *SR->ExplicitSubRegIndices[j];
+      if (!I.Artificial)
+        Parts.push_back(getSubRegIndex(SR->ExplicitSubRegs[j]));
+    }
+
+    // Offer this as an existing spelling for the concatenation of Parts.
+    CodeGenSubRegIndex &Idx = *ExplicitSubRegIndices[i];
+    Idx.setConcatenationOf(Parts);
+  }
+
+  // Initialize RegUnitList. Because getSubRegs is called recursively, this
+  // processes the register hierarchy in postorder.
+  //
+  // Inherit all sub-register units. It is good enough to look at the explicit
+  // sub-registers, the other registers won't contribute any more units.
+  for (unsigned i = 0, e = ExplicitSubRegs.size(); i != e; ++i) {
+    CodeGenRegister *SR = ExplicitSubRegs[i];
+    RegUnits |= SR->RegUnits;
+  }
+
+  // Absent any ad hoc aliasing, we create one register unit per leaf register.
+  // These units correspond to the maximal cliques in the register overlap
+  // graph which is optimal.
+  //
+  // When there is ad hoc aliasing, we simply create one unit per edge in the
+  // undirected ad hoc aliasing graph. Technically, we could do better by
+  // identifying maximal cliques in the ad hoc graph, but cliques larger than 2
+  // are extremely rare anyway (I've never seen one), so we don't bother with
+  // the added complexity.
+  for (unsigned i = 0, e = ExplicitAliases.size(); i != e; ++i) {
+    CodeGenRegister *AR = ExplicitAliases[i];
+    // Only visit each edge once.
+    if (AR->SubRegsComplete)
+      continue;
+    // Create a RegUnit representing this alias edge, and add it to both
+    // registers.
+    unsigned Unit = RegBank.newRegUnit(this, AR);
+    RegUnits.set(Unit);
+    AR->RegUnits.set(Unit);
+  }
+
+  // Finally, create units for leaf registers without ad hoc aliases. Note that
+  // a leaf register with ad hoc aliases doesn't get its own unit - it isn't
+  // necessary. This means the aliasing leaf registers can share a single unit.
+  if (RegUnits.empty())
+    RegUnits.set(RegBank.newRegUnit(this));
+
+  // We have now computed the native register units. More may be adopted later
+  // for balancing purposes.
+  NativeRegUnits = RegUnits;
+
+  return SubRegs;
+}
+
+// In a register that is covered by its sub-registers, try to find redundant
+// sub-registers. For example:
+//
+//   QQ0 = {Q0, Q1}
+//   Q0 = {D0, D1}
+//   Q1 = {D2, D3}
+//
+// We can infer that D1_D2 is also a sub-register, even if it wasn't named in
+// the register definition.
+//
+// The explicitly specified registers form a tree. This function discovers
+// sub-register relationships that would force a DAG.
+//
+void CodeGenRegister::computeSecondarySubRegs(CodeGenRegBank &RegBank) {
+  SmallVector<SubRegMap::value_type, 8> NewSubRegs;
+
+  // Worklist of (index, sub-register) pairs still to be examined; newly
+  // inferred sub-registers are pushed back so they are examined too.
+  std::queue<std::pair<CodeGenSubRegIndex*,CodeGenRegister*>> SubRegQueue;
+  for (std::pair<CodeGenSubRegIndex*,CodeGenRegister*> P : SubRegs)
+    SubRegQueue.push(P);
+
+  // Look at the leading super-registers of each sub-register. Those are the
+  // candidates for new sub-registers, assuming they are fully contained in
+  // this register.
+  while (!SubRegQueue.empty()) {
+    CodeGenSubRegIndex *SubRegIdx;
+    const CodeGenRegister *SubReg;
+    std::tie(SubRegIdx, SubReg) = SubRegQueue.front();
+    SubRegQueue.pop();
+
+    const CodeGenRegister::SuperRegList &Leads = SubReg->LeadingSuperRegs;
+    for (unsigned i = 0, e = Leads.size(); i != e; ++i) {
+      CodeGenRegister *Cand = const_cast<CodeGenRegister*>(Leads[i]);
+      // Already got this sub-register?
+      if (Cand == this || getSubRegIndex(Cand))
+        continue;
+      // Check if each component of Cand is already a sub-register.
+      assert(!Cand->ExplicitSubRegs.empty() &&
+             "Super-register has no sub-registers");
+      if (Cand->ExplicitSubRegs.size() == 1)
+        continue;
+      SmallVector<CodeGenSubRegIndex*, 8> Parts;
+      // We know that the first component is (SubRegIdx,SubReg). However we
+      // may still need to split it into smaller subregister parts.
+      assert(Cand->ExplicitSubRegs[0] == SubReg && "LeadingSuperRegs correct");
+      assert(getSubRegIndex(SubReg) == SubRegIdx && "LeadingSuperRegs correct");
+      for (CodeGenRegister *SubReg : Cand->ExplicitSubRegs) {
+        if (CodeGenSubRegIndex *SubRegIdx = getSubRegIndex(SubReg)) {
+          if (SubRegIdx->ConcatenationOf.empty())
+            Parts.push_back(SubRegIdx);
+          else
+            append_range(Parts, SubRegIdx->ConcatenationOf);
+        } else {
+          // Sub-register doesn't exist.
+          Parts.clear();
+          break;
+        }
+      }
+      // There is nothing to do if some Cand sub-register is not part of this
+      // register.
+      if (Parts.empty())
+        continue;
+
+      // Each part of Cand is a sub-register of this. Make the full Cand also
+      // a sub-register with a concatenated sub-register index.
+      CodeGenSubRegIndex *Concat = RegBank.getConcatSubRegIndex(Parts);
+      std::pair<CodeGenSubRegIndex*,CodeGenRegister*> NewSubReg =
+          std::make_pair(Concat, Cand);
+
+      if (!SubRegs.insert(NewSubReg).second)
+        continue;
+
+      // We inserted a new subregister.
+      NewSubRegs.push_back(NewSubReg);
+      SubRegQueue.push(NewSubReg);
+      SubReg2Idx.insert(std::make_pair(Cand, Concat));
+    }
+  }
+
+  // Create sub-register index composition maps for the synthesized indices.
+  for (unsigned i = 0, e = NewSubRegs.size(); i != e; ++i) {
+    CodeGenSubRegIndex *NewIdx = NewSubRegs[i].first;
+    CodeGenRegister *NewSubReg = NewSubRegs[i].second;
+    for (auto SubReg : NewSubReg->SubRegs) {
+      CodeGenSubRegIndex *SubIdx = getSubRegIndex(SubReg.second);
+      if (!SubIdx)
+        PrintFatalError(TheDef->getLoc(), "No SubRegIndex for " +
+                        SubReg.second->getName() +
+                        " in " + getName());
+      NewIdx->addComposite(SubReg.first, SubIdx);
+    }
+  }
+}
+
+// Add this register to the SuperRegs list of each of its sub-registers (in
+// topological order) and compute this register's topological signature.
+void CodeGenRegister::computeSuperRegs(CodeGenRegBank &RegBank) {
+  // Only visit each register once.
+  if (SuperRegsComplete)
+    return;
+  SuperRegsComplete = true;
+
+  // Make sure all sub-registers have been visited first, so the super-reg
+  // lists will be topologically ordered.
+  for (auto SubReg : SubRegs)
+    SubReg.second->computeSuperRegs(RegBank);
+
+  // Now add this as a super-register on all sub-registers.
+  // Also compute the TopoSigId in post-order.
+  TopoSigId Id;
+  for (auto SubReg : SubRegs) {
+    // Topological signature computed from SubIdx, TopoId(SubReg).
+    // Loops and idempotent indices have TopoSig = ~0u.
+    Id.push_back(SubReg.first->EnumValue);
+    Id.push_back(SubReg.second->TopoSig);
+
+    // Don't add duplicate entries.
+    if (!SubReg.second->SuperRegs.empty() &&
+        SubReg.second->SuperRegs.back() == this)
+      continue;
+    SubReg.second->SuperRegs.push_back(this);
+  }
+  TopoSig = RegBank.getTopoSig(Id);
+}
+
+// Collect all sub-registers into OSet, visiting the explicit sub-register
+// tree in pre-order, then appending any inferred (secondary) sub-registers.
+void
+CodeGenRegister::addSubRegsPreOrder(SetVector<const CodeGenRegister*> &OSet,
+                                    CodeGenRegBank &RegBank) const {
+  assert(SubRegsComplete && "Must precompute sub-registers");
+  for (unsigned i = 0, e = ExplicitSubRegs.size(); i != e; ++i) {
+    CodeGenRegister *SR = ExplicitSubRegs[i];
+    if (OSet.insert(SR))
+      SR->addSubRegsPreOrder(OSet, RegBank);
+  }
+  // Add any secondary sub-registers that weren't part of the explicit tree.
+  for (auto SubReg : SubRegs)
+    OSet.insert(SubReg.second);
+}
+
+// Get the sum of this register's unit weights.
+unsigned CodeGenRegister::getWeight(const CodeGenRegBank &RegBank) const {
+  unsigned Weight = 0;
+  for (unsigned RegUnit : RegUnits) {
+    Weight += RegBank.getRegUnit(RegUnit).Weight;
+  }
+  return Weight;
+}
+
+//===----------------------------------------------------------------------===//
+// RegisterTuples
+//===----------------------------------------------------------------------===//
+
+// A RegisterTuples def is used to generate pseudo-registers from lists of
+// sub-registers. We provide a SetTheory expander class that returns the new
+// registers.
+namespace {
+
+struct TupleExpander : SetTheory::Expander {
+  // Reference to SynthDefs in the containing CodeGenRegBank, to keep track of
+  // the synthesized definitions for their lifetime.
+  std::vector<std::unique_ptr<Record>> &SynthDefs;
+
+  TupleExpander(std::vector<std::unique_ptr<Record>> &SynthDefs)
+      : SynthDefs(SynthDefs) {}
+
+  // Expand a RegisterTuples def: zip the evaluated sub-register lists and
+  // synthesize one Record per tuple, inserting each into Elts.
+  void expand(SetTheory &ST, Record *Def, SetTheory::RecSet &Elts) override {
+    std::vector<Record*> Indices = Def->getValueAsListOfDefs("SubRegIndices");
+    unsigned Dim = Indices.size();
+    ListInit *SubRegs = Def->getValueAsListInit("SubRegs");
+    if (Dim != SubRegs->size())
+      PrintFatalError(Def->getLoc(), "SubRegIndices and SubRegs size mismatch");
+    if (Dim < 2)
+      PrintFatalError(Def->getLoc(),
+                      "Tuples must have at least 2 sub-registers");
+
+    // Evaluate the sub-register lists to be zipped.
+    unsigned Length = ~0u;
+    SmallVector<SetTheory::RecSet, 4> Lists(Dim);
+    for (unsigned i = 0; i != Dim; ++i) {
+      ST.evaluate(SubRegs->getElement(i), Lists[i], Def->getLoc());
+      Length = std::min(Length, unsigned(Lists[i].size()));
+    }
+
+    if (Length == 0)
+      return;
+
+    // Precompute some types.
+    Record *RegisterCl = Def->getRecords().getClass("Register");
+    RecTy *RegisterRecTy = RecordRecTy::get(RegisterCl);
+    std::vector<StringRef> RegNames =
+        Def->getValueAsListOfStrings("RegAsmNames");
+
+    // Zip them up.
+    RecordKeeper &RK = Def->getRecords();
+    for (unsigned n = 0; n != Length; ++n) {
+      std::string Name;
+      Record *Proto = Lists[0][n];
+      std::vector<Init*> Tuple;
+      for (unsigned i = 0; i != Dim; ++i) {
+        Record *Reg = Lists[i][n];
+        if (i) Name += '_';
+        Name += Reg->getName();
+        Tuple.push_back(DefInit::get(Reg));
+      }
+
+      // Take the cost list of the first register in the tuple.
+      ListInit *CostList = Proto->getValueAsListInit("CostPerUse");
+      SmallVector<Init *, 2> CostPerUse;
+      CostPerUse.insert(CostPerUse.end(), CostList->begin(), CostList->end());
+
+      StringInit *AsmName = StringInit::get(RK, "");
+      if (!RegNames.empty()) {
+        if (RegNames.size() <= n)
+          PrintFatalError(Def->getLoc(),
+                          "Register tuple definition missing name for '" +
+                          Name + "'.");
+        AsmName = StringInit::get(RK, RegNames[n]);
+      }
+
+      // Create a new Record representing the synthesized register. This record
+      // is only for consumption by CodeGenRegister, it is not added to the
+      // RecordKeeper.
+      SynthDefs.emplace_back(
+          std::make_unique<Record>(Name, Def->getLoc(), Def->getRecords()));
+      Record *NewReg = SynthDefs.back().get();
+      Elts.insert(NewReg);
+
+      // Copy Proto super-classes.
+      ArrayRef<std::pair<Record *, SMRange>> Supers = Proto->getSuperClasses();
+      for (const auto &SuperPair : Supers)
+        NewReg->addSuperClass(SuperPair.first, SuperPair.second);
+
+      // Copy Proto fields.
+      for (unsigned i = 0, e = Proto->getValues().size(); i != e; ++i) {
+        RecordVal RV = Proto->getValues()[i];
+
+        // Skip existing fields, like NAME.
+        if (NewReg->getValue(RV.getNameInit()))
+          continue;
+
+        StringRef Field = RV.getName();
+
+        // Replace the sub-register list with Tuple.
+        if (Field == "SubRegs")
+          RV.setValue(ListInit::get(Tuple, RegisterRecTy));
+
+        if (Field == "AsmName")
+          RV.setValue(AsmName);
+
+        // CostPerUse is aggregated from all Tuple members.
+        if (Field == "CostPerUse")
+          RV.setValue(ListInit::get(CostPerUse, CostList->getElementType()));
+
+        // Composite registers are always covered by sub-registers.
+        if (Field == "CoveredBySubRegs")
+          RV.setValue(BitInit::get(RK, true));
+
+        // Copy fields from the RegisterTuples def.
+        if (Field == "SubRegIndices" ||
+            Field == "CompositeIndices") {
+          NewReg->addValue(*Def->getValue(Field));
+          continue;
+        }
+
+        // Some fields get their default uninitialized value.
+        if (Field == "DwarfNumbers" ||
+            Field == "DwarfAlias" ||
+            Field == "Aliases") {
+          if (const RecordVal *DefRV = RegisterCl->getValue(Field))
+            NewReg->addValue(*DefRV);
+          continue;
+        }
+
+        // Everything else is copied from Proto.
+        NewReg->addValue(RV);
+      }
+    }
+  }
+};
+
+} // end anonymous namespace
+
+//===----------------------------------------------------------------------===//
+// CodeGenRegisterClass
+//===----------------------------------------------------------------------===//
+
+// Sort the register vector by pointee order and remove duplicates.
+static void sortAndUniqueRegisters(CodeGenRegister::Vec &M) {
+  llvm::sort(M, deref<std::less<>>());
+  M.erase(std::unique(M.begin(), M.end(), deref<std::equal_to<>>()), M.end());
+}
+
+// Build a register class from its TableGen record: value types, member
+// registers, allocation orders, and per-hwmode register/spill size info.
+CodeGenRegisterClass::CodeGenRegisterClass(CodeGenRegBank &RegBank, Record *R)
+    : TheDef(R), Name(std::string(R->getName())),
+      TopoSigs(RegBank.getNumTopoSigs()), EnumValue(-1), TSFlags(0) {
+  GeneratePressureSet = R->getValueAsBit("GeneratePressureSet");
+  std::vector<Record*> TypeList = R->getValueAsListOfDefs("RegTypes");
+  if (TypeList.empty())
+    PrintFatalError(R->getLoc(), "RegTypes list must not be empty!");
+  for (unsigned i = 0, e = TypeList.size(); i != e; ++i) {
+    Record *Type = TypeList[i];
+    if (!Type->isSubClassOf("ValueType"))
+      PrintFatalError(R->getLoc(),
+                      "RegTypes list member '" + Type->getName() +
+                      "' does not derive from the ValueType class!");
+    VTs.push_back(getValueTypeByHwMode(Type, RegBank.getHwModes()));
+  }
+
+  // Allocation order 0 is the full set. AltOrders provides others.
+  const SetTheory::RecVec *Elements = RegBank.getSets().expand(R);
+  ListInit *AltOrders = R->getValueAsListInit("AltOrders");
+  Orders.resize(1 + AltOrders->size());
+
+  // Default allocation order always contains all registers.
+  Artificial = true;
+  for (unsigned i = 0, e = Elements->size(); i != e; ++i) {
+    Orders[0].push_back((*Elements)[i]);
+    const CodeGenRegister *Reg = RegBank.getReg((*Elements)[i]);
+    Members.push_back(Reg);
+    Artificial &= Reg->Artificial;
+    TopoSigs.set(Reg->getTopoSig());
+  }
+  sortAndUniqueRegisters(Members);
+
+  // Alternative allocation orders may be subsets.
+  SetTheory::RecSet Order;
+  for (unsigned i = 0, e = AltOrders->size(); i != e; ++i) {
+    RegBank.getSets().evaluate(AltOrders->getElement(i), Order, R->getLoc());
+    Orders[1 + i].append(Order.begin(), Order.end());
+    // Verify that all altorder members are regclass members.
+    while (!Order.empty()) {
+      CodeGenRegister *Reg = RegBank.getReg(Order.back());
+      Order.pop_back();
+      if (!contains(Reg))
+        PrintFatalError(R->getLoc(), " AltOrder register " + Reg->getName() +
+                        " is not a class member");
+    }
+  }
+
+  Namespace = R->getValueAsString("Namespace");
+
+  if (const RecordVal *RV = R->getValue("RegInfos"))
+    if (DefInit *DI = dyn_cast_or_null<DefInit>(RV->getValue()))
+      RSI = RegSizeInfoByHwMode(DI->getDef(), RegBank.getHwModes());
+  unsigned Size = R->getValueAsInt("Size");
+  assert((RSI.hasDefault() || Size != 0 || VTs[0].isSimple()) &&
+         "Impossible to determine register size");
+  if (!RSI.hasDefault()) {
+    // No RegInfos given: derive default-mode sizes from Size or the first VT.
+    RegSizeInfo RI;
+    RI.RegSize = RI.SpillSize = Size ? Size
+                                     : VTs[0].getSimple().getSizeInBits();
+    RI.SpillAlignment = R->getValueAsInt("Alignment");
+    RSI.insertRegSizeForMode(DefaultMode, RI);
+  }
+
+  CopyCost = R->getValueAsInt("CopyCost");
+  Allocatable = R->getValueAsBit("isAllocatable");
+  AltOrderSelect = R->getValueAsString("AltOrderSelect");
+  int AllocationPriority = R->getValueAsInt("AllocationPriority");
+  if (!isUInt<5>(AllocationPriority))
+    PrintFatalError(R->getLoc(), "AllocationPriority out of range [0,31]");
+  this->AllocationPriority = AllocationPriority;
+
+  GlobalPriority = R->getValueAsBit("GlobalPriority");
+
+  // Pack the TSFlags bits field into an integer.
+  BitsInit *TSF = R->getValueAsBitsInit("TSFlags");
+  for (unsigned I = 0, E = TSF->getNumBits(); I != E; ++I) {
+    BitInit *Bit = cast<BitInit>(TSF->getBit(I));
+    TSFlags |= uint8_t(Bit->getValue()) << I;
+  }
+}
+
+// Create an inferred register class that was missing from the .td files.
+// Most properties will be inherited from the closest super-class after the
+// class structure has been computed.
+CodeGenRegisterClass::CodeGenRegisterClass(CodeGenRegBank &RegBank,
+                                           StringRef Name, Key Props)
+    : Members(*Props.Members), TheDef(nullptr), Name(std::string(Name)),
+      TopoSigs(RegBank.getNumTopoSigs()), EnumValue(-1), RSI(Props.RSI),
+      CopyCost(0), Allocatable(true), AllocationPriority(0),
+      GlobalPriority(false), TSFlags(0) {
+  Artificial = true;
+  GeneratePressureSet = false;
+  // The class is artificial only if every member register is artificial.
+  for (const auto R : Members) {
+    TopoSigs.set(R->getTopoSig());
+    Artificial &= R->Artificial;
+  }
+}
+
+// Compute inherited properties for a synthesized register class.
+void CodeGenRegisterClass::inheritProperties(CodeGenRegBank &RegBank) {
+  assert(!getDef() && "Only synthesized classes can inherit properties");
+  assert(!SuperClasses.empty() && "Synthesized class without super class");
+
+  // The last super-class is the smallest one.
+  CodeGenRegisterClass &Super = *SuperClasses.back();
+
+  // Most properties are copied directly.
+  // Exceptions are members, size, and alignment
+  Namespace = Super.Namespace;
+  VTs = Super.VTs;
+  CopyCost = Super.CopyCost;
+  // Check for allocatable superclasses.
+  Allocatable = any_of(SuperClasses, [&](const CodeGenRegisterClass *S) {
+    return S->Allocatable;
+  });
+  AltOrderSelect = Super.AltOrderSelect;
+  AllocationPriority = Super.AllocationPriority;
+  GlobalPriority = Super.GlobalPriority;
+  TSFlags = Super.TSFlags;
+  GeneratePressureSet |= Super.GeneratePressureSet;
+
+  // Copy all allocation orders, filter out foreign registers from the larger
+  // super-class.
+  Orders.resize(Super.Orders.size());
+  for (unsigned i = 0, ie = Super.Orders.size(); i != ie; ++i)
+    for (unsigned j = 0, je = Super.Orders[i].size(); j != je; ++j)
+      if (contains(RegBank.getReg(Super.Orders[i][j])))
+        Orders[i].push_back(Super.Orders[i][j]);
+}
+
+// Return true if this class supports VT, either exactly or (for a simple VT)
+// under some hardware mode of one of the class's types.
+bool CodeGenRegisterClass::hasType(const ValueTypeByHwMode &VT) const {
+  if (llvm::is_contained(VTs, VT))
+    return true;
+
+  // If VT is not identical to any of this class's types, but is a simple
+  // type, check if any of the types for this class contain it under some
+  // mode.
+  // The motivating example came from RISCV, where (likely because of being
+  // guarded by "64-bit" predicate), the type of X5 was {*:[i64]}, but the
+  // type in GRC was {*:[i32], m1:[i64]}.
+  if (VT.isSimple()) {
+    MVT T = VT.getSimple();
+    for (const ValueTypeByHwMode &OurVT : VTs) {
+      if (llvm::count_if(OurVT, [T](auto &&P) { return P.second == T; }))
+        return true;
+    }
+  }
+  return false;
+}
+
+// Return true if Reg is a member of this class. Members is kept sorted by
+// sortAndUniqueRegisters, so binary search is valid.
+bool CodeGenRegisterClass::contains(const CodeGenRegister *Reg) const {
+  return std::binary_search(Members.begin(), Members.end(), Reg,
+                            deref<std::less<>>());
+}
+
+// Return the class weight: the explicit "Weight" field if set, 0 for empty or
+// artificial classes, otherwise the weight of the first member register.
+unsigned CodeGenRegisterClass::getWeight(const CodeGenRegBank& RegBank) const {
+  if (TheDef && !TheDef->isValueUnset("Weight"))
+    return TheDef->getValueAsInt("Weight");
+
+  if (Members.empty() || Artificial)
+    return 0;
+
+  return (*Members.begin())->getWeight(RegBank);
+}
+
+namespace llvm {
+
+  // Debug printing for a register-class key: size info plus member names.
+  raw_ostream &operator<<(raw_ostream &OS, const CodeGenRegisterClass::Key &K) {
+    OS << "{ " << K.RSI;
+    for (const auto R : *K.Members)
+      OS << ", " << R->getName();
+    return OS << " }";
+  }
+
+} // end namespace llvm
+
+// This is a simple lexicographical order that can be used to search for sets.
+// It is not the same as the topological order provided by TopoOrderRC.
+bool CodeGenRegisterClass::Key::
+operator<(const CodeGenRegisterClass::Key &B) const {
+  // Both keys must reference a member list; order by members first, then by
+  // the size/alignment info.
+  assert(Members && B.Members);
+  return std::tie(*Members, RSI) < std::tie(*B.Members, B.RSI);
+}
+
+// Returns true if RC is a strict subclass.
+// RC is a sub-class of this class if it is a valid replacement for any
+// instruction operand where a register of this class is required. It must
+// satisfy these conditions:
+//
+// 1. All RC registers are also in this.
+// 2. The RC spill size must not be smaller than our spill size.
+// 3. RC spill alignment must be compatible with ours.
+//
+// Here, testSubClass(A, B) answers "is B a sub-class of A": every member of
+// B must also be a member of A (std::includes over the sorted member lists),
+// and the per-HW-mode size info must be compatible.
+static bool testSubClass(const CodeGenRegisterClass *A,
+                         const CodeGenRegisterClass *B) {
+  return A->RSI.isSubClassOf(B->RSI) &&
+         std::includes(A->getMembers().begin(), A->getMembers().end(),
+                       B->getMembers().begin(), B->getMembers().end(),
+                       deref<std::less<>>());
+}
+
+/// Sorting predicate for register classes. This provides a topological
+/// ordering that arranges all register classes before their sub-classes.
+///
+/// Register classes with the same registers, spill size, and alignment form a
+/// clique. They will be ordered alphabetically.
+///
+static bool TopoOrderRC(const CodeGenRegisterClass &PA,
+                        const CodeGenRegisterClass &PB) {
+  auto *A = &PA;
+  auto *B = &PB;
+  if (A == B)
+    return false;
+
+  // Primary key: the per-HW-mode size/alignment info.
+  if (A->RSI < B->RSI)
+    return true;
+  if (A->RSI != B->RSI)
+    return false;
+
+  // Order by descending set size. Note that the classes' allocation order may
+  // not have been computed yet. The Members set is always valid.
+  if (A->getMembers().size() > B->getMembers().size())
+    return true;
+  if (A->getMembers().size() < B->getMembers().size())
+    return false;
+
+  // Finally order by name as a tie breaker.
+  return StringRef(A->getName()) < B->getName();
+}
+
+// Return the class name qualified with its target namespace (for example
+// "X86::GR32"), or just the bare name when no namespace is set.
+std::string CodeGenRegisterClass::getQualifiedName() const {
+  if (Namespace.empty())
+    return getName();
+  else
+    return (Namespace + "::" + getName()).str();
+}
+
+// Compute sub-classes of all register classes.
+// Assume the classes are ordered topologically.
+void CodeGenRegisterClass::computeSubClasses(CodeGenRegBank &RegBank) {
+  auto &RegClasses = RegBank.getRegClasses();
+
+  // Visit backwards so sub-classes are seen first.
+  for (auto I = RegClasses.rbegin(), E = RegClasses.rend(); I != E; ++I) {
+    CodeGenRegisterClass &RC = *I;
+    RC.SubClasses.resize(RegClasses.size());
+    RC.SubClasses.set(RC.EnumValue);
+    if (RC.Artificial)
+      continue;
+
+    // Normally, all subclasses have IDs >= rci, unless RC is part of a clique.
+    for (auto I2 = I.base(), E2 = RegClasses.end(); I2 != E2; ++I2) {
+      CodeGenRegisterClass &SubRC = *I2;
+      if (RC.SubClasses.test(SubRC.EnumValue))
+        continue;
+      if (!testSubClass(&RC, &SubRC))
+        continue;
+      // SubRC is a sub-class. Grab all its sub-classes so we won't have to
+      // check them again.
+      RC.SubClasses |= SubRC.SubClasses;
+    }
+
+    // Sweep up missed clique members. They will be immediately preceding RC.
+    for (auto I2 = std::next(I); I2 != E && testSubClass(&RC, &*I2); ++I2)
+      RC.SubClasses.set(I2->EnumValue);
+  }
+
+  // Compute the SuperClasses lists from the SubClasses vectors.
+  for (auto &RC : RegClasses) {
+    const BitVector &SC = RC.getSubClasses();
+    auto I = RegClasses.begin();
+    // Walk the set bits of SC in ascending order, advancing the list
+    // iterator by the delta between consecutive set bits.
+    for (int s = 0, next_s = SC.find_first(); next_s != -1;
+         next_s = SC.find_next(s)) {
+      std::advance(I, next_s - s);
+      s = next_s;
+      if (&*I == &RC)
+        continue;
+      I->SuperClasses.push_back(&RC);
+    }
+  }
+
+  // With the class hierarchy in place, let synthesized register classes inherit
+  // properties from their closest super-class. The iteration order here can
+  // propagate properties down multiple levels.
+  for (auto &RC : RegClasses)
+    if (!RC.getDef())
+      RC.inheritProperties(RegBank);
+}
+
+// Find the largest (sub-class, sub-register class) pair such that every
+// register R in the chosen sub-class has R:SubIdx inside the chosen
+// sub-register class. Returns std::nullopt when no sub-class of this class
+// supports SubIdx at all.
+std::optional<std::pair<CodeGenRegisterClass *, CodeGenRegisterClass *>>
+CodeGenRegisterClass::getMatchingSubClassWithSubRegs(
+    CodeGenRegBank &RegBank, const CodeGenSubRegIndex *SubIdx) const {
+  auto SizeOrder = [this](const CodeGenRegisterClass *A,
+                          const CodeGenRegisterClass *B) {
+    // If there are multiple, identical register classes, prefer the original
+    // register class.
+    if (A == B)
+      return false;
+    if (A->getMembers().size() == B->getMembers().size())
+      return A == this;
+    return A->getMembers().size() > B->getMembers().size();
+  };
+
+  auto &RegClasses = RegBank.getRegClasses();
+
+  // Find all the subclasses of this one that fully support the sub-register
+  // index and order them by size. BiggestSuperRC should always be first.
+  CodeGenRegisterClass *BiggestSuperRegRC = getSubClassWithSubReg(SubIdx);
+  if (!BiggestSuperRegRC)
+    return std::nullopt;
+  BitVector SuperRegRCsBV = BiggestSuperRegRC->getSubClasses();
+  std::vector<CodeGenRegisterClass *> SuperRegRCs;
+  for (auto &RC : RegClasses)
+    if (SuperRegRCsBV[RC.EnumValue])
+      SuperRegRCs.emplace_back(&RC);
+  llvm::stable_sort(SuperRegRCs, SizeOrder);
+
+  assert(SuperRegRCs.front() == BiggestSuperRegRC &&
+         "Biggest class wasn't first");
+
+  // Find all the subreg classes and order them by size too.
+  std::vector<std::pair<CodeGenRegisterClass *, BitVector>> SuperRegClasses;
+  for (auto &RC: RegClasses) {
+    BitVector SuperRegClassesBV(RegClasses.size());
+    RC.getSuperRegClasses(SubIdx, SuperRegClassesBV);
+    if (SuperRegClassesBV.any())
+      SuperRegClasses.push_back(std::make_pair(&RC, SuperRegClassesBV));
+  }
+  llvm::sort(SuperRegClasses,
+             [&](const std::pair<CodeGenRegisterClass *, BitVector> &A,
+                 const std::pair<CodeGenRegisterClass *, BitVector> &B) {
+               return SizeOrder(A.first, B.first);
+             });
+
+  // Find the biggest subclass and subreg class such that R:subidx is in the
+  // subreg class for all R in subclass.
+  //
+  // For example:
+  // All registers in X86's GR64 have a sub_32bit subregister but no class
+  // exists that contains all the 32-bit subregisters because GR64 contains RIP
+  // but GR32 does not contain EIP. Instead, we constrain SuperRegRC to
+  // GR32_with_sub_8bit (which is identical to GR32_with_sub_32bit) and then,
+  // having excluded RIP, we are able to find a SubRegRC (GR32).
+  CodeGenRegisterClass *ChosenSuperRegClass = nullptr;
+  CodeGenRegisterClass *SubRegRC = nullptr;
+  for (auto *SuperRegRC : SuperRegRCs) {
+    for (const auto &SuperRegClassPair : SuperRegClasses) {
+      const BitVector &SuperRegClassBV = SuperRegClassPair.second;
+      if (SuperRegClassBV[SuperRegRC->EnumValue]) {
+        SubRegRC = SuperRegClassPair.first;
+        ChosenSuperRegClass = SuperRegRC;
+
+        // If SubRegRC is bigger than SuperRegRC then there are members of
+        // SubRegRC that don't have super registers via SubIdx. Keep looking to
+        // find a better fit and fall back on this one if there isn't one.
+        //
+        // This is intended to prevent X86 from making odd choices such as
+        // picking LOW32_ADDR_ACCESS_RBP instead of GR32 in the example above.
+        // LOW32_ADDR_ACCESS_RBP is a valid choice but contains registers that
+        // aren't subregisters of SuperRegRC whereas GR32 has a direct 1:1
+        // mapping.
+        if (SuperRegRC->getMembers().size() >= SubRegRC->getMembers().size())
+          return std::make_pair(ChosenSuperRegClass, SubRegRC);
+      }
+    }
+
+    // If we found a fit but it wasn't quite ideal because SubRegRC had excess
+    // registers, then we're done.
+    if (ChosenSuperRegClass)
+      return std::make_pair(ChosenSuperRegClass, SubRegRC);
+  }
+
+  return std::nullopt;
+}
+
+// Set in Out the EnumValue bit of every register class recorded as a
+// super-register class of this one via sub-register index SubIdx. Leaves
+// Out untouched when no mapping exists for SubIdx.
+void CodeGenRegisterClass::getSuperRegClasses(const CodeGenSubRegIndex *SubIdx,
+                                              BitVector &Out) const {
+  auto FindI = SuperRegClasses.find(SubIdx);
+  if (FindI == SuperRegClasses.end())
+    return;
+  for (CodeGenRegisterClass *RC : FindI->second)
+    Out.set(RC->EnumValue);
+}
+
+// Populate a unique sorted list of units from a register set.
+// Artificial units are skipped; the sort + unique_copy pair appends each
+// remaining unit to RegUnits exactly once, in ascending order.
+void CodeGenRegisterClass::buildRegUnitSet(const CodeGenRegBank &RegBank,
+  std::vector<unsigned> &RegUnits) const {
+  std::vector<unsigned> TmpUnits;
+  for (RegUnitIterator UnitI(Members); UnitI.isValid(); ++UnitI) {
+    const RegUnit &RU = RegBank.getRegUnit(*UnitI);
+    if (!RU.Artificial)
+      TmpUnits.push_back(*UnitI);
+  }
+  llvm::sort(TmpUnits);
+  std::unique_copy(TmpUnits.begin(), TmpUnits.end(),
+                   std::back_inserter(RegUnits));
+}
+
+//===----------------------------------------------------------------------===//
+//                            CodeGenRegisterCategory
+//===----------------------------------------------------------------------===//
+
+// Build a register category from its record: resolve each entry of the
+// "Classes" list field into the corresponding CodeGenRegisterClass.
+CodeGenRegisterCategory::CodeGenRegisterCategory(CodeGenRegBank &RegBank,
+                                                 Record *R)
+    : TheDef(R), Name(std::string(R->getName())) {
+  for (Record *RegClass : R->getValueAsListOfDefs("Classes"))
+    Classes.push_back(RegBank.getRegClass(RegClass));
+}
+
+//===----------------------------------------------------------------------===//
+//                               CodeGenRegBank
+//===----------------------------------------------------------------------===//
+
+// Construct the register bank from the record keeper. The ordering of the
+// phases below matters: sub-register indices, then registers and tuples,
+// then the sub/super-register graph, then register classes.
+CodeGenRegBank::CodeGenRegBank(RecordKeeper &Records,
+                               const CodeGenHwModes &Modes) : CGH(Modes) {
+  // Configure register Sets to understand register classes and tuples.
+  Sets.addFieldExpander("RegisterClass", "MemberList");
+  Sets.addFieldExpander("CalleeSavedRegs", "SaveList");
+  Sets.addExpander("RegisterTuples",
+                   std::make_unique<TupleExpander>(SynthDefs));
+
+  // Read in the user-defined (named) sub-register indices.
+  // More indices will be synthesized later.
+  std::vector<Record*> SRIs = Records.getAllDerivedDefinitions("SubRegIndex");
+  llvm::sort(SRIs, LessRecord());
+  for (unsigned i = 0, e = SRIs.size(); i != e; ++i)
+    getSubRegIdx(SRIs[i]);
+  // Build composite maps from ComposedOf fields.
+  for (auto &Idx : SubRegIndices)
+    Idx.updateComponents(*this);
+
+  // Read in the register definitions.
+  std::vector<Record*> Regs = Records.getAllDerivedDefinitions("Register");
+  llvm::sort(Regs, LessRecordRegister());
+  // Assign the enumeration values.
+  for (unsigned i = 0, e = Regs.size(); i != e; ++i)
+    getReg(Regs[i]);
+
+  // Expand tuples and number the new registers.
+  std::vector<Record*> Tups =
+    Records.getAllDerivedDefinitions("RegisterTuples");
+
+  for (Record *R : Tups) {
+    std::vector<Record *> TupRegs = *Sets.expand(R);
+    llvm::sort(TupRegs, LessRecordRegister());
+    for (Record *RC : TupRegs)
+      getReg(RC);
+  }
+
+  // Now all the registers are known. Build the object graph of explicit
+  // register-register references.
+  for (auto &Reg : Registers)
+    Reg.buildObjectGraph(*this);
+
+  // Compute register name map.
+  for (auto &Reg : Registers)
+    // FIXME: This could just be RegistersByName[name] = register, except that
+    // causes some failures in MIPS - perhaps they have duplicate register name
+    // entries? (or maybe there's a reason for it - I don't know much about this
+    // code, just drive-by refactoring)
+    RegistersByName.insert(
+        std::make_pair(Reg.TheDef->getValueAsString("AsmName"), &Reg));
+
+  // Precompute all sub-register maps.
+  // This will create Composite entries for all inferred sub-register indices.
+  for (auto &Reg : Registers)
+    Reg.computeSubRegs(*this);
+
+  // Compute transitive closure of subregister index ConcatenationOf vectors
+  // and initialize ConcatIdx map.
+  for (CodeGenSubRegIndex &SRI : SubRegIndices) {
+    SRI.computeConcatTransitiveClosure();
+    if (!SRI.ConcatenationOf.empty())
+      ConcatIdx.insert(std::make_pair(
+          SmallVector<CodeGenSubRegIndex*,8>(SRI.ConcatenationOf.begin(),
+                                             SRI.ConcatenationOf.end()), &SRI));
+  }
+
+  // Infer even more sub-registers by combining leading super-registers.
+  for (auto &Reg : Registers)
+    if (Reg.CoveredBySubRegs)
+      Reg.computeSecondarySubRegs(*this);
+
+  // After the sub-register graph is complete, compute the topologically
+  // ordered SuperRegs list.
+  for (auto &Reg : Registers)
+    Reg.computeSuperRegs(*this);
+
+  // For each pair of Reg:SR, if both are non-artificial, mark the
+  // corresponding sub-register index as non-artificial.
+  for (auto &Reg : Registers) {
+    if (Reg.Artificial)
+      continue;
+    for (auto P : Reg.getSubRegs()) {
+      const CodeGenRegister *SR = P.second;
+      if (!SR->Artificial)
+        P.first->Artificial = false;
+    }
+  }
+
+  // Native register units are associated with a leaf register. They've all been
+  // discovered now.
+  NumNativeRegUnits = RegUnits.size();
+
+  // Read in register class definitions.
+  std::vector<Record*> RCs = Records.getAllDerivedDefinitions("RegisterClass");
+  if (RCs.empty())
+    PrintFatalError("No 'RegisterClass' subclasses defined!");
+
+  // Allocate user-defined register classes.
+  for (auto *R : RCs) {
+    RegClasses.emplace_back(*this, R);
+    CodeGenRegisterClass &RC = RegClasses.back();
+    if (!RC.Artificial)
+      addToMaps(&RC);
+  }
+
+  // Infer missing classes to create a full algebra.
+  computeInferredRegisterClasses();
+
+  // Order register classes topologically and assign enum values.
+  RegClasses.sort(TopoOrderRC);
+  unsigned i = 0;
+  for (auto &RC : RegClasses)
+    RC.EnumValue = i++;
+  CodeGenRegisterClass::computeSubClasses(*this);
+
+  // Read in the register category definitions.
+  std::vector<Record *> RCats =
+      Records.getAllDerivedDefinitions("RegisterCategory");
+  for (auto *R : RCats)
+    RegCategories.emplace_back(*this, R);
+}
+
+// Create a synthetic CodeGenSubRegIndex without a corresponding Record.
+// Enum values are 1-based, hence size() + 1.
+CodeGenSubRegIndex*
+CodeGenRegBank::createSubRegIndex(StringRef Name, StringRef Namespace) {
+  SubRegIndices.emplace_back(Name, Namespace, SubRegIndices.size() + 1);
+  return &SubRegIndices.back();
+}
+
+// Get or lazily create the CodeGenSubRegIndex for a SubRegIndex record.
+// The Def2SubRegIdx map memoizes the result, so each record maps to exactly
+// one index object with a stable 1-based enum value.
+CodeGenSubRegIndex *CodeGenRegBank::getSubRegIdx(Record *Def) {
+  CodeGenSubRegIndex *&Idx = Def2SubRegIdx[Def];
+  if (Idx)
+    return Idx;
+  SubRegIndices.emplace_back(Def, SubRegIndices.size() + 1);
+  Idx = &SubRegIndices.back();
+  return Idx;
+}
+
+// Look up the sub-register index for a record without creating one.
+// Returns null if the record was never registered via getSubRegIdx().
+const CodeGenSubRegIndex *
+CodeGenRegBank::findSubRegIdx(const Record* Def) const {
+  return Def2SubRegIdx.lookup(Def);
+}
+
+// Get or lazily create the CodeGenRegister for a Register record. Memoized
+// through Def2Reg; enum values are 1-based, hence size() + 1.
+CodeGenRegister *CodeGenRegBank::getReg(Record *Def) {
+  CodeGenRegister *&Reg = Def2Reg[Def];
+  if (Reg)
+    return Reg;
+  Registers.emplace_back(Def, Registers.size() + 1);
+  Reg = &Registers.back();
+  return Reg;
+}
+
+// Register RC in the lookup maps: by defining record (if any) and by its
+// structural key (members + size info).
+void CodeGenRegBank::addToMaps(CodeGenRegisterClass *RC) {
+  if (Record *Def = RC->getDef())
+    Def2RC.insert(std::make_pair(Def, RC));
+
+  // Duplicate classes are rejected by insert().
+  // That's OK, we only care about the properties handled by CGRC::Key.
+  CodeGenRegisterClass::Key K(*RC);
+  Key2RC.insert(std::make_pair(K, RC));
+}
+
+// Create a synthetic sub-class if it is missing.
+// Returns an existing class when one with the same members and size info is
+// already registered under the structural key.
+CodeGenRegisterClass*
+CodeGenRegBank::getOrCreateSubClass(const CodeGenRegisterClass *RC,
+                                    const CodeGenRegister::Vec *Members,
+                                    StringRef Name) {
+  // Synthetic sub-class has the same size and alignment as RC.
+  CodeGenRegisterClass::Key K(Members, RC->RSI);
+  RCKeyMap::const_iterator FoundI = Key2RC.find(K);
+  if (FoundI != Key2RC.end())
+    return FoundI->second;
+
+  // Sub-class doesn't exist, create a new one.
+  RegClasses.emplace_back(*this, Name, K);
+  addToMaps(&RegClasses.back());
+  return &RegClasses.back();
+}
+
+// Look up the register class for a RegisterClass record. Emits a fatal
+// error (does not return) if the record is unknown.
+CodeGenRegisterClass *CodeGenRegBank::getRegClass(const Record *Def) const {
+  if (CodeGenRegisterClass *RC = Def2RC.lookup(Def))
+    return RC;
+
+  PrintFatalError(Def->getLoc(), "Not a known RegisterClass!");
+}
+
+// Return the composition A o B, synthesizing a new sub-register index named
+// "A_then_B" (and recording it as A's composite with B) when no composition
+// exists yet.
+CodeGenSubRegIndex*
+CodeGenRegBank::getCompositeSubRegIndex(CodeGenSubRegIndex *A,
+                                        CodeGenSubRegIndex *B) {
+  // Look for an existing entry.
+  CodeGenSubRegIndex *Comp = A->compose(B);
+  if (Comp)
+    return Comp;
+
+  // None exists, synthesize one.
+  std::string Name = A->getName() + "_then_" + B->getName();
+  Comp = createSubRegIndex(Name, A->getNamespace());
+  A->addComposite(B, Comp);
+  return Comp;
+}
+
+// Return (or synthesize) the sub-register index representing the
+// concatenation of Parts. The synthesized index gets the combined size,
+// an offset only when the parts are contiguous, and records Parts in its
+// ConcatenationOf list. Results are memoized in ConcatIdx.
+CodeGenSubRegIndex *CodeGenRegBank::
+getConcatSubRegIndex(const SmallVector<CodeGenSubRegIndex *, 8> &Parts) {
+  assert(Parts.size() > 1 && "Need two parts to concatenate");
+#ifndef NDEBUG
+  for (CodeGenSubRegIndex *Idx : Parts) {
+    assert(Idx->ConcatenationOf.empty() && "No transitive closure?");
+  }
+#endif
+
+  // Look for an existing entry.
+  CodeGenSubRegIndex *&Idx = ConcatIdx[Parts];
+  if (Idx)
+    return Idx;
+
+  // None exists, synthesize one.
+  std::string Name = Parts.front()->getName();
+  // Determine whether all parts are contiguous.
+  bool isContinuous = true;
+  unsigned Size = Parts.front()->Size;
+  unsigned LastOffset = Parts.front()->Offset;
+  unsigned LastSize = Parts.front()->Size;
+  // A Size/Offset of (uint16_t)-1 means "unknown"; propagate it.
+  unsigned UnknownSize = (uint16_t)-1;
+  for (unsigned i = 1, e = Parts.size(); i != e; ++i) {
+    Name += '_';
+    Name += Parts[i]->getName();
+    if (Size == UnknownSize || Parts[i]->Size == UnknownSize)
+      Size = UnknownSize;
+    else
+      Size += Parts[i]->Size;
+    if (LastSize == UnknownSize || Parts[i]->Offset != (LastOffset + LastSize))
+      isContinuous = false;
+    LastOffset = Parts[i]->Offset;
+    LastSize = Parts[i]->Size;
+  }
+  Idx = createSubRegIndex(Name, Parts.front()->getNamespace());
+  Idx->Size = Size;
+  Idx->Offset = isContinuous ? Parts.front()->Offset : -1;
+  Idx->ConcatenationOf.assign(Parts.begin(), Parts.end());
+  return Idx;
+}
+
+// Compute all sub-register index compositions by walking the register
+// graph: for every Reg1:Idx1 == Reg2 and Reg2:Idx2 == Reg3, record
+// Idx1 o Idx2 == Idx3 where Reg1:Idx3 == Reg3. Ambiguous compositions are
+// warned about (unless user-defined and consistent) but still allowed.
+void CodeGenRegBank::computeComposites() {
+  using RegMap = std::map<const CodeGenRegister*, const CodeGenRegister*>;
+
+  // Subreg -> { Reg->Reg }, where the right-hand side is the mapping from
+  // register to (sub)register associated with the action of the left-hand
+  // side subregister.
+  std::map<const CodeGenSubRegIndex*, RegMap> SubRegAction;
+  for (const CodeGenRegister &R : Registers) {
+    const CodeGenRegister::SubRegMap &SM = R.getSubRegs();
+    for (std::pair<const CodeGenSubRegIndex*, const CodeGenRegister*> P : SM)
+      SubRegAction[P.first].insert({&R, P.second});
+  }
+
+  // Calculate the composition of two subregisters as compositions of their
+  // associated actions.
+  auto compose = [&SubRegAction] (const CodeGenSubRegIndex *Sub1,
+                                  const CodeGenSubRegIndex *Sub2) {
+    RegMap C;
+    const RegMap &Img1 = SubRegAction.at(Sub1);
+    const RegMap &Img2 = SubRegAction.at(Sub2);
+    for (std::pair<const CodeGenRegister*, const CodeGenRegister*> P : Img1) {
+      auto F = Img2.find(P.second);
+      if (F != Img2.end())
+        C.insert({P.first, F->second});
+    }
+    return C;
+  };
+
+  // Check if the two maps agree on the intersection of their domains.
+  auto agree = [] (const RegMap &Map1, const RegMap &Map2) {
+    // Technically speaking, an empty map agrees with any other map, but
+    // this could flag false positives. We're interested in non-vacuous
+    // agreements.
+    if (Map1.empty() || Map2.empty())
+      return false;
+    for (std::pair<const CodeGenRegister*, const CodeGenRegister*> P : Map1) {
+      auto F = Map2.find(P.first);
+      if (F == Map2.end() || P.second != F->second)
+        return false;
+    }
+    return true;
+  };
+
+  using CompositePair = std::pair<const CodeGenSubRegIndex*,
+                                  const CodeGenSubRegIndex*>;
+  SmallSet<CompositePair,4> UserDefined;
+  for (const CodeGenSubRegIndex &Idx : SubRegIndices)
+    for (auto P : Idx.getComposites())
+      UserDefined.insert(std::make_pair(&Idx, P.first));
+
+  // Keep track of TopoSigs visited. We only need to visit each TopoSig once,
+  // and many registers will share TopoSigs on regular architectures.
+  BitVector TopoSigs(getNumTopoSigs());
+
+  for (const auto &Reg1 : Registers) {
+    // Skip identical subreg structures already processed.
+    if (TopoSigs.test(Reg1.getTopoSig()))
+      continue;
+    TopoSigs.set(Reg1.getTopoSig());
+
+    const CodeGenRegister::SubRegMap &SRM1 = Reg1.getSubRegs();
+    for (auto I1 : SRM1) {
+      CodeGenSubRegIndex *Idx1 = I1.first;
+      CodeGenRegister *Reg2 = I1.second;
+      // Ignore identity compositions.
+      if (&Reg1 == Reg2)
+        continue;
+      const CodeGenRegister::SubRegMap &SRM2 = Reg2->getSubRegs();
+      // Try composing Idx1 with another SubRegIndex.
+      for (auto I2 : SRM2) {
+        CodeGenSubRegIndex *Idx2 = I2.first;
+        CodeGenRegister *Reg3 = I2.second;
+        // Ignore identity compositions.
+        if (Reg2 == Reg3)
+          continue;
+        // OK Reg1:IdxPair == Reg3. Find the index with Reg:Idx == Reg3.
+        CodeGenSubRegIndex *Idx3 = Reg1.getSubRegIndex(Reg3);
+        assert(Idx3 && "Sub-register doesn't have an index");
+
+        // Conflicting composition? Emit a warning but allow it.
+        if (CodeGenSubRegIndex *Prev = Idx1->addComposite(Idx2, Idx3)) {
+          // If the composition was not user-defined, always emit a warning.
+          if (!UserDefined.count({Idx1, Idx2}) ||
+              agree(compose(Idx1, Idx2), SubRegAction.at(Idx3)))
+            PrintWarning(Twine("SubRegIndex ") + Idx1->getQualifiedName() +
+                         " and " + Idx2->getQualifiedName() +
+                         " compose ambiguously as " + Prev->getQualifiedName() +
+                         " or " + Idx3->getQualifiedName());
+        }
+      }
+    }
+  }
+}
+
+// Compute lane masks. This is similar to register units, but at the
+// sub-register index level. Each bit in the lane mask is like a register unit
+// class, and two lane masks will have a bit in common if two sub-register
+// indices overlap in some register.
+//
+// Conservatively share a lane mask bit if two sub-register indices overlap in
+// some registers, but not in others. That shouldn't happen a lot.
+void CodeGenRegBank::computeSubRegLaneMasks() {
+  // First assign individual bits to all the leaf indices.
+  unsigned Bit = 0;
+  // Determine mask of lanes that cover their registers.
+  CoveringLanes = LaneBitmask::getAll();
+  for (auto &Idx : SubRegIndices) {
+    if (Idx.getComposites().empty()) {
+      // NOTE(review): this guard uses '>'; when Bit == BitWidth the check
+      // passes and getLane(BitWidth) would shift out of range. '>=' looks
+      // intended — matches upstream LLVM, so flagged rather than changed.
+      if (Bit > LaneBitmask::BitWidth) {
+        PrintFatalError(
+          Twine("Ran out of lanemask bits to represent subregister ")
+          + Idx.getName());
+      }
+      Idx.LaneMask = LaneBitmask::getLane(Bit);
+      ++Bit;
+    } else {
+      Idx.LaneMask = LaneBitmask::getNone();
+    }
+  }
+
+  // Compute transformation sequences for composeSubRegIndexLaneMask. The idea
+  // here is that for each possible target subregister we look at the leafs
+  // in the subregister graph that compose for this target and create
+  // transformation sequences for the lanemasks. Each step in the sequence
+  // consists of a bitmask and a bitrotate operation. As the rotation amounts
+  // are usually the same for many subregisters we can easily combine the steps
+  // by combining the masks.
+  for (const auto &Idx : SubRegIndices) {
+    const auto &Composites = Idx.getComposites();
+    auto &LaneTransforms = Idx.CompositionLaneMaskTransform;
+
+    if (Composites.empty()) {
+      // Moving from a class with no subregisters we just had a single lane:
+      // The subregister must be a leaf subregister and only occupies 1 bit.
+      // Move the bit from the class without subregisters into that position.
+      unsigned DstBit = Idx.LaneMask.getHighestLane();
+      assert(Idx.LaneMask == LaneBitmask::getLane(DstBit) &&
+             "Must be a leaf subregister");
+      MaskRolPair MaskRol = { LaneBitmask::getLane(0), (uint8_t)DstBit };
+      LaneTransforms.push_back(MaskRol);
+    } else {
+      // Go through all leaf subregisters and find the ones that compose with
+      // Idx. These make out all possible valid bits in the lane mask we want to
+      // transform. Looking only at the leafs ensure that only a single bit in
+      // the mask is set.
+      unsigned NextBit = 0;
+      for (auto &Idx2 : SubRegIndices) {
+        // Skip non-leaf subregisters.
+        if (!Idx2.getComposites().empty())
+          continue;
+        // Replicate the behaviour from the lane mask generation loop above.
+        unsigned SrcBit = NextBit;
+        LaneBitmask SrcMask = LaneBitmask::getLane(SrcBit);
+        // Saturate at the last representable lane bit.
+        if (NextBit < LaneBitmask::BitWidth-1)
+          ++NextBit;
+        assert(Idx2.LaneMask == SrcMask);
+
+        // Get the composed subregister if there is any.
+        auto C = Composites.find(&Idx2);
+        if (C == Composites.end())
+          continue;
+        const CodeGenSubRegIndex *Composite = C->second;
+        // The Composed subreg should be a leaf subreg too
+        assert(Composite->getComposites().empty());
+
+        // Create Mask+Rotate operation and merge with existing ops if possible.
+        unsigned DstBit = Composite->LaneMask.getHighestLane();
+        int Shift = DstBit - SrcBit;
+        uint8_t RotateLeft = Shift >= 0 ? (uint8_t)Shift
+                                        : LaneBitmask::BitWidth + Shift;
+        for (auto &I : LaneTransforms) {
+          if (I.RotateLeft == RotateLeft) {
+            I.Mask |= SrcMask;
+            SrcMask = LaneBitmask::getNone();
+          }
+        }
+        if (SrcMask.any()) {
+          MaskRolPair MaskRol = { SrcMask, RotateLeft };
+          LaneTransforms.push_back(MaskRol);
+        }
+      }
+    }
+
+    // Optimize if the transformation consists of one step only: Set mask to
+    // 0xffffffff (including some irrelevant invalid bits) so that it should
+    // merge with more entries later while compressing the table.
+    if (LaneTransforms.size() == 1)
+      LaneTransforms[0].Mask = LaneBitmask::getAll();
+
+    // Further compression optimization: For invalid compositions resulting
+    // in a sequence with 0 entries we can just pick any other. Choose
+    // Mask 0xffffffff with Rotation 0.
+    if (LaneTransforms.size() == 0) {
+      MaskRolPair P = { LaneBitmask::getAll(), 0 };
+      LaneTransforms.push_back(P);
+    }
+  }
+
+  // FIXME: What if ad-hoc aliasing introduces overlaps that aren't represented
+  // by the sub-register graph? This doesn't occur in any known targets.
+
+  // Inherit lanes from composites.
+  for (const auto &Idx : SubRegIndices) {
+    LaneBitmask Mask = Idx.computeLaneMask();
+    // If some super-registers without CoveredBySubRegs use this index, we can
+    // no longer assume that the lanes are covering their registers.
+    if (!Idx.AllSuperRegsCovered)
+      CoveringLanes &= ~Mask;
+  }
+
+  // Compute lane mask combinations for register classes.
+  for (auto &RegClass : RegClasses) {
+    LaneBitmask LaneMask;
+    for (const auto &SubRegIndex : SubRegIndices) {
+      if (RegClass.getSubClassWithSubReg(&SubRegIndex) == nullptr)
+        continue;
+      LaneMask |= SubRegIndex.LaneMask;
+    }
+
+    // For classes without any subregisters set LaneMask to 1 instead of 0.
+    // This makes it easier for client code to handle classes uniformly.
+    if (LaneMask.none())
+      LaneMask = LaneBitmask::getLane(0);
+
+    RegClass.LaneMask = LaneMask;
+  }
+}
+
+namespace {
+
+// UberRegSet is a helper class for computeRegUnitWeights. Each UberRegSet is
+// the transitive closure of the union of overlapping register
+// classes. Together, the UberRegSets form a partition of the registers. If we
+// consider overlapping register classes to be connected, then each UberRegSet
+// is a set of connected components.
+//
+// An UberRegSet will likely be a horizontal slice of register names of
+// the same width. Nontrivial subregisters should then be in a separate
+// UberRegSet. But this property isn't required for valid computation of
+// register unit weights.
+//
+// A Weight field caches the max per-register unit weight in each UberRegSet.
+//
+// A set of SingularDeterminants flags single units of some register in this set
+// for which the unit weight equals the set weight. These units should not have
+// their weight increased.
+struct UberRegSet {
+  // Sorted, unique list of member registers.
+  CodeGenRegister::Vec Regs;
+  // Cached maximum per-register weight over the set.
+  unsigned Weight = 0;
+  // Units that singularly determine the set weight; see comment above.
+  CodeGenRegister::RegUnitList SingularDeterminants;
+
+  UberRegSet() = default;
+};
+
+} // end anonymous namespace
+
+// Partition registers into UberRegSets, where each set is the transitive
+// closure of the union of overlapping register classes.
+//
+// UberRegSets[0] is a special non-allocatable set.
+static void computeUberSets(std::vector<UberRegSet> &UberSets,
+                            std::vector<UberRegSet*> &RegSets,
+                            CodeGenRegBank &RegBank) {
+  const auto &Registers = RegBank.getRegisters();
+
+  // The Register EnumValue is one greater than its index into Registers.
+  assert(Registers.size() == Registers.back().EnumValue &&
+         "register enum value mismatch");
+
+  // For simplicity make the SetID the same as EnumValue.
+  IntEqClasses UberSetIDs(Registers.size()+1);
+  std::set<unsigned> AllocatableRegs;
+  for (auto &RegClass : RegBank.getRegClasses()) {
+    if (!RegClass.Allocatable)
+      continue;
+
+    const CodeGenRegister::Vec &Regs = RegClass.getMembers();
+    if (Regs.empty())
+      continue;
+
+    // Join every member of the class into the equivalence class of its
+    // first member.
+    unsigned USetID = UberSetIDs.findLeader((*Regs.begin())->EnumValue);
+    assert(USetID && "register number 0 is invalid");
+
+    AllocatableRegs.insert((*Regs.begin())->EnumValue);
+    for (const CodeGenRegister *CGR : llvm::drop_begin(Regs)) {
+      AllocatableRegs.insert(CGR->EnumValue);
+      UberSetIDs.join(USetID, CGR->EnumValue);
+    }
+  }
+  // Combine non-allocatable regs.
+  for (const auto &Reg : Registers) {
+    unsigned RegNum = Reg.EnumValue;
+    if (AllocatableRegs.count(RegNum))
+      continue;
+
+    UberSetIDs.join(0, RegNum);
+  }
+  UberSetIDs.compress();
+
+  // Make the first UberSet a special unallocatable set.
+  unsigned ZeroID = UberSetIDs[0];
+
+  // Insert Registers into the UberSets formed by union-find.
+  // Do not resize after this.
+  UberSets.resize(UberSetIDs.getNumClasses());
+  unsigned i = 0;
+  for (const CodeGenRegister &Reg : Registers) {
+    unsigned USetID = UberSetIDs[Reg.EnumValue];
+    // Swap set 0 with the unallocatable set's compressed ID so that the
+    // unallocatable registers always land in UberSets[0].
+    if (!USetID)
+      USetID = ZeroID;
+    else if (USetID == ZeroID)
+      USetID = 0;
+
+    UberRegSet *USet = &UberSets[USetID];
+    USet->Regs.push_back(&Reg);
+    sortAndUniqueRegisters(USet->Regs);
+    RegSets[i++] = USet;
+  }
+}
+
+// Recompute each UberSet weight after changing unit weights.
+static void computeUberWeights(std::vector<UberRegSet> &UberSets,
+                               CodeGenRegBank &RegBank) {
+  // Skip the first unallocatable set.
+  for (std::vector<UberRegSet>::iterator I = std::next(UberSets.begin()),
+         E = UberSets.end(); I != E; ++I) {
+
+    // Initialize all unit weights in this set, and remember the max units/reg.
+    const CodeGenRegister *Reg = nullptr;
+    unsigned MaxWeight = 0, Weight = 0;
+    // RegUnitIterator walks (register, unit) pairs; Weight accumulates the
+    // unit weights of the current register and resets on register change.
+    for (RegUnitIterator UnitI(I->Regs); UnitI.isValid(); ++UnitI) {
+      if (Reg != UnitI.getReg()) {
+        if (Weight > MaxWeight)
+          MaxWeight = Weight;
+        Reg = UnitI.getReg();
+        Weight = 0;
+      }
+      if (!RegBank.getRegUnit(*UnitI).Artificial) {
+        unsigned UWeight = RegBank.getRegUnit(*UnitI).Weight;
+        if (!UWeight) {
+          // Zero-weight native units get a default weight of 1.
+          UWeight = 1;
+          RegBank.increaseRegUnitWeight(*UnitI, UWeight);
+        }
+        Weight += UWeight;
+      }
+    }
+    if (Weight > MaxWeight)
+      MaxWeight = Weight;
+    if (I->Weight != MaxWeight) {
+      LLVM_DEBUG(dbgs() << "UberSet " << I - UberSets.begin() << " Weight "
+                        << MaxWeight;
+                 for (auto &Unit
+                      : I->Regs) dbgs()
+                 << " " << Unit->getName();
+                 dbgs() << "\n");
+      // Update the set weight.
+      I->Weight = MaxWeight;
+    }
+
+    // Find singular determinants.
+    for (const auto R : I->Regs) {
+      if (R->getRegUnits().count() == 1 && R->getWeight(RegBank) == I->Weight) {
+        I->SingularDeterminants |= R->getRegUnits();
+      }
+    }
+  }
+}
+
+// normalizeWeight is a computeRegUnitWeights helper that adjusts the weight of
+// a register and its subregisters so that they have the same weight as their
+// UberSet. Self-recursion processes the subregister tree in postorder so
+// subregisters are normalized first.
+//
+// Side effects:
+// - creates new adopted register units
+// - causes superregisters to inherit adopted units
+// - increases the weight of "singular" units
+// - induces recomputation of UberWeights.
+static bool normalizeWeight(CodeGenRegister *Reg,
+                            std::vector<UberRegSet> &UberSets,
+                            std::vector<UberRegSet*> &RegSets,
+                            BitVector &NormalRegs,
+                            CodeGenRegister::RegUnitList &NormalUnits,
+                            CodeGenRegBank &RegBank) {
+  // Process each register at most once per top-level call.
+  NormalRegs.resize(std::max(Reg->EnumValue + 1, NormalRegs.size()));
+  if (NormalRegs.test(Reg->EnumValue))
+    return false;
+  NormalRegs.set(Reg->EnumValue);
+
+  bool Changed = false;
+  const CodeGenRegister::SubRegMap &SRM = Reg->getSubRegs();
+  for (auto SRI : SRM) {
+    if (SRI.second == Reg)
+      continue; // self-cycles happen
+
+    Changed |= normalizeWeight(SRI.second, UberSets, RegSets, NormalRegs,
+                               NormalUnits, RegBank);
+  }
+  // Postorder register normalization.
+
+  // Inherit register units newly adopted by subregisters.
+  if (Reg->inheritRegUnits(RegBank))
+    computeUberWeights(UberSets, RegBank);
+
+  // Check if this register is too skinny for its UberRegSet.
+  UberRegSet *UberSet = RegSets[RegBank.getRegIndex(Reg)];
+
+  unsigned RegWeight = Reg->getWeight(RegBank);
+  if (UberSet->Weight > RegWeight) {
+    // A register unit's weight can be adjusted only if it is the singular unit
+    // for this register, has not been used to normalize a subregister's set,
+    // and has not already been used to singularly determine this UberRegSet.
+    unsigned AdjustUnit = *Reg->getRegUnits().begin();
+    if (Reg->getRegUnits().count() != 1
+        || hasRegUnit(NormalUnits, AdjustUnit)
+        || hasRegUnit(UberSet->SingularDeterminants, AdjustUnit)) {
+      // We don't have an adjustable unit, so adopt a new one.
+      AdjustUnit = RegBank.newRegUnit(UberSet->Weight - RegWeight);
+      Reg->adoptRegUnit(AdjustUnit);
+      // Adopting a unit does not immediately require recomputing set weights.
+    }
+    else {
+      // Adjust the existing single unit.
+      if (!RegBank.getRegUnit(AdjustUnit).Artificial)
+        RegBank.increaseRegUnitWeight(AdjustUnit, UberSet->Weight - RegWeight);
+      // The unit may be shared among sets and registers within this set.
+      computeUberWeights(UberSets, RegBank);
+    }
+    Changed = true;
+  }
+
+  // Mark these units normalized so superregisters can't change their weights.
+  NormalUnits |= Reg->getRegUnits();
+
+  return Changed;
+}
+
+// Compute a weight for each register unit created during getSubRegs.
+//
+// The goal is that two registers in the same class will have the same weight,
+// where each register's weight is defined as sum of its units' weights.
+void CodeGenRegBank::computeRegUnitWeights() {
+ std::vector<UberRegSet> UberSets;
+ std::vector<UberRegSet*> RegSets(Registers.size());
+ computeUberSets(UberSets, RegSets, *this);
+ // UberSets and RegSets are now immutable.
+
+ computeUberWeights(UberSets, *this);
+
+ // Iterate over each Register, normalizing the unit weights until reaching
+ // a fix point.
+ unsigned NumIters = 0;
+ // The iteration count is bounded by NumNativeRegUnits (asserted below);
+ // exceeding that indicates the normalization is not converging.
+ for (bool Changed = true; Changed; ++NumIters) {
+ assert(NumIters <= NumNativeRegUnits && "Runaway register unit weights");
+ (void) NumIters;
+ Changed = false;
+ for (auto &Reg : Registers) {
+ // Fresh visited sets per top-level register so each full traversal
+ // re-examines the whole sub-register tree.
+ CodeGenRegister::RegUnitList NormalUnits;
+ BitVector NormalRegs;
+ Changed |= normalizeWeight(&Reg, UberSets, RegSets, NormalRegs,
+ NormalUnits, *this);
+ }
+ }
+}
+
+// Find a set in UniqueSets with the same elements as Set.
+// Return an iterator into UniqueSets.
+// Linear scan comparing the Units vectors; returns UniqueSets.end() when no
+// set with identical units exists.
+static std::vector<RegUnitSet>::const_iterator
+findRegUnitSet(const std::vector<RegUnitSet> &UniqueSets,
+ const RegUnitSet &Set) {
+ std::vector<RegUnitSet>::const_iterator
+ I = UniqueSets.begin(), E = UniqueSets.end();
+ for(;I != E; ++I) {
+ if (I->Units == Set.Units)
+ break;
+ }
+ return I;
+}
+
+// Return true if the RUSubSet is a subset of RUSuperSet.
+// Both vectors must be sorted ascending (std::includes precondition).
+static bool isRegUnitSubSet(const std::vector<unsigned> &RUSubSet,
+ const std::vector<unsigned> &RUSuperSet) {
+ return std::includes(RUSuperSet.begin(), RUSuperSet.end(),
+ RUSubSet.begin(), RUSubSet.end());
+}
+
+/// Iteratively prune unit sets. Prune subsets that are close to the superset,
+/// but with one or two registers removed. We occasionally have registers like
+/// APSR and PC thrown in with the general registers. We also see many
+/// special-purpose register subsets, such as tail-call and Thumb
+/// encodings. Generating all possible overlapping sets is combinatorial and
+/// overkill for modeling pressure. Ideally we could fix this statically in
+/// tablegen by (1) having the target define register classes that only include
+/// the allocatable registers and marking other classes as non-allocatable and
+/// (2) having a way to mark special purpose classes as "don't-care" classes for
+/// the purpose of pressure. However, we make an attempt to handle targets that
+/// are not nicely defined by merging nearly identical register unit sets
+/// statically. This generates smaller tables. Then, dynamically, we adjust the
+/// set limit by filtering the reserved registers.
+///
+/// Merge sets only if the units have the same weight. For example, on ARM,
+/// Q-tuples with ssub index 0 include all S regs but also include D16+. We
+/// should not expand the S set to include D regs.
+void CodeGenRegBank::pruneUnitSets() {
+ assert(RegClassUnitSets.empty() && "this invalidates RegClassUnitSets");
+
+ // Form an equivalence class of UnitSets with no significant difference.
+ // SuperSetIDs collects the indices of sets that were NOT subsumed by any
+ // other set; these survive the pruning pass.
+ std::vector<unsigned> SuperSetIDs;
+ for (unsigned SubIdx = 0, EndIdx = RegUnitSets.size();
+ SubIdx != EndIdx; ++SubIdx) {
+ const RegUnitSet &SubSet = RegUnitSets[SubIdx];
+ unsigned SuperIdx = 0;
+ for (; SuperIdx != EndIdx; ++SuperIdx) {
+ if (SuperIdx == SubIdx)
+ continue;
+
+ unsigned UnitWeight = RegUnits[SubSet.Units[0]].Weight;
+ const RegUnitSet &SuperSet = RegUnitSets[SuperIdx];
+ // Subsume only when: SubSet is contained in SuperSet, differs by fewer
+ // than 3 units, and first/last unit weights match (same-weight rule
+ // described in the function comment above).
+ if (isRegUnitSubSet(SubSet.Units, SuperSet.Units)
+ && (SubSet.Units.size() + 3 > SuperSet.Units.size())
+ && UnitWeight == RegUnits[SuperSet.Units[0]].Weight
+ && UnitWeight == RegUnits[SuperSet.Units.back()].Weight) {
+ LLVM_DEBUG(dbgs() << "UnitSet " << SubIdx << " subsumed by " << SuperIdx
+ << "\n");
+ // We can pick any of the set names for the merged set. Go for the
+ // shortest one to avoid picking the name of one of the classes that are
+ // artificially created by tablegen. So "FPR128_lo" instead of
+ // "QQQQ_with_qsub3_in_FPR128_lo".
+ if (RegUnitSets[SubIdx].Name.size() < RegUnitSets[SuperIdx].Name.size())
+ RegUnitSets[SuperIdx].Name = RegUnitSets[SubIdx].Name;
+ break;
+ }
+ }
+ if (SuperIdx == EndIdx)
+ SuperSetIDs.push_back(SubIdx);
+ }
+ // Populate PrunedUnitSets with each equivalence class's superset.
+ std::vector<RegUnitSet> PrunedUnitSets(SuperSetIDs.size());
+ for (unsigned i = 0, e = SuperSetIDs.size(); i != e; ++i) {
+ unsigned SuperIdx = SuperSetIDs[i];
+ PrunedUnitSets[i].Name = RegUnitSets[SuperIdx].Name;
+ PrunedUnitSets[i].Units.swap(RegUnitSets[SuperIdx].Units);
+ }
+ RegUnitSets.swap(PrunedUnitSets);
+}
+
+// Create a RegUnitSet for each RegClass that contains all units in the class
+// including adopted units that are necessary to model register pressure. Then
+// iteratively compute RegUnitSets such that the union of any two overlapping
+// RegUnitSets is repreresented.
+//
+// RegisterInfoEmitter will map each RegClass to its RegUnitClass and any
+// RegUnitSet that is a superset of that RegUnitClass.
+void CodeGenRegBank::computeRegUnitSets() {
+ assert(RegUnitSets.empty() && "dirty RegUnitSets");
+
+ // Compute a unique RegUnitSet for each RegClass.
+ auto &RegClasses = getRegClasses();
+ for (auto &RC : RegClasses) {
+ // Skip classes that can't contribute to pressure sets.
+ if (!RC.Allocatable || RC.Artificial || !RC.GeneratePressureSet)
+ continue;
+
+ // Speculatively grow the RegUnitSets to hold the new set.
+ RegUnitSets.resize(RegUnitSets.size() + 1);
+ RegUnitSets.back().Name = RC.getName();
+
+ // Compute a sorted list of units in this class.
+ RC.buildRegUnitSet(*this, RegUnitSets.back().Units);
+
+ // Find an existing RegUnitSet.
+ std::vector<RegUnitSet>::const_iterator SetI =
+ findRegUnitSet(RegUnitSets, RegUnitSets.back());
+ // If a duplicate was found (before the just-added last element), drop the
+ // speculative entry again.
+ if (SetI != std::prev(RegUnitSets.end()))
+ RegUnitSets.pop_back();
+ }
+
+ if (RegUnitSets.empty())
+ PrintFatalError("RegUnitSets cannot be empty!");
+
+ LLVM_DEBUG(dbgs() << "\nBefore pruning:\n"; for (unsigned USIdx = 0,
+ USEnd = RegUnitSets.size();
+ USIdx < USEnd; ++USIdx) {
+ dbgs() << "UnitSet " << USIdx << " " << RegUnitSets[USIdx].Name << ":";
+ for (auto &U : RegUnitSets[USIdx].Units)
+ printRegUnitName(U);
+ dbgs() << "\n";
+ });
+
+ // Iteratively prune unit sets.
+ pruneUnitSets();
+
+ LLVM_DEBUG(dbgs() << "\nBefore union:\n"; for (unsigned USIdx = 0,
+ USEnd = RegUnitSets.size();
+ USIdx < USEnd; ++USIdx) {
+ dbgs() << "UnitSet " << USIdx << " " << RegUnitSets[USIdx].Name << ":";
+ for (auto &U : RegUnitSets[USIdx].Units)
+ printRegUnitName(U);
+ dbgs() << "\n";
+ } dbgs() << "\nUnion sets:\n");
+
+ // Iterate over all unit sets, including new ones added by this loop.
+ // Note: EndIdx is fixed at the current size, so sets appended below are
+ // only visited as the outer Idx advances into the appended region.
+ unsigned NumRegUnitSubSets = RegUnitSets.size();
+ for (unsigned Idx = 0, EndIdx = RegUnitSets.size(); Idx != EndIdx; ++Idx) {
+ // In theory, this is combinatorial. In practice, it needs to be bounded
+ // by a small number of sets for regpressure to be efficient.
+ // If the assert is hit, we need to implement pruning.
+ assert(Idx < (2*NumRegUnitSubSets) && "runaway unit set inference");
+
+ // Compare new sets with all original classes.
+ for (unsigned SearchIdx = (Idx >= NumRegUnitSubSets) ? 0 : Idx+1;
+ SearchIdx != EndIdx; ++SearchIdx) {
+ std::set<unsigned> Intersection;
+ std::set_intersection(RegUnitSets[Idx].Units.begin(),
+ RegUnitSets[Idx].Units.end(),
+ RegUnitSets[SearchIdx].Units.begin(),
+ RegUnitSets[SearchIdx].Units.end(),
+ std::inserter(Intersection, Intersection.begin()));
+ // Disjoint sets need no union representation.
+ if (Intersection.empty())
+ continue;
+
+ // Speculatively grow the RegUnitSets to hold the new set.
+ RegUnitSets.resize(RegUnitSets.size() + 1);
+ RegUnitSets.back().Name =
+ RegUnitSets[Idx].Name + "_with_" + RegUnitSets[SearchIdx].Name;
+
+ std::set_union(RegUnitSets[Idx].Units.begin(),
+ RegUnitSets[Idx].Units.end(),
+ RegUnitSets[SearchIdx].Units.begin(),
+ RegUnitSets[SearchIdx].Units.end(),
+ std::inserter(RegUnitSets.back().Units,
+ RegUnitSets.back().Units.begin()));
+
+ // Find an existing RegUnitSet, or add the union to the unique sets.
+ std::vector<RegUnitSet>::const_iterator SetI =
+ findRegUnitSet(RegUnitSets, RegUnitSets.back());
+ if (SetI != std::prev(RegUnitSets.end()))
+ RegUnitSets.pop_back();
+ else {
+ LLVM_DEBUG(dbgs() << "UnitSet " << RegUnitSets.size() - 1 << " "
+ << RegUnitSets.back().Name << ":";
+ for (auto &U
+ : RegUnitSets.back().Units) printRegUnitName(U);
+ dbgs() << "\n";);
+ }
+ }
+ }
+
+ // Iteratively prune unit sets after inferring supersets.
+ pruneUnitSets();
+
+ LLVM_DEBUG(
+ dbgs() << "\n"; for (unsigned USIdx = 0, USEnd = RegUnitSets.size();
+ USIdx < USEnd; ++USIdx) {
+ dbgs() << "UnitSet " << USIdx << " " << RegUnitSets[USIdx].Name << ":";
+ for (auto &U : RegUnitSets[USIdx].Units)
+ printRegUnitName(U);
+ dbgs() << "\n";
+ });
+
+ // For each register class, list the UnitSets that are supersets.
+ RegClassUnitSets.resize(RegClasses.size());
+ int RCIdx = -1;
+ for (auto &RC : RegClasses) {
+ ++RCIdx;
+ if (!RC.Allocatable)
+ continue;
+
+ // Recompute the sorted list of units in this class.
+ std::vector<unsigned> RCRegUnits;
+ RC.buildRegUnitSet(*this, RCRegUnits);
+
+ // Don't increase pressure for unallocatable regclasses.
+ if (RCRegUnits.empty())
+ continue;
+
+ LLVM_DEBUG(dbgs() << "RC " << RC.getName() << " Units:\n";
+ for (auto U
+ : RCRegUnits) printRegUnitName(U);
+ dbgs() << "\n UnitSetIDs:");
+
+ // Find all supersets.
+ for (unsigned USIdx = 0, USEnd = RegUnitSets.size();
+ USIdx != USEnd; ++USIdx) {
+ if (isRegUnitSubSet(RCRegUnits, RegUnitSets[USIdx].Units)) {
+ LLVM_DEBUG(dbgs() << " " << USIdx);
+ RegClassUnitSets[RCIdx].push_back(USIdx);
+ }
+ }
+ LLVM_DEBUG(dbgs() << "\n");
+ assert((!RegClassUnitSets[RCIdx].empty() || !RC.GeneratePressureSet) &&
+ "missing unit set for regclass");
+ }
+
+ // For each register unit, ensure that we have the list of UnitSets that
+ // contain the unit. Normally, this matches an existing list of UnitSets for a
+ // register class. If not, we create a new entry in RegClassUnitSets as a
+ // "fake" register class.
+ for (unsigned UnitIdx = 0, UnitEnd = NumNativeRegUnits;
+ UnitIdx < UnitEnd; ++UnitIdx) {
+ std::vector<unsigned> RUSets;
+ for (unsigned i = 0, e = RegUnitSets.size(); i != e; ++i) {
+ RegUnitSet &RUSet = RegUnitSets[i];
+ if (!is_contained(RUSet.Units, UnitIdx))
+ continue;
+ RUSets.push_back(i);
+ }
+ // Reuse an existing RegClassUnitSets entry when one has the same list.
+ unsigned RCUnitSetsIdx = 0;
+ for (unsigned e = RegClassUnitSets.size();
+ RCUnitSetsIdx != e; ++RCUnitSetsIdx) {
+ if (RegClassUnitSets[RCUnitSetsIdx] == RUSets) {
+ break;
+ }
+ }
+ RegUnits[UnitIdx].RegClassUnitSetsIdx = RCUnitSetsIdx;
+ if (RCUnitSetsIdx == RegClassUnitSets.size()) {
+ // Create a new list of UnitSets as a "fake" register class.
+ RegClassUnitSets.resize(RCUnitSetsIdx + 1);
+ RegClassUnitSets[RCUnitSetsIdx].swap(RUSets);
+ }
+ }
+}
+
+// For each register, compute a lane mask per register unit by distributing
+// the lane masks of the register's leaf sub-register indices over the units
+// those sub-registers touch.
+void CodeGenRegBank::computeRegUnitLaneMasks() {
+ for (auto &Register : Registers) {
+ // Create an initial lane mask for all register units.
+ const auto &RegUnits = Register.getRegUnits();
+ CodeGenRegister::RegUnitLaneMaskList
+ RegUnitLaneMasks(RegUnits.count(), LaneBitmask::getNone());
+ // Iterate through SubRegisters.
+ typedef CodeGenRegister::SubRegMap SubRegMap;
+ const SubRegMap &SubRegs = Register.getSubRegs();
+ for (auto S : SubRegs) {
+ CodeGenRegister *SubReg = S.second;
+ // Ignore non-leaf subregisters, their lane masks are fully covered by
+ // the leaf subregisters anyway.
+ if (!SubReg->getSubRegs().empty())
+ continue;
+ CodeGenSubRegIndex *SubRegIndex = S.first;
+ const CodeGenRegister *SubRegister = S.second;
+ LaneBitmask LaneMask = SubRegIndex->LaneMask;
+ // Distribute LaneMask to Register Units touched.
+ for (unsigned SUI : SubRegister->getRegUnits()) {
+ bool Found = false;
+ unsigned u = 0;
+ // Locate SUI's position within this register's unit list; the lane
+ // mask list is indexed in parallel with that list.
+ for (unsigned RU : RegUnits) {
+ if (SUI == RU) {
+ RegUnitLaneMasks[u] |= LaneMask;
+ assert(!Found);
+ Found = true;
+ }
+ ++u;
+ }
+ (void)Found;
+ assert(Found);
+ }
+ }
+ Register.setRegUnitLaneMasks(RegUnitLaneMasks);
+ }
+}
+
+// Top-level driver that derives all secondary register-bank information after
+// the registers and classes have been constructed. The call order below
+// matters: unit weights must exist before unit sets, and unit sets before
+// their weights/order are computed.
+void CodeGenRegBank::computeDerivedInfo() {
+ computeComposites();
+ computeSubRegLaneMasks();
+
+ // Compute a weight for each register unit created during getSubRegs.
+ // This may create adopted register units (with unit # >= NumNativeRegUnits).
+ computeRegUnitWeights();
+
+ // Compute a unique set of RegUnitSets. One for each RegClass and inferred
+ // supersets for the union of overlapping sets.
+ computeRegUnitSets();
+
+ computeRegUnitLaneMasks();
+
+ // Compute register class HasDisjunctSubRegs/CoveredBySubRegs flag.
+ // A class has the property if any (HasDisjunctSubRegs) / all
+ // (CoveredBySubRegs) of its member registers do.
+ for (CodeGenRegisterClass &RC : RegClasses) {
+ RC.HasDisjunctSubRegs = false;
+ RC.CoveredBySubRegs = true;
+ for (const CodeGenRegister *Reg : RC.getMembers()) {
+ RC.HasDisjunctSubRegs |= Reg->HasDisjunctSubRegs;
+ RC.CoveredBySubRegs &= Reg->CoveredBySubRegs;
+ }
+ }
+
+ // Get the weight of each set.
+ for (unsigned Idx = 0, EndIdx = RegUnitSets.size(); Idx != EndIdx; ++Idx)
+ RegUnitSets[Idx].Weight = getRegUnitSetWeight(RegUnitSets[Idx].Units);
+
+ // Find the order of each set.
+ RegUnitSetOrder.reserve(RegUnitSets.size());
+ for (unsigned Idx = 0, EndIdx = RegUnitSets.size(); Idx != EndIdx; ++Idx)
+ RegUnitSetOrder.push_back(Idx);
+
+ // Stable sort by ascending set size so smaller pressure sets come first.
+ llvm::stable_sort(RegUnitSetOrder, [this](unsigned ID1, unsigned ID2) {
+ return getRegPressureSet(ID1).Units.size() <
+ getRegPressureSet(ID2).Units.size();
+ });
+ for (unsigned Idx = 0, EndIdx = RegUnitSets.size(); Idx != EndIdx; ++Idx) {
+ RegUnitSets[RegUnitSetOrder[Idx]].Order = Idx;
+ }
+}
+
+//
+// Synthesize missing register class intersections.
+//
+// Make sure that sub-classes of RC exists such that getCommonSubClass(RC, X)
+// returns a maximal register class for all X.
+//
+void CodeGenRegBank::inferCommonSubClass(CodeGenRegisterClass *RC) {
+ assert(!RegClasses.empty());
+ // Stash the iterator to the last element so that this loop doesn't visit
+ // elements added by the getOrCreateSubClass call within it.
+ for (auto I = RegClasses.begin(), E = std::prev(RegClasses.end());
+ I != std::next(E); ++I) {
+ CodeGenRegisterClass *RC1 = RC;
+ CodeGenRegisterClass *RC2 = &*I;
+ if (RC1 == RC2)
+ continue;
+
+ // Compute the set intersection of RC1 and RC2.
+ const CodeGenRegister::Vec &Memb1 = RC1->getMembers();
+ const CodeGenRegister::Vec &Memb2 = RC2->getMembers();
+ CodeGenRegister::Vec Intersection;
+ std::set_intersection(Memb1.begin(), Memb1.end(), Memb2.begin(),
+ Memb2.end(),
+ std::inserter(Intersection, Intersection.begin()),
+ deref<std::less<>>());
+
+ // Skip disjoint class pairs.
+ if (Intersection.empty())
+ continue;
+
+ // If RC1 and RC2 have different spill sizes or alignments, use the
+ // stricter one for sub-classing. If they are equal, prefer RC1.
+ if (RC2->RSI.hasStricterSpillThan(RC1->RSI))
+ std::swap(RC1, RC2);
+
+ // Create (or find) the class named "<RC1>_and_<RC2>" holding the
+ // intersection.
+ getOrCreateSubClass(RC1, &Intersection,
+ RC1->getName() + "_and_" + RC2->getName());
+ }
+}
+
+//
+// Synthesize missing sub-classes for getSubClassWithSubReg().
+//
+// Make sure that the set of registers in RC with a given SubIdx sub-register
+// form a register class. Update RC->SubClassWithSubReg.
+//
+void CodeGenRegBank::inferSubClassWithSubReg(CodeGenRegisterClass *RC) {
+ // Map SubRegIndex to set of registers in RC supporting that SubRegIndex.
+ typedef std::map<const CodeGenSubRegIndex *, CodeGenRegister::Vec,
+ deref<std::less<>>>
+ SubReg2SetMap;
+
+ // Compute the set of registers supporting each SubRegIndex.
+ // Artificial registers and artificial indices are excluded.
+ SubReg2SetMap SRSets;
+ for (const auto R : RC->getMembers()) {
+ if (R->Artificial)
+ continue;
+ const CodeGenRegister::SubRegMap &SRM = R->getSubRegs();
+ for (auto I : SRM) {
+ if (!I.first->Artificial)
+ SRSets[I.first].push_back(R);
+ }
+ }
+
+ for (auto I : SRSets)
+ sortAndUniqueRegisters(I.second);
+
+ // Find matching classes for all SRSets entries. Iterate in SubRegIndex
+ // numerical order to visit synthetic indices last.
+ for (const auto &SubIdx : SubRegIndices) {
+ if (SubIdx.Artificial)
+ continue;
+ SubReg2SetMap::const_iterator I = SRSets.find(&SubIdx);
+ // Unsupported SubRegIndex. Skip it.
+ if (I == SRSets.end())
+ continue;
+ // In most cases, all RC registers support the SubRegIndex.
+ if (I->second.size() == RC->getMembers().size()) {
+ RC->setSubClassWithSubReg(&SubIdx, RC);
+ continue;
+ }
+ // This is a real subset. See if we have a matching class.
+ CodeGenRegisterClass *SubRC =
+ getOrCreateSubClass(RC, &I->second,
+ RC->getName() + "_with_" + I->first->getName());
+ RC->setSubClassWithSubReg(&SubIdx, SubRC);
+ }
+}
+
+//
+// Synthesize missing sub-classes of RC for getMatchingSuperRegClass().
+//
+// Create sub-classes of RC such that getMatchingSuperRegClass(RC, SubIdx, X)
+// has a maximal result for any SubIdx and any X >= FirstSubRegRC.
+//
+
+void CodeGenRegBank::inferMatchingSuperRegClass(CodeGenRegisterClass *RC,
+ std::list<CodeGenRegisterClass>::iterator FirstSubRegRC) {
+ SmallVector<std::pair<const CodeGenRegister*,
+ const CodeGenRegister*>, 16> SSPairs;
+ BitVector TopoSigs(getNumTopoSigs());
+
+ // Iterate in SubRegIndex numerical order to visit synthetic indices last.
+ for (auto &SubIdx : SubRegIndices) {
+ // Skip indexes that aren't fully supported by RC's registers. This was
+ // computed by inferSubClassWithSubReg() above which should have been
+ // called first.
+ if (RC->getSubClassWithSubReg(&SubIdx) != RC)
+ continue;
+
+ // Build list of (Super, Sub) pairs for this SubIdx.
+ // TopoSigs records which topological signatures occur among the Subs.
+ SSPairs.clear();
+ TopoSigs.reset();
+ for (const auto Super : RC->getMembers()) {
+ const CodeGenRegister *Sub = Super->getSubRegs().find(&SubIdx)->second;
+ assert(Sub && "Missing sub-register");
+ SSPairs.push_back(std::make_pair(Super, Sub));
+ TopoSigs.set(Sub->getTopoSig());
+ }
+
+ // Iterate over sub-register class candidates. Ignore classes created by
+ // this loop. They will never be useful.
+ // Store an iterator to the last element (not end) so that this loop doesn't
+ // visit newly inserted elements.
+ assert(!RegClasses.empty());
+ for (auto I = FirstSubRegRC, E = std::prev(RegClasses.end());
+ I != std::next(E); ++I) {
+ CodeGenRegisterClass &SubRC = *I;
+ if (SubRC.Artificial)
+ continue;
+ // Topological shortcut: SubRC members have the wrong shape.
+ if (!TopoSigs.anyCommon(SubRC.getTopoSigs()))
+ continue;
+ // Compute the subset of RC that maps into SubRC.
+ CodeGenRegister::Vec SubSetVec;
+ for (unsigned i = 0, e = SSPairs.size(); i != e; ++i)
+ if (SubRC.contains(SSPairs[i].second))
+ SubSetVec.push_back(SSPairs[i].first);
+
+ if (SubSetVec.empty())
+ continue;
+
+ // RC injects completely into SubRC.
+ sortAndUniqueRegisters(SubSetVec);
+ if (SubSetVec.size() == SSPairs.size()) {
+ SubRC.addSuperRegClass(&SubIdx, RC);
+ continue;
+ }
+
+ // Only a subset of RC maps into SubRC. Make sure it is represented by a
+ // class.
+ getOrCreateSubClass(RC, &SubSetVec, RC->getName() + "_with_" +
+ SubIdx.getName() + "_in_" +
+ SubRC.getName());
+ }
+ }
+}
+
+//
+// Infer missing register classes.
+//
+void CodeGenRegBank::computeInferredRegisterClasses() {
+ assert(!RegClasses.empty());
+ // When this function is called, the register classes have not been sorted
+ // and assigned EnumValues yet. That means getSubClasses(),
+ // getSuperClasses(), and hasSubClass() functions are defunct.
+
+ // Use one-before-the-end so it doesn't move forward when new elements are
+ // added.
+ auto FirstNewRC = std::prev(RegClasses.end());
+
+ // Visit all register classes, including the ones being added by the loop.
+ // Watch out for iterator invalidation here.
+ for (auto I = RegClasses.begin(), E = RegClasses.end(); I != E; ++I) {
+ CodeGenRegisterClass *RC = &*I;
+ if (RC->Artificial)
+ continue;
+
+ // Synthesize answers for getSubClassWithSubReg().
+ inferSubClassWithSubReg(RC);
+
+ // Synthesize answers for getCommonSubClass().
+ inferCommonSubClass(RC);
+
+ // Synthesize answers for getMatchingSuperRegClass().
+ inferMatchingSuperRegClass(RC);
+
+ // New register classes are created while this loop is running, and we need
+ // to visit all of them. I particular, inferMatchingSuperRegClass needs
+ // to match old super-register classes with sub-register classes created
+ // after inferMatchingSuperRegClass was called. At this point,
+ // inferMatchingSuperRegClass has checked SuperRC = [0..rci] with SubRC =
+ // [0..FirstNewRC). We need to cover SubRC = [FirstNewRC..rci].
+ if (I == FirstNewRC) {
+ // Snapshot the new tail boundary before re-running the matching pass.
+ auto NextNewRC = std::prev(RegClasses.end());
+ for (auto I2 = RegClasses.begin(), E2 = std::next(FirstNewRC); I2 != E2;
+ ++I2)
+ inferMatchingSuperRegClass(&*I2, E2);
+ FirstNewRC = NextNewRC;
+ }
+ }
+}
+
+/// getRegisterClassForRegister - Find the register class that contains the
+/// specified physical register. If the register is not in a register class,
+/// return null. If the register is in multiple classes, and the classes have a
+/// superset-subset relationship and the same set of types, return the
+/// superclass. Otherwise return null.
+const CodeGenRegisterClass*
+CodeGenRegBank::getRegClassForRegister(Record *R) {
+ const CodeGenRegister *Reg = getReg(R);
+ const CodeGenRegisterClass *FoundRC = nullptr;
+ for (const auto &RC : getRegClasses()) {
+ if (!RC.contains(Reg))
+ continue;
+
+ // If this is the first class that contains the register,
+ // make a note of it and go on to the next class.
+ if (!FoundRC) {
+ FoundRC = &RC;
+ continue;
+ }
+
+ // If a register's classes have different types, return null.
+ if (RC.getValueTypes() != FoundRC->getValueTypes())
+ return nullptr;
+
+ // Check to see if the previously found class that contains
+ // the register is a subclass of the current class. If so,
+ // prefer the superclass.
+ if (RC.hasSubClass(FoundRC)) {
+ FoundRC = &RC;
+ continue;
+ }
+
+ // Check to see if the previously found class that contains
+ // the register is a superclass of the current class. If so,
+ // prefer the superclass.
+ if (FoundRC->hasSubClass(&RC))
+ continue;
+
+ // Multiple classes, and neither is a superclass of the other.
+ // Return null.
+ return nullptr;
+ }
+ return FoundRC;
+}
+
+// Return the smallest register class (by sub-class relation) that contains
+// RegRecord and, when VT is non-null, supports the given value type.
+// Fatal (assert) if no class contains the register.
+const CodeGenRegisterClass *
+CodeGenRegBank::getMinimalPhysRegClass(Record *RegRecord,
+ ValueTypeByHwMode *VT) {
+ const CodeGenRegister *Reg = getReg(RegRecord);
+ const CodeGenRegisterClass *BestRC = nullptr;
+ for (const auto &RC : getRegClasses()) {
+ // Prefer RC over the current best when RC is a sub-class of it.
+ if ((!VT || RC.hasType(*VT)) &&
+ RC.contains(Reg) && (!BestRC || BestRC->hasSubClass(&RC)))
+ BestRC = &RC;
+ }
+
+ assert(BestRC && "Couldn't find the register class");
+ return BestRC;
+}
+
+// Compute the transitive closure of Regs: all their sub-registers, plus every
+// super-register whose sub-registers are all in the set. Returns a BitVector
+// indexed by register EnumValue.
+BitVector CodeGenRegBank::computeCoveredRegisters(ArrayRef<Record*> Regs) {
+ SetVector<const CodeGenRegister*> Set;
+
+ // First add Regs with all sub-registers.
+ for (unsigned i = 0, e = Regs.size(); i != e; ++i) {
+ CodeGenRegister *Reg = getReg(Regs[i]);
+ if (Set.insert(Reg))
+ // Reg is new, add all sub-registers.
+ // The pre-ordering is not important here.
+ Reg->addSubRegsPreOrder(Set, *this);
+ }
+
+ // Second, find all super-registers that are completely covered by the set.
+ // Note: Set grows while iterating; newly added supers are revisited since
+ // Set.size() is re-evaluated each iteration.
+ for (unsigned i = 0; i != Set.size(); ++i) {
+ const CodeGenRegister::SuperRegList &SR = Set[i]->getSuperRegs();
+ for (unsigned j = 0, e = SR.size(); j != e; ++j) {
+ const CodeGenRegister *Super = SR[j];
+ if (!Super->CoveredBySubRegs || Set.count(Super))
+ continue;
+ // This new super-register is covered by its sub-registers.
+ bool AllSubsInSet = true;
+ const CodeGenRegister::SubRegMap &SRM = Super->getSubRegs();
+ for (auto I : SRM)
+ if (!Set.count(I.second)) {
+ AllSubsInSet = false;
+ break;
+ }
+ // All sub-registers in Set, add Super as well.
+ // We will visit Super later to recheck its super-registers.
+ if (AllSubsInSet)
+ Set.insert(Super);
+ }
+ }
+
+ // Convert to BitVector.
+ BitVector BV(Registers.size() + 1);
+ for (unsigned i = 0, e = Set.size(); i != e; ++i)
+ BV.set(Set[i]->EnumValue);
+ return BV;
+}
+
+// Debug helper: print a native unit by its first root register's name, or an
+// adopted (non-native) unit by its number.
+void CodeGenRegBank::printRegUnitName(unsigned Unit) const {
+ if (Unit < NumNativeRegUnits)
+ dbgs() << ' ' << RegUnits[Unit].Roots[0]->getName();
+ else
+ dbgs() << " #" << Unit;
+}
diff --git a/contrib/libs/llvm16/utils/TableGen/CodeGenRegisters.h b/contrib/libs/llvm16/utils/TableGen/CodeGenRegisters.h
new file mode 100644
index 0000000000..765425ed68
--- /dev/null
+++ b/contrib/libs/llvm16/utils/TableGen/CodeGenRegisters.h
@@ -0,0 +1,847 @@
+//===- CodeGenRegisters.h - Register and RegisterClass Info -----*- C++ -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// This file defines structures to encapsulate information gleaned from the
+// target register and register class definitions.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_UTILS_TABLEGEN_CODEGENREGISTERS_H
+#define LLVM_UTILS_TABLEGEN_CODEGENREGISTERS_H
+
+#include "InfoByHwMode.h"
+#include "llvm/ADT/ArrayRef.h"
+#include "llvm/ADT/BitVector.h"
+#include "llvm/ADT/DenseMap.h"
+#include "llvm/ADT/STLExtras.h"
+#include "llvm/ADT/SetVector.h"
+#include "llvm/ADT/SmallPtrSet.h"
+#include "llvm/ADT/SmallVector.h"
+#include "llvm/ADT/SparseBitVector.h"
+#include "llvm/ADT/StringMap.h"
+#include "llvm/ADT/StringRef.h"
+#include "llvm/MC/LaneBitmask.h"
+#include "llvm/Support/ErrorHandling.h"
+#include "llvm/TableGen/Record.h"
+#include "llvm/TableGen/SetTheory.h"
+#include <cassert>
+#include <cstdint>
+#include <deque>
+#include <list>
+#include <map>
+#include <string>
+#include <utility>
+#include <vector>
+
+namespace llvm {
+
+ class CodeGenRegBank;
+ template <typename T, typename Vector, typename Set> class SetVector;
+
+ /// Used to encode a step in a register lane mask transformation.
+ /// Mask the bits specified in Mask, then rotate them Rol bits to the left
+ /// assuming a wraparound at 32bits.
+ struct MaskRolPair {
+ LaneBitmask Mask;
+ // Left-rotation amount applied after masking (wraps at 32 bits).
+ uint8_t RotateLeft;
+
+ // Equality compares both the mask and the rotation amount.
+ bool operator==(const MaskRolPair Other) const {
+ return Mask == Other.Mask && RotateLeft == Other.RotateLeft;
+ }
+ bool operator!=(const MaskRolPair Other) const {
+ return Mask != Other.Mask || RotateLeft != Other.RotateLeft;
+ }
+ };
+
+ /// CodeGenSubRegIndex - Represents a sub-register index.
+ class CodeGenSubRegIndex {
+ // Backing TableGen record; null for synthetic indices created via the
+ // (StringRef, StringRef, unsigned) constructor.
+ Record *const TheDef;
+ std::string Name;
+ std::string Namespace;
+
+ public:
+ // Bit size and offset of the sub-register range within the full register.
+ // (uint16_t)-1 is used below as "no offset yet" sentinel.
+ uint16_t Size;
+ uint16_t Offset;
+ const unsigned EnumValue;
+ mutable LaneBitmask LaneMask;
+ mutable SmallVector<MaskRolPair,1> CompositionLaneMaskTransform;
+
+ /// A list of subregister indexes concatenated resulting in this
+ /// subregister index. This is the reverse of CodeGenRegBank::ConcatIdx.
+ SmallVector<CodeGenSubRegIndex*,4> ConcatenationOf;
+
+ // Are all super-registers containing this SubRegIndex covered by their
+ // sub-registers?
+ bool AllSuperRegsCovered;
+ // A subregister index is "artificial" if every subregister obtained
+ // from applying this index is artificial. Artificial subregister
+ // indexes are not used to create new register classes.
+ bool Artificial;
+
+ CodeGenSubRegIndex(Record *R, unsigned Enum);
+ CodeGenSubRegIndex(StringRef N, StringRef Nspace, unsigned Enum);
+ CodeGenSubRegIndex(CodeGenSubRegIndex&) = delete;
+
+ const std::string &getName() const { return Name; }
+ const std::string &getNamespace() const { return Namespace; }
+ std::string getQualifiedName() const;
+
+ // Map of composite subreg indices.
+ typedef std::map<CodeGenSubRegIndex *, CodeGenSubRegIndex *,
+ deref<std::less<>>>
+ CompMap;
+
+ // Returns the subreg index that results from composing this with Idx.
+ // Returns NULL if this and Idx don't compose.
+ CodeGenSubRegIndex *compose(CodeGenSubRegIndex *Idx) const {
+ CompMap::const_iterator I = Composed.find(Idx);
+ return I == Composed.end() ? nullptr : I->second;
+ }
+
+ // Add a composite subreg index: this+A = B.
+ // Return a conflicting composite, or NULL
+ CodeGenSubRegIndex *addComposite(CodeGenSubRegIndex *A,
+ CodeGenSubRegIndex *B) {
+ assert(A && B);
+ std::pair<CompMap::iterator, bool> Ins =
+ Composed.insert(std::make_pair(A, B));
+ // Synthetic subreg indices that aren't contiguous (for instance ARM
+ // register tuples) don't have a bit range, so it's OK to let
+ // B->Offset == -1. For the other cases, accumulate the offset and set
+ // the size here. Only do so if there is no offset yet though.
+ if ((Offset != (uint16_t)-1 && A->Offset != (uint16_t)-1) &&
+ (B->Offset == (uint16_t)-1)) {
+ B->Offset = Offset + A->Offset;
+ B->Size = A->Size;
+ }
+ return (Ins.second || Ins.first->second == B) ? nullptr
+ : Ins.first->second;
+ }
+
+ // Update the composite maps of components specified in 'ComposedOf'.
+ void updateComponents(CodeGenRegBank&);
+
+ // Return the map of composites.
+ const CompMap &getComposites() const { return Composed; }
+
+ // Compute LaneMask from Composed. Return LaneMask.
+ LaneBitmask computeLaneMask() const;
+
+ void setConcatenationOf(ArrayRef<CodeGenSubRegIndex*> Parts);
+
+ /// Replaces subregister indexes in the `ConcatenationOf` list with
+ /// list of subregisters they are composed of (if any). Do this recursively.
+ void computeConcatTransitiveClosure();
+
+ // Order sub-register indices by their enumeration value.
+ bool operator<(const CodeGenSubRegIndex &RHS) const {
+ return this->EnumValue < RHS.EnumValue;
+ }
+
+ private:
+ CompMap Composed;
+ };
+
+ /// CodeGenRegister - Represents a register definition.
+ struct CodeGenRegister {
+ Record *TheDef;
+ unsigned EnumValue;
+ std::vector<int64_t> CostPerUse;
+ bool CoveredBySubRegs;
+ bool HasDisjunctSubRegs;
+ bool Artificial;
+ bool Constant;
+
+ // Map SubRegIndex -> Register.
+ typedef std::map<CodeGenSubRegIndex *, CodeGenRegister *,
+ deref<std::less<>>>
+ SubRegMap;
+
+ CodeGenRegister(Record *R, unsigned Enum);
+
+ StringRef getName() const;
+
+ // Extract more information from TheDef. This is used to build an object
+ // graph after all CodeGenRegister objects have been created.
+ void buildObjectGraph(CodeGenRegBank&);
+
+ // Lazily compute a map of all sub-registers.
+ // This includes unique entries for all sub-sub-registers.
+ const SubRegMap &computeSubRegs(CodeGenRegBank&);
+
+ // Compute extra sub-registers by combining the existing sub-registers.
+ void computeSecondarySubRegs(CodeGenRegBank&);
+
+ // Add this as a super-register to all sub-registers after the sub-register
+ // graph has been built.
+ void computeSuperRegs(CodeGenRegBank&);
+
+ const SubRegMap &getSubRegs() const {
+ assert(SubRegsComplete && "Must precompute sub-registers");
+ return SubRegs;
+ }
+
+ // Add sub-registers to OSet following a pre-order defined by the .td file.
+ void addSubRegsPreOrder(SetVector<const CodeGenRegister*> &OSet,
+ CodeGenRegBank&) const;
+
+ // Return the sub-register index naming Reg as a sub-register of this
+ // register. Returns NULL if Reg is not a sub-register.
+ CodeGenSubRegIndex *getSubRegIndex(const CodeGenRegister *Reg) const {
+ return SubReg2Idx.lookup(Reg);
+ }
+
+ typedef std::vector<const CodeGenRegister*> SuperRegList;
+
+ // Get the list of super-registers in topological order, small to large.
+ // This is valid after computeSubRegs visits all registers during RegBank
+ // construction.
+ const SuperRegList &getSuperRegs() const {
+ assert(SubRegsComplete && "Must precompute sub-registers");
+ return SuperRegs;
+ }
+
+ // Get the list of ad hoc aliases. The graph is symmetric, so the list
+ // contains all registers in 'Aliases', and all registers that mention this
+ // register in 'Aliases'.
+ ArrayRef<CodeGenRegister*> getExplicitAliases() const {
+ return ExplicitAliases;
+ }
+
+ // Get the topological signature of this register. This is a small integer
+ // less than RegBank.getNumTopoSigs(). Registers with the same TopoSig have
+ // identical sub-register structure. That is, they support the same set of
+ // sub-register indices mapping to the same kind of sub-registers
+ // (TopoSig-wise).
+ unsigned getTopoSig() const {
+ assert(SuperRegsComplete && "TopoSigs haven't been computed yet.");
+ return TopoSig;
+ }
+
+ // List of register units in ascending order.
+ typedef SparseBitVector<> RegUnitList;
+ typedef SmallVector<LaneBitmask, 16> RegUnitLaneMaskList;
+
+ // How many entries in RegUnitList are native?
+ RegUnitList NativeRegUnits;
+
+ // Get the list of register units.
+ // This is only valid after computeSubRegs() completes.
+ const RegUnitList &getRegUnits() const { return RegUnits; }
+
+ ArrayRef<LaneBitmask> getRegUnitLaneMasks() const {
+ return ArrayRef(RegUnitLaneMasks).slice(0, NativeRegUnits.count());
+ }
+
+ // Get the native register units. This is a prefix of getRegUnits().
+ RegUnitList getNativeRegUnits() const {
+ return NativeRegUnits;
+ }
+
+ void setRegUnitLaneMasks(const RegUnitLaneMaskList &LaneMasks) {
+ RegUnitLaneMasks = LaneMasks;
+ }
+
+ // Inherit register units from subregisters.
+ // Return true if the RegUnits changed.
+ bool inheritRegUnits(CodeGenRegBank &RegBank);
+
+ // Adopt a register unit for pressure tracking.
+ // A unit is adopted iff its unit number is >= NativeRegUnits.count().
+ void adoptRegUnit(unsigned RUID) { RegUnits.set(RUID); }
+
+ // Get the sum of this register's register unit weights.
+ unsigned getWeight(const CodeGenRegBank &RegBank) const;
+
+ // Canonically ordered set.
+ typedef std::vector<const CodeGenRegister*> Vec;
+
+ private:
+ bool SubRegsComplete;
+ bool SuperRegsComplete;
+ unsigned TopoSig;
+
+ // The sub-registers explicit in the .td file form a tree.
+ SmallVector<CodeGenSubRegIndex*, 8> ExplicitSubRegIndices;
+ SmallVector<CodeGenRegister*, 8> ExplicitSubRegs;
+
+ // Explicit ad hoc aliases, symmetrized to form an undirected graph.
+ SmallVector<CodeGenRegister*, 8> ExplicitAliases;
+
+ // Super-registers where this is the first explicit sub-register.
+ SuperRegList LeadingSuperRegs;
+
+ SubRegMap SubRegs;
+ SuperRegList SuperRegs;
+ DenseMap<const CodeGenRegister*, CodeGenSubRegIndex*> SubReg2Idx;
+ RegUnitList RegUnits;
+ RegUnitLaneMaskList RegUnitLaneMasks;
+ };
+
+ // Order registers by their enumeration value, i.e. the order in which
+ // they were created by CodeGenRegBank. This is the canonical ordering
+ // used for sorted register containers.
+ inline bool operator<(const CodeGenRegister &A, const CodeGenRegister &B) {
+ return A.EnumValue < B.EnumValue;
+ }
+
+ // Two CodeGenRegister objects denote the same register iff they share
+ // an enumeration value.
+ inline bool operator==(const CodeGenRegister &A, const CodeGenRegister &B) {
+ return A.EnumValue == B.EnumValue;
+ }
+
+ // A register class defined in the .td file, or synthesized by TableGen
+ // while inferring sub/super-register classes.
+ class CodeGenRegisterClass {
+ CodeGenRegister::Vec Members;
+ // Allocation orders. Order[0] always contains all registers in Members.
+ std::vector<SmallVector<Record*, 16>> Orders;
+ // Bit mask of sub-classes including this, indexed by their EnumValue.
+ BitVector SubClasses;
+ // List of super-classes, topologically ordered to have the larger classes
+ // first. This is the same as sorting by EnumValue.
+ SmallVector<CodeGenRegisterClass*, 4> SuperClasses;
+ Record *TheDef;
+ std::string Name;
+
+ // For a synthesized class, inherit missing properties from the nearest
+ // super-class.
+ void inheritProperties(CodeGenRegBank&);
+
+ // Map SubRegIndex -> sub-class. This is the largest sub-class where all
+ // registers have a SubRegIndex sub-register.
+ DenseMap<const CodeGenSubRegIndex *, CodeGenRegisterClass *>
+ SubClassWithSubReg;
+
+ // Map SubRegIndex -> set of super-reg classes. This is all register
+ // classes SuperRC such that:
+ //
+ // R:SubRegIndex in this RC for all R in SuperRC.
+ //
+ DenseMap<const CodeGenSubRegIndex *, SmallPtrSet<CodeGenRegisterClass *, 8>>
+ SuperRegClasses;
+
+ // Bit vector of TopoSigs for the registers in this class. This will be
+ // very sparse on regular architectures.
+ BitVector TopoSigs;
+
+ public:
+ unsigned EnumValue;
+ StringRef Namespace;
+ SmallVector<ValueTypeByHwMode, 4> VTs;
+ RegSizeInfoByHwMode RSI;
+ int CopyCost;
+ bool Allocatable;
+ StringRef AltOrderSelect;
+ uint8_t AllocationPriority;
+ bool GlobalPriority;
+ uint8_t TSFlags;
+ /// Contains the combination of the lane masks of all subregisters.
+ LaneBitmask LaneMask;
+ /// True if there are at least 2 subregisters which do not interfere.
+ bool HasDisjunctSubRegs;
+ bool CoveredBySubRegs;
+ /// A register class is artificial if all its members are artificial.
+ bool Artificial;
+ /// Generate register pressure set for this register class and any class
+ /// synthesized from it.
+ bool GeneratePressureSet;
+
+ // Return the Record that defined this class, or NULL if the class was
+ // created by TableGen.
+ Record *getDef() const { return TheDef; }
+
+ const std::string &getName() const { return Name; }
+ std::string getQualifiedName() const;
+ ArrayRef<ValueTypeByHwMode> getValueTypes() const { return VTs; }
+ unsigned getNumValueTypes() const { return VTs.size(); }
+ bool hasType(const ValueTypeByHwMode &VT) const;
+
+ const ValueTypeByHwMode &getValueTypeNum(unsigned VTNum) const {
+ if (VTNum < VTs.size())
+ return VTs[VTNum];
+ llvm_unreachable("VTNum greater than number of ValueTypes in RegClass!");
+ }
+
+ // Return true if this class contains the register.
+ bool contains(const CodeGenRegister*) const;
+
+ // Returns true if RC is a subclass.
+ // RC is a sub-class of this class if it is a valid replacement for any
+ // instruction operand where a register of this class is required. It must
+ // satisfy these conditions:
+ //
+ // 1. All RC registers are also in this.
+ // 2. The RC spill size must not be smaller than our spill size.
+ // 3. RC spill alignment must be compatible with ours.
+ //
+ bool hasSubClass(const CodeGenRegisterClass *RC) const {
+ return SubClasses.test(RC->EnumValue);
+ }
+
+ // getSubClassWithSubReg - Returns the largest sub-class where all
+ // registers have a SubIdx sub-register.
+ CodeGenRegisterClass *
+ getSubClassWithSubReg(const CodeGenSubRegIndex *SubIdx) const {
+ return SubClassWithSubReg.lookup(SubIdx);
+ }
+
+ /// Find largest subclass where all registers have SubIdx subregisters in
+ /// SubRegClass and the largest subregister class that contains those
+ /// subregisters without (as far as possible) also containing additional registers.
+ ///
+ /// This can be used to find a suitable pair of classes for subregister copies.
+ /// \return std::pair<SubClass, SubRegClass> where SubClass is a class where
+ /// every register has SubIdx, and SubRegClass is a class where every
+ /// register is covered by the SubIdx subregister of SubClass.
+ std::optional<std::pair<CodeGenRegisterClass *, CodeGenRegisterClass *>>
+ getMatchingSubClassWithSubRegs(CodeGenRegBank &RegBank,
+ const CodeGenSubRegIndex *SubIdx) const;
+
+ void setSubClassWithSubReg(const CodeGenSubRegIndex *SubIdx,
+ CodeGenRegisterClass *SubRC) {
+ SubClassWithSubReg[SubIdx] = SubRC;
+ }
+
+ // getSuperRegClasses - Returns a bit vector of all register classes
+ // containing only SubIdx super-registers of this class.
+ void getSuperRegClasses(const CodeGenSubRegIndex *SubIdx,
+ BitVector &Out) const;
+
+ // addSuperRegClass - Add a class containing only SubIdx super-registers.
+ void addSuperRegClass(CodeGenSubRegIndex *SubIdx,
+ CodeGenRegisterClass *SuperRC) {
+ SuperRegClasses[SubIdx].insert(SuperRC);
+ }
+
+ // getSubClasses - Returns a constant BitVector of subclasses indexed by
+ // EnumValue.
+ // The SubClasses vector includes an entry for this class.
+ const BitVector &getSubClasses() const { return SubClasses; }
+
+ // getSuperClasses - Returns a list of super classes ordered by EnumValue.
+ // The array does not include an entry for this class.
+ ArrayRef<CodeGenRegisterClass*> getSuperClasses() const {
+ return SuperClasses;
+ }
+
+ // Returns an ordered list of class members.
+ // The order of registers is the same as in the .td file.
+ // No = 0 is the default allocation order, No = 1 is the first alternative.
+ ArrayRef<Record*> getOrder(unsigned No = 0) const {
+ return Orders[No];
+ }
+
+ // Return the total number of allocation orders available.
+ unsigned getNumOrders() const { return Orders.size(); }
+
+ // Get the set of registers. This set contains the same registers as
+ // getOrder(0).
+ const CodeGenRegister::Vec &getMembers() const { return Members; }
+
+ // Get a bit vector of TopoSigs present in this register class.
+ const BitVector &getTopoSigs() const { return TopoSigs; }
+
+ // Get a weight of this register class.
+ unsigned getWeight(const CodeGenRegBank&) const;
+
+ // Populate a unique sorted list of units from a register set.
+ void buildRegUnitSet(const CodeGenRegBank &RegBank,
+ std::vector<unsigned> &RegUnits) const;
+
+ CodeGenRegisterClass(CodeGenRegBank&, Record *R);
+ CodeGenRegisterClass(CodeGenRegisterClass&) = delete;
+
+ // A key representing the parts of a register class used for forming
+ // sub-classes. Note the ordering provided by this key is not the same as
+ // the topological order used for the EnumValues.
+ struct Key {
+ const CodeGenRegister::Vec *Members;
+ RegSizeInfoByHwMode RSI;
+
+ Key(const CodeGenRegister::Vec *M, const RegSizeInfoByHwMode &I)
+ : Members(M), RSI(I) {}
+
+ Key(const CodeGenRegisterClass &RC)
+ : Members(&RC.getMembers()), RSI(RC.RSI) {}
+
+ // Lexicographical order of (Members, RegSizeInfoByHwMode).
+ bool operator<(const Key&) const;
+ };
+
+ // Create a non-user defined register class.
+ CodeGenRegisterClass(CodeGenRegBank&, StringRef Name, Key Props);
+
+ // Called by CodeGenRegBank::CodeGenRegBank().
+ static void computeSubClasses(CodeGenRegBank&);
+
+ // Get ordering value among register base classes.
+ std::optional<int> getBaseClassOrder() const {
+ if (TheDef && !TheDef->isValueUnset("BaseClassOrder"))
+ return TheDef->getValueAsInt("BaseClassOrder");
+ return {};
+ }
+ };
+
+ // Register categories are used when we need to determine the category a
+ // register falls into (GPR, vector, fixed, etc.) without having to know
+ // specific information about the target architecture.
+ class CodeGenRegisterCategory {
+ Record *TheDef;
+ std::string Name;
+ // The register classes that belong to this category.
+ std::list<CodeGenRegisterClass *> Classes;
+
+ public:
+ CodeGenRegisterCategory(CodeGenRegBank &, Record *R);
+ CodeGenRegisterCategory(CodeGenRegisterCategory &) = delete;
+
+ // Return the Record that defined this class, or NULL if the class was
+ // created by TableGen.
+ Record *getDef() const { return TheDef; }
+
+ std::string getName() const { return Name; }
+ std::list<CodeGenRegisterClass *> getClasses() const { return Classes; }
+ };
+
+ // Register units are used to model interference and register pressure.
+ // Every register is assigned one or more register units such that two
+ // registers overlap if and only if they have a register unit in common.
+ //
+ // Normally, one register unit is created per leaf register. Non-leaf
+ // registers inherit the units of their sub-registers.
+ struct RegUnit {
+ // Weight assigned to this RegUnit for estimating register pressure.
+ // This is useful when equalizing weights in register classes with mixed
+ // register topologies.
+ unsigned Weight;
+
+ // Each native RegUnit corresponds to one or two root registers. The full
+ // set of registers containing this unit can be computed as the union of
+ // these two registers and their super-registers.
+ const CodeGenRegister *Roots[2];
+
+ // Index into RegClassUnitSets where we can find the list of UnitSets that
+ // contain this unit.
+ unsigned RegClassUnitSetsIdx;
+ // A register unit is artificial if at least one of its roots is
+ // artificial.
+ bool Artificial;
+
+ RegUnit() : Weight(0), RegClassUnitSetsIdx(0), Artificial(false) {
+ Roots[0] = Roots[1] = nullptr;
+ }
+
+ // Return the valid roots: an array of zero, one, or two registers.
+ // Roots are filled in order, so Roots[1] implies Roots[0].
+ ArrayRef<const CodeGenRegister*> getRoots() const {
+ assert(!(Roots[1] && !Roots[0]) && "Invalid roots array");
+ return ArrayRef(Roots, !!Roots[0] + !!Roots[1]);
+ }
+ };
+
+ // Each RegUnitSet is a sorted vector with a name.
+ struct RegUnitSet {
+ typedef std::vector<unsigned>::const_iterator iterator;
+
+ std::string Name;
+ // Sorted list of register unit IDs in this set.
+ std::vector<unsigned> Units;
+ unsigned Weight = 0; // Cache the sum of all unit weights.
+ unsigned Order = 0; // Cache the sort key.
+
+ RegUnitSet() = default;
+ };
+
+ // Base vector for identifying TopoSigs. The contents uniquely identify a
+ // TopoSig, only computeSuperRegs needs to know how.
+ typedef SmallVector<unsigned, 16> TopoSigId;
+
+ // CodeGenRegBank - Represent a target's registers and the relations between
+ // them.
+ class CodeGenRegBank {
+ SetTheory Sets;
+
+ const CodeGenHwModes &CGH;
+
+ std::deque<CodeGenSubRegIndex> SubRegIndices;
+ DenseMap<Record*, CodeGenSubRegIndex*> Def2SubRegIdx;
+
+ CodeGenSubRegIndex *createSubRegIndex(StringRef Name, StringRef NameSpace);
+
+ typedef std::map<SmallVector<CodeGenSubRegIndex*, 8>,
+ CodeGenSubRegIndex*> ConcatIdxMap;
+ ConcatIdxMap ConcatIdx;
+
+ // Registers.
+ std::deque<CodeGenRegister> Registers;
+ StringMap<CodeGenRegister*> RegistersByName;
+ DenseMap<Record*, CodeGenRegister*> Def2Reg;
+ unsigned NumNativeRegUnits;
+
+ std::map<TopoSigId, unsigned> TopoSigs;
+
+ // Includes native (0..NumNativeRegUnits-1) and adopted register units.
+ SmallVector<RegUnit, 8> RegUnits;
+
+ // Register classes.
+ std::list<CodeGenRegisterClass> RegClasses;
+ DenseMap<Record*, CodeGenRegisterClass*> Def2RC;
+ typedef std::map<CodeGenRegisterClass::Key, CodeGenRegisterClass*> RCKeyMap;
+ RCKeyMap Key2RC;
+
+ // Register categories.
+ std::list<CodeGenRegisterCategory> RegCategories;
+ DenseMap<Record *, CodeGenRegisterCategory *> Def2RCat;
+ using RCatKeyMap =
+ std::map<CodeGenRegisterClass::Key, CodeGenRegisterCategory *>;
+ RCatKeyMap Key2RCat;
+
+ // Remember each unique set of register units. Initially, this contains a
+ // unique set for each register class. Similar sets are coalesced with
+ // pruneUnitSets and new supersets are inferred during computeRegUnitSets.
+ std::vector<RegUnitSet> RegUnitSets;
+
+ // Map RegisterClass index to the index of the RegUnitSet that contains the
+ // class's units and any inferred RegUnit supersets.
+ //
+ // NOTE: This could grow beyond the number of register classes when we map
+ // register units to lists of unit sets. If the list of unit sets does not
+ // already exist for a register class, we create a new entry in this vector.
+ std::vector<std::vector<unsigned>> RegClassUnitSets;
+
+ // Give each register unit set an order based on sorting criteria.
+ std::vector<unsigned> RegUnitSetOrder;
+
+ // Keep track of synthesized definitions generated in TupleExpander.
+ std::vector<std::unique_ptr<Record>> SynthDefs;
+
+ // Add RC to *2RC maps.
+ void addToMaps(CodeGenRegisterClass*);
+
+ // Create a synthetic sub-class if it is missing.
+ CodeGenRegisterClass *getOrCreateSubClass(const CodeGenRegisterClass *RC,
+ const CodeGenRegister::Vec *Membs,
+ StringRef Name);
+
+ // Infer missing register classes.
+ void computeInferredRegisterClasses();
+ void inferCommonSubClass(CodeGenRegisterClass *RC);
+ void inferSubClassWithSubReg(CodeGenRegisterClass *RC);
+
+ void inferMatchingSuperRegClass(CodeGenRegisterClass *RC) {
+ inferMatchingSuperRegClass(RC, RegClasses.begin());
+ }
+
+ void inferMatchingSuperRegClass(
+ CodeGenRegisterClass *RC,
+ std::list<CodeGenRegisterClass>::iterator FirstSubRegRC);
+
+ // Iteratively prune unit sets.
+ void pruneUnitSets();
+
+ // Compute a weight for each register unit created during getSubRegs.
+ void computeRegUnitWeights();
+
+ // Create a RegUnitSet for each RegClass and infer superclasses.
+ void computeRegUnitSets();
+
+ // Populate the Composite map from sub-register relationships.
+ void computeComposites();
+
+ // Compute a lane mask for each sub-register index.
+ void computeSubRegLaneMasks();
+
+ /// Computes a lane mask for each register unit enumerated by a physical
+ /// register.
+ void computeRegUnitLaneMasks();
+
+ public:
+ CodeGenRegBank(RecordKeeper&, const CodeGenHwModes&);
+ CodeGenRegBank(CodeGenRegBank&) = delete;
+
+ SetTheory &getSets() { return Sets; }
+
+ const CodeGenHwModes &getHwModes() const { return CGH; }
+
+ // Sub-register indices. The first NumNamedIndices are defined by the user
+ // in the .td files. The rest are synthesized such that all sub-registers
+ // have a unique name.
+ const std::deque<CodeGenSubRegIndex> &getSubRegIndices() const {
+ return SubRegIndices;
+ }
+
+ // Find a SubRegIndex from its Record def or add to the list if it does
+ // not exist there yet.
+ CodeGenSubRegIndex *getSubRegIdx(Record*);
+
+ // Find a SubRegIndex from its Record def.
+ const CodeGenSubRegIndex *findSubRegIdx(const Record* Def) const;
+
+ // Find or create a sub-register index representing the A+B composition.
+ CodeGenSubRegIndex *getCompositeSubRegIndex(CodeGenSubRegIndex *A,
+ CodeGenSubRegIndex *B);
+
+ // Find or create a sub-register index representing the concatenation of
+ // non-overlapping sibling indices.
+ CodeGenSubRegIndex *
+ getConcatSubRegIndex(const SmallVector<CodeGenSubRegIndex *, 8>&);
+
+ const std::deque<CodeGenRegister> &getRegisters() const {
+ return Registers;
+ }
+
+ const StringMap<CodeGenRegister *> &getRegistersByName() const {
+ return RegistersByName;
+ }
+
+ // Find a register from its Record def.
+ CodeGenRegister *getReg(Record*);
+
+ // Get a Register's index into the Registers array.
+ unsigned getRegIndex(const CodeGenRegister *Reg) const {
+ return Reg->EnumValue - 1;
+ }
+
+ // Return the number of allocated TopoSigs. The first TopoSig representing
+ // leaf registers is allocated number 0.
+ unsigned getNumTopoSigs() const {
+ return TopoSigs.size();
+ }
+
+ // Find or create a TopoSig for the given TopoSigId.
+ // This function is only for use by CodeGenRegister::computeSuperRegs().
+ // Others should simply use Reg->getTopoSig().
+ unsigned getTopoSig(const TopoSigId &Id) {
+ return TopoSigs.insert(std::make_pair(Id, TopoSigs.size())).first->second;
+ }
+
+ // Create a native register unit that is associated with one or two root
+ // registers.
+ unsigned newRegUnit(CodeGenRegister *R0, CodeGenRegister *R1 = nullptr) {
+ RegUnits.resize(RegUnits.size() + 1);
+ RegUnit &RU = RegUnits.back();
+ RU.Roots[0] = R0;
+ RU.Roots[1] = R1;
+ RU.Artificial = R0->Artificial;
+ if (R1)
+ RU.Artificial |= R1->Artificial;
+ return RegUnits.size() - 1;
+ }
+
+ // Create a new non-native register unit that can be adopted by a register
+ // to increase its pressure. Note that NumNativeRegUnits is not increased.
+ unsigned newRegUnit(unsigned Weight) {
+ RegUnits.resize(RegUnits.size() + 1);
+ RegUnits.back().Weight = Weight;
+ return RegUnits.size() - 1;
+ }
+
+ // Native units are the singular unit of a leaf register. Register aliasing
+ // is completely characterized by native units. Adopted units exist to give
+ // register additional weight but don't affect aliasing.
+ bool isNativeUnit(unsigned RUID) const {
+ return RUID < NumNativeRegUnits;
+ }
+
+ unsigned getNumNativeRegUnits() const {
+ return NumNativeRegUnits;
+ }
+
+ RegUnit &getRegUnit(unsigned RUID) { return RegUnits[RUID]; }
+ const RegUnit &getRegUnit(unsigned RUID) const { return RegUnits[RUID]; }
+
+ std::list<CodeGenRegisterClass> &getRegClasses() { return RegClasses; }
+
+ const std::list<CodeGenRegisterClass> &getRegClasses() const {
+ return RegClasses;
+ }
+
+ std::list<CodeGenRegisterCategory> &getRegCategories() {
+ return RegCategories;
+ }
+
+ const std::list<CodeGenRegisterCategory> &getRegCategories() const {
+ return RegCategories;
+ }
+
+ // Find a register class from its def.
+ CodeGenRegisterClass *getRegClass(const Record *) const;
+
+ /// getRegisterClassForRegister - Find the register class that contains the
+ /// specified physical register. If the register is not in a register
+ /// class, return null. If the register is in multiple classes, and the
+ /// classes have a superset-subset relationship and the same set of types,
+ /// return the superclass. Otherwise return null.
+ const CodeGenRegisterClass* getRegClassForRegister(Record *R);
+
+ // Analog of TargetRegisterInfo::getMinimalPhysRegClass. Unlike
+ // getRegClassForRegister, this tries to find the smallest class containing
+ // the physical register. If \p VT is specified, it will only find classes
+ // with a matching type
+ const CodeGenRegisterClass *
+ getMinimalPhysRegClass(Record *RegRecord, ValueTypeByHwMode *VT = nullptr);
+
+ // Get the sum of unit weights.
+ unsigned getRegUnitSetWeight(const std::vector<unsigned> &Units) const {
+ unsigned Weight = 0;
+ for (unsigned Unit : Units)
+ Weight += getRegUnit(Unit).Weight;
+ return Weight;
+ }
+
+ unsigned getRegSetIDAt(unsigned Order) const {
+ return RegUnitSetOrder[Order];
+ }
+
+ const RegUnitSet &getRegSetAt(unsigned Order) const {
+ return RegUnitSets[RegUnitSetOrder[Order]];
+ }
+
+ // Increase a RegUnitWeight.
+ void increaseRegUnitWeight(unsigned RUID, unsigned Inc) {
+ getRegUnit(RUID).Weight += Inc;
+ }
+
+ // Get the number of register pressure dimensions.
+ unsigned getNumRegPressureSets() const { return RegUnitSets.size(); }
+
+ // Get a set of register unit IDs for a given dimension of pressure.
+ const RegUnitSet &getRegPressureSet(unsigned Idx) const {
+ return RegUnitSets[Idx];
+ }
+
+ // The number of pressure set lists may be larger than the number of
+ // register classes if some register units appeared in a list of sets that
+ // did not correspond to an existing register class.
+ unsigned getNumRegClassPressureSetLists() const {
+ return RegClassUnitSets.size();
+ }
+
+ // Get a list of pressure set IDs for a register class. Liveness of a
+ // register in this class impacts each pressure set in this list by the
+ // weight of the register. An exact solution requires all registers in a
+ // class to have the same class, but it is not strictly guaranteed.
+ ArrayRef<unsigned> getRCPressureSetIDs(unsigned RCIdx) const {
+ return RegClassUnitSets[RCIdx];
+ }
+
+ // Compute derived records such as missing sub-register indices.
+ void computeDerivedInfo();
+
+ // Compute the set of registers completely covered by the registers in Regs.
+ // The returned BitVector will have a bit set for each register in Regs,
+ // all sub-registers, and all super-registers that are covered by the
+ // registers in Regs.
+ //
+ // This is used to compute the mask of call-preserved registers from a list
+ // of callee-saves.
+ BitVector computeCoveredRegisters(ArrayRef<Record*> Regs);
+
+ // Bit mask of lanes that cover their registers. A sub-register index whose
+ // LaneMask is contained in CoveringLanes will be completely covered by
+ // another sub-register with the same or larger lane mask.
+ LaneBitmask CoveringLanes;
+
+ // Helper function for printing debug information. Handles artificial
+ // (non-native) reg units.
+ void printRegUnitName(unsigned Unit) const;
+ };
+
+} // end namespace llvm
+
+#endif // LLVM_UTILS_TABLEGEN_CODEGENREGISTERS_H
diff --git a/contrib/libs/llvm16/utils/TableGen/CodeGenSchedule.cpp b/contrib/libs/llvm16/utils/TableGen/CodeGenSchedule.cpp
new file mode 100644
index 0000000000..441a088c17
--- /dev/null
+++ b/contrib/libs/llvm16/utils/TableGen/CodeGenSchedule.cpp
@@ -0,0 +1,2273 @@
+//===- CodeGenSchedule.cpp - Scheduling MachineModels ---------------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// This file defines structures to encapsulate the machine model as described in
+// the target description.
+//
+//===----------------------------------------------------------------------===//
+
+#include "CodeGenSchedule.h"
+#include "CodeGenInstruction.h"
+#include "CodeGenTarget.h"
+#include "llvm/ADT/MapVector.h"
+#include "llvm/ADT/STLExtras.h"
+#include "llvm/ADT/SmallPtrSet.h"
+#include "llvm/ADT/SmallVector.h"
+#include "llvm/Support/Casting.h"
+#include "llvm/Support/Debug.h"
+#include "llvm/Support/Regex.h"
+#include "llvm/Support/raw_ostream.h"
+#include "llvm/TableGen/Error.h"
+#include <algorithm>
+#include <iterator>
+#include <utility>
+
+using namespace llvm;
+
+#define DEBUG_TYPE "subtarget-emitter"
+
+#ifndef NDEBUG
+// Print the indices in V to the debug stream as a comma-separated list.
+static void dumpIdxVec(ArrayRef<unsigned> V) {
+  for (unsigned I = 0, E = V.size(); I != E; ++I)
+    dbgs() << V[I] << ", ";
+}
+#endif
+
+namespace {
+
+// (instrs a, b, ...) Evaluate and union all arguments. Identical to AddOp.
+// Registered under the "instrs" DAG operator in the CodeGenSchedModels
+// constructor so InstRW records can list instructions directly.
+struct InstrsOp : public SetTheory::Operator {
+  void apply(SetTheory &ST, DagInit *Expr, SetTheory::RecSet &Elts,
+             ArrayRef<SMLoc> Loc) override {
+    ST.evaluate(Expr->arg_begin(), Expr->arg_end(), Elts, Loc);
+  }
+};
+
+// (instregex "OpcPat",...) Find all instructions matching an opcode pattern.
+struct InstRegexOp : public SetTheory::Operator {
+  const CodeGenTarget &Target;
+  InstRegexOp(const CodeGenTarget &t): Target(t) {}
+
+  /// Remove any text inside of parentheses from S.
+  static std::string removeParens(llvm::StringRef S) {
+    std::string Result;
+    unsigned Paren = 0;
+    // NB: We don't care about escaped parens here.
+    for (char C : S) {
+      switch (C) {
+      case '(':
+        ++Paren;
+        break;
+      case ')':
+        --Paren;
+        break;
+      default:
+        if (Paren == 0)
+          Result += C;
+      }
+    }
+    return Result;
+  }
+
+  void apply(SetTheory &ST, DagInit *Expr, SetTheory::RecSet &Elts,
+             ArrayRef<SMLoc> Loc) override {
+    ArrayRef<const CodeGenInstruction *> Instructions =
+        Target.getInstructionsByEnumValue();
+
+    unsigned NumGeneric = Target.getNumFixedInstructions();
+    unsigned NumPseudos = Target.getNumPseudoInstructions();
+    auto Generics = Instructions.slice(0, NumGeneric);
+    auto Pseudos = Instructions.slice(NumGeneric, NumPseudos);
+    auto NonPseudos = Instructions.slice(NumGeneric + NumPseudos);
+
+    for (Init *Arg : Expr->getArgs()) {
+      StringInit *SI = dyn_cast<StringInit>(Arg);
+      if (!SI)
+        PrintFatalError(Loc, "instregex requires pattern string: " +
+                                 Expr->getAsString());
+      StringRef Original = SI->getValue();
+
+      // Extract a prefix that we can binary search on.
+      static const char RegexMetachars[] = "()^$|*+?.[]\\{}";
+      auto FirstMeta = Original.find_first_of(RegexMetachars);
+
+      // Look for top-level | or ?. We cannot optimize them to binary search.
+      if (removeParens(Original).find_first_of("|?") != std::string::npos)
+        FirstMeta = 0;
+
+      // Split the pattern into a literal prefix (searchable) and a regex
+      // remainder (matched per candidate).
+      std::optional<Regex> Regexpr;
+      StringRef Prefix = Original.substr(0, FirstMeta);
+      StringRef PatStr = Original.substr(FirstMeta);
+      if (!PatStr.empty()) {
+        // For the rest use a python-style prefix match.
+        std::string pat = std::string(PatStr);
+        if (pat[0] != '^') {
+          pat.insert(0, "^(");
+          pat.insert(pat.end(), ')');
+        }
+        Regexpr = Regex(pat);
+      }
+
+      int NumMatches = 0;
+
+      // The generic opcodes are unsorted, handle them manually.
+      for (auto *Inst : Generics) {
+        StringRef InstName = Inst->TheDef->getName();
+        if (InstName.startswith(Prefix) &&
+            (!Regexpr || Regexpr->match(InstName.substr(Prefix.size())))) {
+          Elts.insert(Inst->TheDef);
+          NumMatches++;
+        }
+      }
+
+      // Target instructions are split into two ranges: pseudo instructions
+      // first, then non-pseudos. Each range is in lexicographical order
+      // sorted by name. Find the sub-ranges that start with our prefix.
+      struct Comp {
+        bool operator()(const CodeGenInstruction *LHS, StringRef RHS) {
+          return LHS->TheDef->getName() < RHS;
+        }
+        bool operator()(StringRef LHS, const CodeGenInstruction *RHS) {
+          return LHS < RHS->TheDef->getName() &&
+                 !RHS->TheDef->getName().startswith(LHS);
+        }
+      };
+      auto Range1 =
+          std::equal_range(Pseudos.begin(), Pseudos.end(), Prefix, Comp());
+      auto Range2 = std::equal_range(NonPseudos.begin(), NonPseudos.end(),
+                                     Prefix, Comp());
+
+      // For these ranges we know that instruction names start with the prefix.
+      // Check if there's a regex that needs to be checked.
+      const auto HandleNonGeneric = [&](const CodeGenInstruction *Inst) {
+        StringRef InstName = Inst->TheDef->getName();
+        if (!Regexpr || Regexpr->match(InstName.substr(Prefix.size()))) {
+          Elts.insert(Inst->TheDef);
+          NumMatches++;
+        }
+      };
+      std::for_each(Range1.first, Range1.second, HandleNonGeneric);
+      std::for_each(Range2.first, Range2.second, HandleNonGeneric);
+
+      if (0 == NumMatches)
+        PrintFatalError(Loc, "instregex has no matches: " + Original);
+    }
+  }
+};
+
+} // end anonymous namespace
+
+/// CodeGenSchedModels ctor interprets machine model records and populates maps.
+CodeGenSchedModels::CodeGenSchedModels(RecordKeeper &RK,
+                                       const CodeGenTarget &TGT):
+  Records(RK), Target(TGT) {
+
+  Sets.addFieldExpander("InstRW", "Instrs");
+
+  // Allow Set evaluation to recognize the dags used in InstRW records:
+  // (instrs Op1, Op1...)
+  Sets.addOperator("instrs", std::make_unique<InstrsOp>());
+  Sets.addOperator("instregex", std::make_unique<InstRegexOp>(Target));
+
+  // Instantiate a CodeGenProcModel for each SchedMachineModel with the values
+  // that are explicitly referenced in tablegen records. Resources associated
+  // with each processor will be derived later. Populate ProcModelMap with the
+  // CodeGenProcModel instances.
+  collectProcModels();
+
+  // Instantiate a CodeGenSchedRW for each SchedReadWrite record explicitly
+  // defined, and populate SchedReads and SchedWrites vectors. Implicit
+  // SchedReadWrites that represent sequences derived from expanded variant will
+  // be inferred later.
+  collectSchedRW();
+
+  // Instantiate a CodeGenSchedClass for each unique SchedRW signature directly
+  // required by an instruction definition, and populate SchedClassIdxMap. Set
+  // NumItineraryClasses to the number of explicit itinerary classes referenced
+  // by instructions. Set NumInstrSchedClasses to the number of itinerary
+  // classes plus any classes implied by instructions that derive from class
+  // Sched and provide SchedRW list. This does not infer any new classes from
+  // SchedVariant.
+  collectSchedClasses();
+
+  // Find instruction itineraries for each processor. Sort and populate
+  // CodeGenProcModel::ItinDefList. (Cycle-to-cycle itineraries). This requires
+  // all itinerary classes to be discovered.
+  collectProcItins();
+
+  // Find ItinRW records for each processor and itinerary class.
+  // (For per-operand resources mapped to itinerary classes).
+  collectProcItinRW();
+
+  // Find UnsupportedFeatures records for each processor.
+  // (For per-operand resources mapped to itinerary classes).
+  collectProcUnsupportedFeatures();
+
+  // Infer new SchedClasses from SchedVariant.
+  inferSchedClasses();
+
+  // Populate each CodeGenProcModel's WriteResDefs, ReadAdvanceDefs, and
+  // ProcResourceDefs.
+  LLVM_DEBUG(
+      dbgs() << "\n+++ RESOURCE DEFINITIONS (collectProcResources) +++\n");
+  collectProcResources();
+
+  // Collect optional processor description.
+  collectOptionalProcessorInfo();
+
+  // Check MCInstPredicate definitions.
+  checkMCInstPredicates();
+
+  // Check STIPredicate definitions.
+  checkSTIPredicates();
+
+  // Find STIPredicate definitions for each processor model, and construct
+  // STIPredicateFunction objects.
+  collectSTIPredicates();
+
+  checkCompleteness();
+}
+
+// Validate STIPredicate-related records: reject duplicate declarations and
+// InstructionEquivalenceClass definitions with empty opcode lists.
+void CodeGenSchedModels::checkSTIPredicates() const {
+  DenseMap<StringRef, const Record *> Declarations;
+
+  // Each STIPredicateDecl name may be declared at most once.
+  for (const Record *R : Records.getAllDerivedDefinitions("STIPredicateDecl")) {
+    StringRef Name = R->getValueAsString("Name");
+    auto [It, Inserted] = Declarations.try_emplace(Name, R);
+    if (Inserted)
+      continue;
+
+    PrintError(R->getLoc(), "STIPredicate " + Name + " multiply declared.");
+    PrintFatalNote(It->second->getLoc(), "Previous declaration was here.");
+  }
+
+  // An InstructionEquivalenceClass must name at least one opcode.
+  for (const Record *R :
+       Records.getAllDerivedDefinitions("InstructionEquivalenceClass")) {
+    if (R->getValueAsListOfDefs("Opcodes").empty())
+      PrintFatalError(R->getLoc(), "Invalid InstructionEquivalenceClass "
+                                   "defined with an empty opcode list.");
+  }
+}
+
+// Used by function `processSTIPredicate` to construct a mask of machine
+// instruction operands: one bit set per operand index in `Indices`.
+static APInt constructOperandMask(ArrayRef<int64_t> Indices) {
+  // An empty index list yields the default (zero-width) mask.
+  APInt Mask;
+  if (Indices.empty())
+    return Mask;
+
+  // Size the mask so the highest requested bit fits.
+  int64_t Widest = Indices.front();
+  for (const int64_t Idx : Indices)
+    Widest = std::max(Widest, Idx);
+  assert(Widest >= 0 && "Invalid negative indices in input!");
+  Mask = Mask.zext(Widest + 1);
+
+  // Set one bit per listed operand.
+  for (const int64_t Idx : Indices) {
+    assert(Idx >= 0 && "Invalid negative indices!");
+    Mask.setBit(Idx);
+  }
+  return Mask;
+}
+
+// Populate STIPredicateFunction `Fn` from its InstructionEquivalenceClass
+// definitions:
+//  1. assign dense indices to the unique predicates and opcodes involved;
+//  2. build, per opcode, the (processor mask, operand mask, predicate)
+//     entries, rejecting an opcode claimed twice for the same processor;
+//  3. sort the opcodes by processor/predicate mask so opcodes with identical
+//     masks become adjacent, then hand them to `Fn` in that order.
+static void
+processSTIPredicate(STIPredicateFunction &Fn,
+                    const ProcModelMapTy &ProcModelMap) {
+  DenseMap<const Record *, unsigned> Opcode2Index;
+  using OpcodeMapPair = std::pair<const Record *, OpcodeInfo>;
+  std::vector<OpcodeMapPair> OpcodeMappings;
+  std::vector<std::pair<APInt, APInt>> OpcodeMasks;
+
+  DenseMap<const Record *, unsigned> Predicate2Index;
+  unsigned NumUniquePredicates = 0;
+
+  // Number unique predicates and opcodes used by InstructionEquivalenceClass
+  // definitions. Each unique opcode will be associated with an OpcodeInfo
+  // object.
+  for (const Record *Def : Fn.getDefinitions()) {
+    RecVec Classes = Def->getValueAsListOfDefs("Classes");
+    for (const Record *EC : Classes) {
+      const Record *Pred = EC->getValueAsDef("Predicate");
+      if (Predicate2Index.find(Pred) == Predicate2Index.end())
+        Predicate2Index[Pred] = NumUniquePredicates++;
+
+      RecVec Opcodes = EC->getValueAsListOfDefs("Opcodes");
+      for (const Record *Opcode : Opcodes) {
+        if (Opcode2Index.find(Opcode) == Opcode2Index.end()) {
+          Opcode2Index[Opcode] = OpcodeMappings.size();
+          OpcodeMappings.emplace_back(Opcode, OpcodeInfo());
+        }
+      }
+    }
+  }
+
+  // Initialize vector `OpcodeMasks` with default values.  We want to keep track
+  // of which processors "use" which opcodes.  We also want to be able to
+  // identify predicates that are used by different processors for a same
+  // opcode.
+  // This information is used later on by this algorithm to sort OpcodeMapping
+  // elements based on their processor and predicate sets.
+  OpcodeMasks.resize(OpcodeMappings.size());
+  APInt DefaultProcMask(ProcModelMap.size(), 0);
+  APInt DefaultPredMask(NumUniquePredicates, 0);
+  for (std::pair<APInt, APInt> &MaskPair : OpcodeMasks)
+    MaskPair = std::make_pair(DefaultProcMask, DefaultPredMask);
+
+  // Construct a OpcodeInfo object for every unique opcode declared by an
+  // InstructionEquivalenceClass definition.
+  for (const Record *Def : Fn.getDefinitions()) {
+    RecVec Classes = Def->getValueAsListOfDefs("Classes");
+    const Record *SchedModel = Def->getValueAsDef("SchedModel");
+    unsigned ProcIndex = ProcModelMap.find(SchedModel)->second;
+    APInt ProcMask(ProcModelMap.size(), 0);
+    ProcMask.setBit(ProcIndex);
+
+    for (const Record *EC : Classes) {
+      RecVec Opcodes = EC->getValueAsListOfDefs("Opcodes");
+
+      std::vector<int64_t> OpIndices =
+          EC->getValueAsListOfInts("OperandIndices");
+      APInt OperandMask = constructOperandMask(OpIndices);
+
+      const Record *Pred = EC->getValueAsDef("Predicate");
+      APInt PredMask(NumUniquePredicates, 0);
+      PredMask.setBit(Predicate2Index[Pred]);
+
+      for (const Record *Opcode : Opcodes) {
+        unsigned OpcodeIdx = Opcode2Index[Opcode];
+        // A processor may claim a given opcode in at most one equivalence
+        // class; a second claim is a TableGen authoring error.
+        if (OpcodeMasks[OpcodeIdx].first[ProcIndex]) {
+          std::string Message =
+              "Opcode " + Opcode->getName().str() +
+              " used by multiple InstructionEquivalenceClass definitions.";
+          PrintFatalError(EC->getLoc(), Message);
+        }
+        OpcodeMasks[OpcodeIdx].first |= ProcMask;
+        OpcodeMasks[OpcodeIdx].second |= PredMask;
+        OpcodeInfo &OI = OpcodeMappings[OpcodeIdx].second;
+
+        OI.addPredicateForProcModel(ProcMask, OperandMask, Pred);
+      }
+    }
+  }
+
+  // Sort OpcodeMappings elements based on their CPU and predicate masks.
+  // As a last resort, order elements by opcode identifier.
+  llvm::sort(OpcodeMappings,
+             [&](const OpcodeMapPair &Lhs, const OpcodeMapPair &Rhs) {
+               unsigned LhsIdx = Opcode2Index[Lhs.first];
+               unsigned RhsIdx = Opcode2Index[Rhs.first];
+               const std::pair<APInt, APInt> &LhsMasks = OpcodeMasks[LhsIdx];
+               const std::pair<APInt, APInt> &RhsMasks = OpcodeMasks[RhsIdx];
+
+               // Masks compare first by population count, then by position of
+               // the most significant set bit.
+               auto LessThan = [](const APInt &Lhs, const APInt &Rhs) {
+                 unsigned LhsCountPopulation = Lhs.countPopulation();
+                 unsigned RhsCountPopulation = Rhs.countPopulation();
+                 return ((LhsCountPopulation < RhsCountPopulation) ||
+                         ((LhsCountPopulation == RhsCountPopulation) &&
+                          (Lhs.countLeadingZeros() > Rhs.countLeadingZeros())));
+               };
+
+               if (LhsMasks.first != RhsMasks.first)
+                 return LessThan(LhsMasks.first, RhsMasks.first);
+
+               if (LhsMasks.second != RhsMasks.second)
+                 return LessThan(LhsMasks.second, RhsMasks.second);
+
+               return LhsIdx < RhsIdx;
+             });
+
+  // Now construct opcode groups. Groups are used by the SubtargetEmitter when
+  // expanding the body of a STIPredicate function. In particular, each opcode
+  // group is expanded into a sequence of labels in a switch statement.
+  // It identifies opcodes for which different processors define same predicates
+  // and same opcode masks.
+  for (OpcodeMapPair &Info : OpcodeMappings)
+    Fn.addOpcode(Info.first, std::move(Info.second));
+}
+
+// Group all STIPredicate records by their declaration: the first record seen
+// for a declaration creates a new STIPredicateFunction; later records for the
+// same declaration are added to it as extra definitions. Each function is
+// then finalized by processSTIPredicate.
+void CodeGenSchedModels::collectSTIPredicates() {
+  // Map STIPredicateDecl records to elements of vector
+  // CodeGenSchedModels::STIPredicates.
+  DenseMap<const Record *, unsigned> Decl2Index;
+
+  RecVec RV = Records.getAllDerivedDefinitions("STIPredicate");
+  for (const Record *R : RV) {
+    const Record *Decl = R->getValueAsDef("Declaration");
+
+    const auto It = Decl2Index.find(Decl);
+    if (It == Decl2Index.end()) {
+      // First definition for this declaration: start a new function.
+      Decl2Index[Decl] = STIPredicates.size();
+      STIPredicateFunction Predicate(Decl);
+      Predicate.addDefinition(R);
+      STIPredicates.emplace_back(std::move(Predicate));
+      continue;
+    }
+
+    STIPredicateFunction &PreviousDef = STIPredicates[It->second];
+    PreviousDef.addDefinition(R);
+  }
+
+  for (STIPredicateFunction &Fn : STIPredicates)
+    processSTIPredicate(Fn, ProcModelMap);
+}
+
+// Record that `Predicate` guards this opcode (for the operands in
+// `OperandMask`) on the processors in `CpuMask`. If an identical
+// (predicate, operand-mask) entry already exists, just widen the set of
+// processors it applies to; otherwise append a new entry.
+void OpcodeInfo::addPredicateForProcModel(const llvm::APInt &CpuMask,
+                                          const llvm::APInt &OperandMask,
+                                          const Record *Predicate) {
+  for (PredicateInfo &P : Predicates) {
+    if (P.Predicate == Predicate && P.OperandMask == OperandMask) {
+      P.ProcModelMask |= CpuMask;
+      return;
+    }
+  }
+  Predicates.emplace_back(CpuMask, OperandMask, Predicate);
+}
+
+// Verify that no two TIIPredicate records share a FunctionName; each name is
+// reported against the location of the first definition seen.
+void CodeGenSchedModels::checkMCInstPredicates() const {
+  RecVec MCPredicates = Records.getAllDerivedDefinitions("TIIPredicate");
+  if (MCPredicates.empty())
+    return;
+
+  // A target cannot have multiple TIIPredicate definitions with a same name.
+  llvm::StringMap<const Record *> TIIPredicates(MCPredicates.size());
+  for (const Record *TIIPred : MCPredicates) {
+    StringRef Name = TIIPred->getValueAsString("FunctionName");
+    StringMap<const Record *>::const_iterator It = TIIPredicates.find(Name);
+    if (It == TIIPredicates.end()) {
+      TIIPredicates[Name] = TIIPred;
+      continue;
+    }
+
+    PrintError(TIIPred->getLoc(),
+               "TIIPredicate " + Name + " is multiply defined.");
+    PrintFatalNote(It->second->getLoc(),
+                   " Previous definition of " + Name + " was here.");
+  }
+}
+
+// Attach each RetireControlUnit record to its processor model. A model may
+// declare at most one; a duplicate is diagnosed, but the latest definition
+// still overwrites the field so processing can continue.
+void CodeGenSchedModels::collectRetireControlUnits() {
+  RecVec Units = Records.getAllDerivedDefinitions("RetireControlUnit");
+
+  for (Record *RCU : Units) {
+    CodeGenProcModel &PM = getProcModel(RCU->getValueAsDef("SchedModel"));
+    if (PM.RetireControlUnit) {
+      PrintError(RCU->getLoc(),
+                 "Expected a single RetireControlUnit definition");
+      PrintNote(PM.RetireControlUnit->getLoc(),
+                "Previous definition of RetireControlUnit was here");
+    }
+    PM.RetireControlUnit = RCU;
+  }
+}
+
+// Attach each MemoryQueue record (LoadQueue / StoreQueue subclasses) to its
+// processor model, diagnosing duplicate definitions per model. As with the
+// retire control unit, the latest definition wins after the diagnostic.
+void CodeGenSchedModels::collectLoadStoreQueueInfo() {
+  RecVec Queues = Records.getAllDerivedDefinitions("MemoryQueue");
+
+  for (Record *Queue : Queues) {
+    CodeGenProcModel &PM = getProcModel(Queue->getValueAsDef("SchedModel"));
+    if (Queue->isSubClassOf("LoadQueue")) {
+      if (PM.LoadQueue) {
+        PrintError(Queue->getLoc(), "Expected a single LoadQueue definition");
+        PrintNote(PM.LoadQueue->getLoc(),
+                  "Previous definition of LoadQueue was here");
+      }
+      PM.LoadQueue = Queue;
+    }
+    if (Queue->isSubClassOf("StoreQueue")) {
+      if (PM.StoreQueue) {
+        PrintError(Queue->getLoc(), "Expected a single StoreQueue definition");
+        PrintNote(PM.StoreQueue->getLoc(),
+                  "Previous definition of StoreQueue was here");
+      }
+      PM.StoreQueue = Queue;
+    }
+  }
+}
+
+/// Collect optional processor information: register files, the retire
+/// control unit, and load/store queues. Runs checkCompleteness afterwards so
+/// that missing scheduling info is reported as soon as the optional data is
+/// in place.
+void CodeGenSchedModels::collectOptionalProcessorInfo() {
+  // Find register file definitions for each processor.
+  collectRegisterFiles();
+
+  // Collect processor RetireControlUnit descriptors if available.
+  collectRetireControlUnits();
+
+  // Collect information about load/store queues.
+  collectLoadStoreQueueInfo();
+
+  checkCompleteness();
+}
+
+/// Gather all processor models. Sorts Processor records by name, rejects
+/// duplicate processor names, installs the reserved NoSchedModel entry at
+/// index 0, and then registers one unique model per processor.
+void CodeGenSchedModels::collectProcModels() {
+  RecVec ProcRecords = Records.getAllDerivedDefinitions("Processor");
+  llvm::sort(ProcRecords, LessRecordFieldName());
+
+  // Check for duplicated names. The list is sorted by name, so duplicates
+  // must be adjacent.
+  auto I = std::adjacent_find(ProcRecords.begin(), ProcRecords.end(),
+                              [](const Record *Rec1, const Record *Rec2) {
+    return Rec1->getValueAsString("Name") == Rec2->getValueAsString("Name");
+  });
+  if (I != ProcRecords.end())
+    PrintFatalError((*I)->getLoc(), "Duplicate processor name " +
+                    (*I)->getValueAsString("Name"));
+
+  // Reserve space because we can. Reallocation would be ok.
+  ProcModels.reserve(ProcRecords.size()+1);
+
+  // Use idx=0 for NoModel/NoItineraries.
+  Record *NoModelDef = Records.getDef("NoSchedModel");
+  Record *NoItinsDef = Records.getDef("NoItineraries");
+  ProcModels.emplace_back(0, "NoSchedModel", NoModelDef, NoItinsDef);
+  ProcModelMap[NoModelDef] = 0;
+
+  // For each processor, find a unique machine model.
+  LLVM_DEBUG(dbgs() << "+++ PROCESSOR MODELs (addProcModel) +++\n");
+  for (Record *ProcRecord : ProcRecords)
+    addProcModel(ProcRecord);
+}
+
+/// Get a unique processor model based on the defined MachineModel and
+/// ProcessorItineraries. The model/itinerary record is the deduplication
+/// key: processors sharing the same record share one CodeGenProcModel.
+void CodeGenSchedModels::addProcModel(Record *ProcDef) {
+  Record *ModelKey = getModelOrItinDef(ProcDef);
+  // Already registered under this key; nothing to do.
+  if (!ProcModelMap.insert(std::make_pair(ModelKey, ProcModels.size())).second)
+    return;
+
+  std::string Name = std::string(ModelKey->getName());
+  if (ModelKey->isSubClassOf("SchedMachineModel")) {
+    Record *ItinsDef = ModelKey->getValueAsDef("Itineraries");
+    ProcModels.emplace_back(ProcModels.size(), Name, ModelKey, ItinsDef);
+  }
+  else {
+    // An itinerary is defined without a machine model. Infer a new model.
+    // Non-empty itineraries get "Model" appended to disambiguate the name.
+    if (!ModelKey->getValueAsListOfDefs("IID").empty())
+      Name = Name + "Model";
+    ProcModels.emplace_back(ProcModels.size(), Name,
+                            ProcDef->getValueAsDef("SchedModel"), ModelKey);
+  }
+  LLVM_DEBUG(ProcModels.back().dump());
+}
+
+// Recursively find all reachable SchedReadWrite records, appending each
+// newly-seen record to RWDefs. RWSet deduplicates across the recursion.
+static void scanSchedRW(Record *RWDef, RecVec &RWDefs,
+                        SmallPtrSet<Record*, 16> &RWSet) {
+  // Already visited; avoid duplicates (and cycles).
+  if (!RWSet.insert(RWDef).second)
+    return;
+  RWDefs.push_back(RWDef);
+  // Reads don't currently have sequence records, but it can be added later.
+  if (RWDef->isSubClassOf("WriteSequence")) {
+    // Recurse into every member of the write sequence.
+    RecVec Seq = RWDef->getValueAsListOfDefs("Writes");
+    for (Record *WSRec : Seq)
+      scanSchedRW(WSRec, RWDefs, RWSet);
+  }
+  else if (RWDef->isSubClassOf("SchedVariant")) {
+    // Visit each variant (guarded by a different predicate).
+    RecVec Vars = RWDef->getValueAsListOfDefs("Variants");
+    for (Record *Variant : Vars) {
+      // Visit each RW in the sequence selected by the current variant.
+      RecVec Selected = Variant->getValueAsListOfDefs("Selected");
+      for (Record *SelDef : Selected)
+        scanSchedRW(SelDef, RWDefs, RWSet);
+    }
+  }
+}
+
+// Collect and sort all SchedReadWrites reachable via tablegen records.
+// More may be inferred later when inferring new SchedClasses from variants.
+//
+// Sources scanned: instruction SchedRW lists, InstRW and ItinRW
+// OperandReadWrites, and SchedAlias targets. Writes and reads are indexed in
+// separate domains, each with index 0 reserved for "invalid".
+void CodeGenSchedModels::collectSchedRW() {
+  // Reserve idx=0 for invalid writes/reads.
+  SchedWrites.resize(1);
+  SchedReads.resize(1);
+
+  SmallPtrSet<Record*, 16> RWSet;
+
+  // Find all SchedReadWrites referenced by instruction defs.
+  RecVec SWDefs, SRDefs;
+  for (const CodeGenInstruction *Inst : Target.getInstructionsByEnumValue()) {
+    Record *SchedDef = Inst->TheDef;
+    if (SchedDef->isValueUnset("SchedRW"))
+      continue;
+    RecVec RWs = SchedDef->getValueAsListOfDefs("SchedRW");
+    for (Record *RW : RWs) {
+      if (RW->isSubClassOf("SchedWrite"))
+        scanSchedRW(RW, SWDefs, RWSet);
+      else {
+        assert(RW->isSubClassOf("SchedRead") && "Unknown SchedReadWrite");
+        scanSchedRW(RW, SRDefs, RWSet);
+      }
+    }
+  }
+  // Find all ReadWrites referenced by InstRW.
+  RecVec InstRWDefs = Records.getAllDerivedDefinitions("InstRW");
+  for (Record *InstRWDef : InstRWDefs) {
+    // For all OperandReadWrites.
+    RecVec RWDefs = InstRWDef->getValueAsListOfDefs("OperandReadWrites");
+    for (Record *RWDef : RWDefs) {
+      if (RWDef->isSubClassOf("SchedWrite"))
+        scanSchedRW(RWDef, SWDefs, RWSet);
+      else {
+        assert(RWDef->isSubClassOf("SchedRead") && "Unknown SchedReadWrite");
+        scanSchedRW(RWDef, SRDefs, RWSet);
+      }
+    }
+  }
+  // Find all ReadWrites referenced by ItinRW.
+  RecVec ItinRWDefs = Records.getAllDerivedDefinitions("ItinRW");
+  for (Record *ItinRWDef : ItinRWDefs) {
+    // For all OperandReadWrites.
+    RecVec RWDefs = ItinRWDef->getValueAsListOfDefs("OperandReadWrites");
+    for (Record *RWDef : RWDefs) {
+      if (RWDef->isSubClassOf("SchedWrite"))
+        scanSchedRW(RWDef, SWDefs, RWSet);
+      else {
+        assert(RWDef->isSubClassOf("SchedRead") && "Unknown SchedReadWrite");
+        scanSchedRW(RWDef, SRDefs, RWSet);
+      }
+    }
+  }
+  // Find all ReadWrites referenced by SchedAlias. AliasDefs needs to be sorted
+  // for the loop below that initializes Alias vectors.
+  RecVec AliasDefs = Records.getAllDerivedDefinitions("SchedAlias");
+  llvm::sort(AliasDefs, LessRecord());
+  for (Record *ADef : AliasDefs) {
+    Record *MatchDef = ADef->getValueAsDef("MatchRW");
+    Record *AliasDef = ADef->getValueAsDef("AliasRW");
+    // An alias must stay within its domain: writes alias writes, reads
+    // alias reads.
+    if (MatchDef->isSubClassOf("SchedWrite")) {
+      if (!AliasDef->isSubClassOf("SchedWrite"))
+        PrintFatalError(ADef->getLoc(), "SchedWrite Alias must be SchedWrite");
+      scanSchedRW(AliasDef, SWDefs, RWSet);
+    }
+    else {
+      assert(MatchDef->isSubClassOf("SchedRead") && "Unknown SchedReadWrite");
+      if (!AliasDef->isSubClassOf("SchedRead"))
+        PrintFatalError(ADef->getLoc(), "SchedRead Alias must be SchedRead");
+      scanSchedRW(AliasDef, SRDefs, RWSet);
+    }
+  }
+  // Sort and add the SchedReadWrites directly referenced by instructions or
+  // itinerary resources. Index reads and writes in separate domains.
+  llvm::sort(SWDefs, LessRecord());
+  for (Record *SWDef : SWDefs) {
+    assert(!getSchedRWIdx(SWDef, /*IsRead=*/false) && "duplicate SchedWrite");
+    SchedWrites.emplace_back(SchedWrites.size(), SWDef);
+  }
+  llvm::sort(SRDefs, LessRecord());
+  for (Record *SRDef : SRDefs) {
+    assert(!getSchedRWIdx(SRDef, /*IsRead=*/true) && "duplicate SchedRead");
+    SchedReads.emplace_back(SchedReads.size(), SRDef);
+  }
+  // Initialize WriteSequence vectors.
+  for (CodeGenSchedRW &CGRW : SchedWrites) {
+    if (!CGRW.IsSequence)
+      continue;
+    findRWs(CGRW.TheDef->getValueAsListOfDefs("Writes"), CGRW.Sequence,
+            /*IsRead=*/false);
+  }
+  // Initialize Aliases vectors.
+  for (Record *ADef : AliasDefs) {
+    Record *AliasDef = ADef->getValueAsDef("AliasRW");
+    getSchedRW(AliasDef).IsAlias = true;
+    Record *MatchDef = ADef->getValueAsDef("MatchRW");
+    CodeGenSchedRW &RW = getSchedRW(MatchDef);
+    if (RW.IsAlias)
+      PrintFatalError(ADef->getLoc(), "Cannot Alias an Alias");
+    RW.Aliases.push_back(ADef);
+  }
+  LLVM_DEBUG(
+      dbgs() << "\n+++ SCHED READS and WRITES (collectSchedRW) +++\n";
+      for (unsigned WIdx = 0, WEnd = SchedWrites.size(); WIdx != WEnd; ++WIdx) {
+        dbgs() << WIdx << ": ";
+        SchedWrites[WIdx].dump();
+        dbgs() << '\n';
+      } for (unsigned RIdx = 0, REnd = SchedReads.size(); RIdx != REnd;
+             ++RIdx) {
+        dbgs() << RIdx << ": ";
+        SchedReads[RIdx].dump();
+        dbgs() << '\n';
+      } RecVec RWDefs = Records.getAllDerivedDefinitions("SchedReadWrite");
+      for (Record *RWDef
+           : RWDefs) {
+        if (!getSchedRWIdx(RWDef, RWDef->isSubClassOf("SchedRead"))) {
+          StringRef Name = RWDef->getName();
+          if (Name != "NoWrite" && Name != "ReadDefault")
+            dbgs() << "Unused SchedReadWrite " << Name << '\n';
+        }
+      });
+}
+
+/// Compute a SchedWrite name from a sequence of writes, e.g. "(A_B_C)":
+/// the member names joined by '_' and wrapped in parentheses.
+std::string CodeGenSchedModels::genRWName(ArrayRef<unsigned> Seq, bool IsRead) {
+  std::string Result = "(";
+  bool First = true;
+  for (unsigned Idx : Seq) {
+    if (!First)
+      Result += '_';
+    First = false;
+    Result += getSchedRW(Idx, IsRead).Name;
+  }
+  Result += ')';
+  return Result;
+}
+
+// Return the index of `Def` in the read or write table, or 0 when it is not
+// present (index 0 is the reserved invalid entry).
+unsigned CodeGenSchedModels::getSchedRWIdx(const Record *Def,
+                                           bool IsRead) const {
+  const std::vector<CodeGenSchedRW> &Table = IsRead ? SchedReads : SchedWrites;
+  for (unsigned Idx = 0, End = Table.size(); Idx != End; ++Idx)
+    if (Table[Idx].TheDef == Def)
+      return Idx;
+  return 0;
+}
+
+// True if any processor's ReadAdvance definition lists `WriteDef` among its
+// ValidWrites.
+bool CodeGenSchedModels::hasReadOfWrite(Record *WriteDef) const {
+  for (const CodeGenProcModel &PM : ProcModels) {
+    for (Record *RADef : PM.ReadAdvanceDefs) {
+      for (Record *ValidWrite : RADef->getValueAsListOfDefs("ValidWrites"))
+        if (ValidWrite == WriteDef)
+          return true;
+    }
+  }
+  return false;
+}
+
+// Partition a mixed list of SchedReadWrite defs into its SchedWrite and
+// SchedRead members, preserving order.
+static void splitSchedReadWrites(const RecVec &RWDefs,
+                                 RecVec &WriteDefs, RecVec &ReadDefs) {
+  for (Record *RWDef : RWDefs) {
+    const bool IsWrite = RWDef->isSubClassOf("SchedWrite");
+    if (!IsWrite)
+      assert(RWDef->isSubClassOf("SchedRead") && "unknown SchedReadWrite");
+    (IsWrite ? WriteDefs : ReadDefs).push_back(RWDef);
+  }
+}
+
+// Split the SchedReadWrites defs and call findRWs for each list, producing
+// write indices in `Writes` and read indices in `Reads`.
+void CodeGenSchedModels::findRWs(const RecVec &RWDefs,
+                                 IdxVec &Writes, IdxVec &Reads) const {
+  RecVec WriteDefs, ReadDefs;
+  splitSchedReadWrites(RWDefs, WriteDefs, ReadDefs);
+  findRWs(WriteDefs, Writes, /*IsRead=*/false);
+  findRWs(ReadDefs, Reads, /*IsRead=*/true);
+}
+
+// Map every SchedRW def in RWDefs to its table index via getSchedRWIdx and
+// append the indices to RWs. All defs must already be collected (index != 0).
+void CodeGenSchedModels::findRWs(const RecVec &RWDefs, IdxVec &RWs,
+                                 bool IsRead) const {
+  for (Record *Def : RWDefs) {
+    const unsigned Index = getSchedRWIdx(Def, IsRead);
+    assert(Index && "failed to collect SchedReadWrite");
+    RWs.push_back(Index);
+  }
+}
+
+// Recursively flatten a (possibly repeated) WriteSequence into RWSeq; a
+// non-sequence read/write is appended directly.
+void CodeGenSchedModels::expandRWSequence(unsigned RWIdx, IdxVec &RWSeq,
+                                          bool IsRead) const {
+  const CodeGenSchedRW &SchedRW = getSchedRW(RWIdx, IsRead);
+  if (!SchedRW.IsSequence) {
+    RWSeq.push_back(RWIdx);
+    return;
+  }
+  // A record-less sequence repeats exactly once.
+  const int Repeat =
+      SchedRW.TheDef ? SchedRW.TheDef->getValueAsInt("Repeat") : 1;
+  for (int Iter = 0; Iter < Repeat; ++Iter)
+    for (unsigned Member : SchedRW.Sequence)
+      expandRWSequence(Member, RWSeq, IsRead);
+}
+
+// Expand a SchedWrite as a sequence following any aliases that coincide with
+// the given processor model.
+void CodeGenSchedModels::expandRWSeqForProc(
+    unsigned RWIdx, IdxVec &RWSeq, bool IsRead,
+    const CodeGenProcModel &ProcModel) const {
+
+  const CodeGenSchedRW &SchedWrite = getSchedRW(RWIdx, IsRead);
+  // Select the alias (if any) applying to this processor. An alias whose
+  // SchedModel field is unset applies to every processor.
+  Record *AliasDef = nullptr;
+  for (const Record *Rec : SchedWrite.Aliases) {
+    const CodeGenSchedRW &AliasRW = getSchedRW(Rec->getValueAsDef("AliasRW"));
+    if (Rec->getValueInit("SchedModel")->isComplete()) {
+      Record *ModelDef = Rec->getValueAsDef("SchedModel");
+      if (&getProcModel(ModelDef) != &ProcModel)
+        continue;
+    }
+    // At most one alias may apply per RW per processor.
+    if (AliasDef)
+      PrintFatalError(AliasRW.TheDef->getLoc(), "Multiple aliases "
+                      "defined for processor " + ProcModel.ModelName +
+                      " Ensure only one SchedAlias exists per RW.");
+    AliasDef = AliasRW.TheDef;
+  }
+  // When aliased, expand the alias target instead of this RW.
+  if (AliasDef) {
+    expandRWSeqForProc(getSchedRWIdx(AliasDef, IsRead),
+                       RWSeq, IsRead,ProcModel);
+    return;
+  }
+  // A non-sequence RW expands to itself.
+  if (!SchedWrite.IsSequence) {
+    RWSeq.push_back(RWIdx);
+    return;
+  }
+  // Flatten the sequence, honoring the optional Repeat count (1 when there
+  // is no backing record).
+  int Repeat =
+      SchedWrite.TheDef ? SchedWrite.TheDef->getValueAsInt("Repeat") : 1;
+  for (int I = 0, E = Repeat; I < E; ++I) {
+    for (unsigned Idx : SchedWrite.Sequence) {
+      expandRWSeqForProc(Idx, RWSeq, IsRead, ProcModel);
+    }
+  }
+}
+
+// Find the existing SchedWrite that models this sequence of writes; returns
+// 0 (the reserved invalid index) when no table entry matches.
+unsigned CodeGenSchedModels::findRWForSequence(ArrayRef<unsigned> Seq,
+                                               bool IsRead) {
+  std::vector<CodeGenSchedRW> &Table = IsRead ? SchedReads : SchedWrites;
+  for (unsigned Idx = 0, End = Table.size(); Idx != End; ++Idx)
+    if (ArrayRef(Table[Idx].Sequence) == Seq)
+      return Idx;
+  return 0;
+}
+
+/// Add this ReadWrite if it doesn't already exist. Returns the index of the
+/// matching (or newly-created) entry in the read or write table.
+unsigned CodeGenSchedModels::findOrInsertRW(ArrayRef<unsigned> Seq,
+                                            bool IsRead) {
+  assert(!Seq.empty() && "cannot insert empty sequence");
+  // A one-element sequence is just that element; no record needed.
+  if (Seq.size() == 1)
+    return Seq.back();
+
+  // Reuse an existing sequence record when one matches.
+  if (unsigned Existing = findRWForSequence(Seq, IsRead))
+    return Existing;
+
+  // Otherwise append a fresh record to the appropriate table.
+  std::vector<CodeGenSchedRW> &Table = IsRead ? SchedReads : SchedWrites;
+  const unsigned NewIdx = Table.size();
+  Table.emplace_back(NewIdx, IsRead, Seq, genRWName(Seq, IsRead));
+  return NewIdx;
+}
+
+/// Visit all the instruction definitions for this target to gather and
+/// enumerate the itinerary classes. These are the explicitly specified
+/// SchedClasses. More SchedClasses may be inferred.
+void CodeGenSchedModels::collectSchedClasses() {
+
+  // NoItinerary is always the first class at Idx=0
+  assert(SchedClasses.empty() && "Expected empty sched class");
+  SchedClasses.emplace_back(0, "NoInstrModel",
+                            Records.getDef("NoItinerary"));
+  SchedClasses.back().ProcIndices.push_back(0);
+
+  // Create a SchedClass for each unique combination of itinerary class and
+  // SchedRW list.
+  for (const CodeGenInstruction *Inst : Target.getInstructionsByEnumValue()) {
+    Record *ItinDef = Inst->TheDef->getValueAsDef("Itinerary");
+    IdxVec Writes, Reads;
+    if (!Inst->TheDef->isValueUnset("SchedRW"))
+      findRWs(Inst->TheDef->getValueAsListOfDefs("SchedRW"), Writes, Reads);
+
+    // ProcIdx == 0 indicates the class applies to all processors.
+    unsigned SCIdx = addSchedClass(ItinDef, Writes, Reads, /*ProcIndices*/{0});
+    InstrClassMap[Inst->TheDef] = SCIdx;
+  }
+  // Create classes for InstRW defs.
+  RecVec InstRWDefs = Records.getAllDerivedDefinitions("InstRW");
+  llvm::sort(InstRWDefs, LessRecord());
+  LLVM_DEBUG(dbgs() << "\n+++ SCHED CLASSES (createInstRWClass) +++\n");
+  for (Record *RWDef : InstRWDefs)
+    createInstRWClass(RWDef);
+
+  // Classes created so far (before inference) are the "instruction" classes.
+  NumInstrSchedClasses = SchedClasses.size();
+
+  // Everything below is diagnostic output, only enabled in debug builds.
+  bool EnableDump = false;
+  LLVM_DEBUG(EnableDump = true);
+  if (!EnableDump)
+    return;
+
+  LLVM_DEBUG(
+      dbgs()
+      << "\n+++ ITINERARIES and/or MACHINE MODELS (collectSchedClasses) +++\n");
+  for (const CodeGenInstruction *Inst : Target.getInstructionsByEnumValue()) {
+    StringRef InstName = Inst->TheDef->getName();
+    unsigned SCIdx = getSchedClassIdx(*Inst);
+    if (!SCIdx) {
+      LLVM_DEBUG({
+        if (!Inst->hasNoSchedulingInfo)
+          dbgs() << "No machine model for " << Inst->TheDef->getName() << '\n';
+      });
+      continue;
+    }
+    CodeGenSchedClass &SC = getSchedClass(SCIdx);
+    if (SC.ProcIndices[0] != 0)
+      PrintFatalError(Inst->TheDef->getLoc(), "Instruction's sched class "
+                      "must not be subtarget specific.");
+
+    // Collect the processors this instruction has scheduling info on, and
+    // dump what that info is.
+    IdxVec ProcIndices;
+    if (SC.ItinClassDef->getName() != "NoItinerary") {
+      ProcIndices.push_back(0);
+      dbgs() << "Itinerary for " << InstName << ": "
+             << SC.ItinClassDef->getName() << '\n';
+    }
+    if (!SC.Writes.empty()) {
+      ProcIndices.push_back(0);
+      LLVM_DEBUG({
+        dbgs() << "SchedRW machine model for " << InstName;
+        for (unsigned int Write : SC.Writes)
+          dbgs() << " " << SchedWrites[Write].Name;
+        for (unsigned int Read : SC.Reads)
+          dbgs() << " " << SchedReads[Read].Name;
+        dbgs() << '\n';
+      });
+    }
+    const RecVec &RWDefs = SchedClasses[SCIdx].InstRWs;
+    for (Record *RWDef : RWDefs) {
+      const CodeGenProcModel &ProcModel =
+          getProcModel(RWDef->getValueAsDef("SchedModel"));
+      ProcIndices.push_back(ProcModel.Index);
+      LLVM_DEBUG(dbgs() << "InstRW on " << ProcModel.ModelName << " for "
+                        << InstName);
+      IdxVec Writes;
+      IdxVec Reads;
+      findRWs(RWDef->getValueAsListOfDefs("OperandReadWrites"),
+              Writes, Reads);
+      LLVM_DEBUG({
+        for (unsigned WIdx : Writes)
+          dbgs() << " " << SchedWrites[WIdx].Name;
+        for (unsigned RIdx : Reads)
+          dbgs() << " " << SchedReads[RIdx].Name;
+        dbgs() << '\n';
+      });
+    }
+    // If ProcIndices contains zero, the class applies to all processors.
+    LLVM_DEBUG({
+      if (!llvm::is_contained(ProcIndices, 0)) {
+        for (const CodeGenProcModel &PM : ProcModels) {
+          if (!llvm::is_contained(ProcIndices, PM.Index))
+            dbgs() << "No machine model for " << Inst->TheDef->getName()
+                   << " on processor " << PM.ModelName << '\n';
+        }
+      }
+    });
+  }
+}
+
+// Get the SchedClass index for an instruction; unmapped instructions yield
+// index 0 (the NoInstrModel class).
+unsigned
+CodeGenSchedModels::getSchedClassIdx(const CodeGenInstruction &Inst) const {
+  const auto It = InstrClassMap.find(Inst.TheDef);
+  return It == InstrClassMap.end() ? 0 : It->second;
+}
+
+// Build a SchedClass name from an itinerary class and per-operand RW lists:
+// the itinerary name (unless it is the NoItinerary placeholder) followed by
+// each write and each read name, all separated by '_'.
+std::string
+CodeGenSchedModels::createSchedClassName(Record *ItinClassDef,
+                                         ArrayRef<unsigned> OperWrites,
+                                         ArrayRef<unsigned> OperReads) {
+  std::string Result;
+  if (ItinClassDef && ItinClassDef->getName() != "NoItinerary")
+    Result = std::string(ItinClassDef->getName());
+  for (unsigned WIdx : OperWrites) {
+    if (!Result.empty())
+      Result += '_';
+    Result += SchedWrites[WIdx].Name;
+  }
+  for (unsigned RIdx : OperReads) {
+    Result += '_';
+    Result += SchedReads[RIdx].Name;
+  }
+  return Result;
+}
+
+// Build a SchedClass name for an InstRW class: the instruction names joined
+// by '_'.
+std::string CodeGenSchedModels::createSchedClassName(const RecVec &InstDefs) {
+  std::string Result;
+  bool First = true;
+  for (const Record *InstDef : InstDefs) {
+    if (!First)
+      Result += '_';
+    First = false;
+    Result += InstDef->getName();
+  }
+  return Result;
+}
+
+/// Add an inferred sched class from an itinerary class and per-operand list of
+/// SchedWrites and SchedReads. ProcIndices contains the set of IDs of
+/// processors that may utilize this class.
+unsigned CodeGenSchedModels::addSchedClass(Record *ItinClassDef,
+                                           ArrayRef<unsigned> OperWrites,
+                                           ArrayRef<unsigned> OperReads,
+                                           ArrayRef<unsigned> ProcIndices) {
+  assert(!ProcIndices.empty() && "expect at least one ProcIdx");
+
+  auto IsKeyEqual = [=](const CodeGenSchedClass &SC) {
+    return SC.isKeyEqual(ItinClassDef, OperWrites, OperReads);
+  };
+
+  auto I = find_if(make_range(schedClassBegin(), schedClassEnd()), IsKeyEqual);
+  unsigned Idx = I == schedClassEnd() ? 0 : std::distance(schedClassBegin(), I);
+  // Idx == 0 is ambiguous: it means both "not found" and "matched class 0",
+  // so class 0's key is re-checked explicitly before creating a new class.
+  if (Idx || SchedClasses[0].isKeyEqual(ItinClassDef, OperWrites, OperReads)) {
+    // Merge the requested processor IDs into the existing class; set_union
+    // keeps ProcIndices sorted and duplicate-free.
+    IdxVec PI;
+    std::set_union(SchedClasses[Idx].ProcIndices.begin(),
+                   SchedClasses[Idx].ProcIndices.end(),
+                   ProcIndices.begin(), ProcIndices.end(),
+                   std::back_inserter(PI));
+    SchedClasses[Idx].ProcIndices = std::move(PI);
+    return Idx;
+  }
+  // No existing class matched: append a new one keyed on
+  // (itinerary, writes, reads).
+  Idx = SchedClasses.size();
+  SchedClasses.emplace_back(Idx,
+                            createSchedClassName(ItinClassDef, OperWrites,
+                                                 OperReads),
+                            ItinClassDef);
+  CodeGenSchedClass &SC = SchedClasses.back();
+  SC.Writes = OperWrites;
+  SC.Reads = OperReads;
+  SC.ProcIndices = ProcIndices;
+
+  return Idx;
+}
+
+// Create classes for each set of opcodes that are in the same InstReadWrite
+// definition across all processors.
+void CodeGenSchedModels::createInstRWClass(Record *InstRWDef) {
+  // ClassInstrs will hold an entry for each subset of Instrs in InstRWDef that
+  // intersects with an existing class via a previous InstRWDef. Instrs that do
+  // not intersect with an existing class refer back to their former class as
+  // determined from ItinDef or SchedRW.
+  SmallMapVector<unsigned, SmallVector<Record *, 8>, 4> ClassInstrs;
+  // Sort Instrs into sets.
+  const RecVec *InstDefs = Sets.expand(InstRWDef);
+  if (InstDefs->empty())
+    PrintFatalError(InstRWDef->getLoc(), "No matching instruction opcodes");
+
+  // Bucket the matched instructions by their current SchedClass.
+  for (Record *InstDef : *InstDefs) {
+    InstClassMapTy::const_iterator Pos = InstrClassMap.find(InstDef);
+    if (Pos == InstrClassMap.end())
+      PrintFatalError(InstDef->getLoc(), "No sched class for instruction.");
+    unsigned SCIdx = Pos->second;
+    ClassInstrs[SCIdx].push_back(InstDef);
+  }
+  // For each set of Instrs, create a new class if necessary, and map or remap
+  // the Instrs to it.
+  for (auto &Entry : ClassInstrs) {
+    unsigned OldSCIdx = Entry.first;
+    ArrayRef<Record*> InstDefs = Entry.second;
+    // If all the instrs in the current class are accounted for, then leave
+    // them mapped to their old class.
+    if (OldSCIdx) {
+      const RecVec &RWDefs = SchedClasses[OldSCIdx].InstRWs;
+      if (!RWDefs.empty()) {
+        const RecVec *OrigInstDefs = Sets.expand(RWDefs[0]);
+        unsigned OrigNumInstrs =
+          count_if(*OrigInstDefs, [&](Record *OIDef) {
+                     return InstrClassMap[OIDef] == OldSCIdx;
+                   });
+        if (OrigNumInstrs == InstDefs.size()) {
+          assert(SchedClasses[OldSCIdx].ProcIndices[0] == 0 &&
+                 "expected a generic SchedClass");
+          Record *RWModelDef = InstRWDef->getValueAsDef("SchedModel");
+          // Make sure we didn't already have a InstRW containing this
+          // instruction on this model.
+          for (Record *RWD : RWDefs) {
+            if (RWD->getValueAsDef("SchedModel") == RWModelDef &&
+                RWModelDef->getValueAsBit("FullInstRWOverlapCheck")) {
+              assert(!InstDefs.empty()); // Checked at function start.
+              PrintError(
+                  InstRWDef->getLoc(),
+                  "Overlapping InstRW definition for \"" +
+                      InstDefs.front()->getName() +
+                      "\" also matches previous \"" +
+                      RWD->getValue("Instrs")->getValue()->getAsString() +
+                      "\".");
+              PrintFatalNote(RWD->getLoc(), "Previous match was here.");
+            }
+          }
+          LLVM_DEBUG(dbgs() << "InstRW: Reuse SC " << OldSCIdx << ":"
+                            << SchedClasses[OldSCIdx].Name << " on "
+                            << RWModelDef->getName() << "\n");
+          SchedClasses[OldSCIdx].InstRWs.push_back(InstRWDef);
+          continue;
+        }
+      }
+    }
+    // Otherwise split off a new class for this subset of instructions.
+    unsigned SCIdx = SchedClasses.size();
+    SchedClasses.emplace_back(SCIdx, createSchedClassName(InstDefs), nullptr);
+    CodeGenSchedClass &SC = SchedClasses.back();
+    LLVM_DEBUG(dbgs() << "InstRW: New SC " << SCIdx << ":" << SC.Name << " on "
+                      << InstRWDef->getValueAsDef("SchedModel")->getName()
+                      << "\n");
+
+    // Preserve ItinDef and Writes/Reads for processors without an InstRW entry.
+    SC.ItinClassDef = SchedClasses[OldSCIdx].ItinClassDef;
+    SC.Writes = SchedClasses[OldSCIdx].Writes;
+    SC.Reads = SchedClasses[OldSCIdx].Reads;
+    SC.ProcIndices.push_back(0);
+    // If we had an old class, copy its InstRWs to this new class.
+    if (OldSCIdx) {
+      Record *RWModelDef = InstRWDef->getValueAsDef("SchedModel");
+      for (Record *OldRWDef : SchedClasses[OldSCIdx].InstRWs) {
+        if (OldRWDef->getValueAsDef("SchedModel") == RWModelDef) {
+          assert(!InstDefs.empty()); // Checked at function start.
+          PrintError(
+              InstRWDef->getLoc(),
+              "Overlapping InstRW definition for \"" +
+                  InstDefs.front()->getName() + "\" also matches previous \"" +
+                  OldRWDef->getValue("Instrs")->getValue()->getAsString() +
+                  "\".");
+          PrintFatalNote(OldRWDef->getLoc(), "Previous match was here.");
+        }
+        assert(OldRWDef != InstRWDef &&
+               "SchedClass has duplicate InstRW def");
+        SC.InstRWs.push_back(OldRWDef);
+      }
+    }
+    // Map each Instr to this new class.
+    for (Record *InstDef : InstDefs)
+      InstrClassMap[InstDef] = SCIdx;
+    SC.InstRWs.push_back(InstRWDef);
+  }
+}
+
+// True if collectProcItins found anything, i.e. at least one processor model
+// defines itineraries.
+bool CodeGenSchedModels::hasItineraries() const {
+  return llvm::any_of(ProcModels, [](const CodeGenProcModel &PM) {
+    return PM.hasItineraries();
+  });
+}
+
+// Gather the processor itineraries: for each model with itineraries, place
+// every InstrItinData record at the index of each SchedClass sharing its
+// itinerary class.
+void CodeGenSchedModels::collectProcItins() {
+  LLVM_DEBUG(dbgs() << "\n+++ PROBLEM ITINERARIES (collectProcItins) +++\n");
+  for (CodeGenProcModel &ProcModel : ProcModels) {
+    if (!ProcModel.hasItineraries())
+      continue;
+
+    RecVec ItinRecords = ProcModel.ItinsDef->getValueAsListOfDefs("IID");
+    assert(!ItinRecords.empty() && "ProcModel.hasItineraries is incorrect");
+
+    // Populate ItinDefList with Itinerary records.
+    // One slot per instruction SchedClass; slots left null have no itinerary.
+    ProcModel.ItinDefList.resize(NumInstrSchedClasses);
+
+    // Insert each itinerary data record in the correct position within
+    // the processor model's ItinDefList.
+    for (Record *ItinData : ItinRecords) {
+      const Record *ItinDef = ItinData->getValueAsDef("TheClass");
+      bool FoundClass = false;
+
+      for (const CodeGenSchedClass &SC :
+           make_range(schedClassBegin(), schedClassEnd())) {
+        // Multiple SchedClasses may share an itinerary. Update all of them.
+        if (SC.ItinClassDef == ItinDef) {
+          ProcModel.ItinDefList[SC.Index] = ItinData;
+          FoundClass = true;
+        }
+      }
+      if (!FoundClass) {
+        LLVM_DEBUG(dbgs() << ProcModel.ItinsDef->getName()
+                          << " missing class for itinerary "
+                          << ItinDef->getName() << '\n');
+      }
+    }
+    // Check for missing itinerary entries.
+    assert(!ProcModel.ItinDefList[0] && "NoItinerary class can't have rec");
+    LLVM_DEBUG(
+        for (unsigned i = 1, N = ProcModel.ItinDefList.size(); i < N; ++i) {
+          if (!ProcModel.ItinDefList[i])
+            dbgs() << ProcModel.ItinsDef->getName()
+                   << " missing itinerary for class " << SchedClasses[i].Name
+                   << '\n';
+        });
+  }
+}
+
+// Gather the read/write types for each itinerary class.
+void CodeGenSchedModels::collectProcItinRW() {
+  RecVec ItinRWDefs = Records.getAllDerivedDefinitions("ItinRW");
+  // Sort for deterministic processing order.
+  llvm::sort(ItinRWDefs, LessRecord());
+  for (Record *RWDef : ItinRWDefs) {
+    // Every ItinRW must be bound to a concrete SchedMachineModel.
+    if (!RWDef->getValueInit("SchedModel")->isComplete())
+      PrintFatalError(RWDef->getLoc(), "SchedModel is undefined");
+    Record *ModelDef = RWDef->getValueAsDef("SchedModel");
+    auto It = ProcModelMap.find(ModelDef);
+    if (It == ProcModelMap.end())
+      PrintFatalError(RWDef->getLoc(), "Undefined SchedMachineModel "
+                      + ModelDef->getName());
+    // Attach the ItinRW to the processor model it names.
+    ProcModels[It->second].ItinRWDefs.push_back(RWDef);
+  }
+}
+
+// Gather the unsupported features for processor models.
+void CodeGenSchedModels::collectProcUnsupportedFeatures() {
+  for (CodeGenProcModel &ProcModel : ProcModels) {
+    // Copy the model's UnsupportedFeatures list into the proc model record.
+    RecVec Unsupported =
+        ProcModel.ModelDef->getValueAsListOfDefs("UnsupportedFeatures");
+    llvm::append_range(ProcModel.UnsupportedFeaturesDefs, Unsupported);
+  }
+}
+
+/// Infer new classes from existing classes. In the process, this may create new
+/// SchedWrites from sequences of existing SchedWrites.
+void CodeGenSchedModels::inferSchedClasses() {
+  LLVM_DEBUG(
+      dbgs() << "\n+++ INFERRING SCHED CLASSES (inferSchedClasses) +++\n");
+  LLVM_DEBUG(dbgs() << NumInstrSchedClasses << " instr sched classes.\n");
+
+  // Visit all existing classes and newly created classes.
+  // SchedClasses.size() is deliberately re-read every iteration: the infer*
+  // calls below may append new classes, and those must be visited too.
+  for (unsigned Idx = 0; Idx != SchedClasses.size(); ++Idx) {
+    assert(SchedClasses[Idx].Index == Idx && "bad SCIdx");
+
+    // Itinerary-based classes infer from per-processor ItinRW entries.
+    if (SchedClasses[Idx].ItinClassDef)
+      inferFromItinClass(SchedClasses[Idx].ItinClassDef, Idx);
+    // Per-instruction InstRW overrides infer per-processor transitions.
+    if (!SchedClasses[Idx].InstRWs.empty())
+      inferFromInstRWs(Idx);
+    // Classes with explicit SchedWrites infer directly from their RW lists.
+    if (!SchedClasses[Idx].Writes.empty()) {
+      inferFromRW(SchedClasses[Idx].Writes, SchedClasses[Idx].Reads,
+                  Idx, SchedClasses[Idx].ProcIndices);
+    }
+    // Guard against runaway variant expansion.
+    assert(SchedClasses.size() < (NumInstrSchedClasses*6) &&
+           "too many SchedVariants");
+  }
+}
+
+/// Infer classes from per-processor itinerary resources.
+/// \param ItinClassDef itinerary class of the originating sched class.
+/// \param FromClassIdx index of the sched class transitions are attached to.
+void CodeGenSchedModels::inferFromItinClass(Record *ItinClassDef,
+                                            unsigned FromClassIdx) {
+  for (unsigned PIdx = 0, PEnd = ProcModels.size(); PIdx != PEnd; ++PIdx) {
+    const CodeGenProcModel &PM = ProcModels[PIdx];
+    // For all ItinRW entries.
+    bool HasMatch = false;
+    for (const Record *Rec : PM.ItinRWDefs) {
+      RecVec Matched = Rec->getValueAsListOfDefs("MatchedItinClasses");
+      if (!llvm::is_contained(Matched, ItinClassDef))
+        continue;
+      // At most one ItinRW per processor model may match a given class.
+      if (HasMatch)
+        PrintFatalError(Rec->getLoc(), "Duplicate itinerary class "
+                        + ItinClassDef->getName()
+                        + " in ItinResources for " + PM.ModelName);
+      HasMatch = true;
+      // Translate the ItinRW's read/write defs to indices, then infer.
+      IdxVec Writes, Reads;
+      findRWs(Rec->getValueAsListOfDefs("OperandReadWrites"), Writes, Reads);
+      inferFromRW(Writes, Reads, FromClassIdx, PIdx);
+    }
+  }
+}
+
+/// Infer classes from per-processor InstReadWrite definitions.
+void CodeGenSchedModels::inferFromInstRWs(unsigned SCIdx) {
+  for (unsigned I = 0, E = SchedClasses[SCIdx].InstRWs.size(); I != E; ++I) {
+    assert(SchedClasses[SCIdx].InstRWs.size() == E && "InstrRWs was mutated!");
+    Record *Rec = SchedClasses[SCIdx].InstRWs[I];
+    const RecVec *InstDefs = Sets.expand(Rec);
+    // Check whether any instruction named by this InstRW still maps to this
+    // sched class.
+    RecIter II = InstDefs->begin(), IE = InstDefs->end();
+    for (; II != IE; ++II) {
+      if (InstrClassMap[*II] == SCIdx)
+        break;
+    }
+    // If this class no longer has any instructions mapped to it, it has become
+    // irrelevant.
+    if (II == IE)
+      continue;
+    IdxVec Writes, Reads;
+    findRWs(Rec->getValueAsListOfDefs("OperandReadWrites"), Writes, Reads);
+    unsigned PIdx = getProcModel(Rec->getValueAsDef("SchedModel")).Index;
+    inferFromRW(Writes, Reads, SCIdx, PIdx); // May mutate SchedClasses.
+    // Record which processors have explicit InstRW transitions so generic
+    // transitions are not also emitted for them (see inferFromTransitions).
+    SchedClasses[SCIdx].InstRWProcIndices.insert(PIdx);
+  }
+}
+
+namespace {
+
+// Helper for substituteVariantOperand.
+struct TransVariant {
+  Record *VarOrSeqDef; // Variant or sequence.
+  unsigned RWIdx; // Index of this variant or sequence's matched type.
+  unsigned ProcIdx; // Processor model index or zero for any.
+  unsigned TransVecIdx; // Index into PredTransitions::TransVec.
+
+  TransVariant(Record *def, unsigned rwi, unsigned pi, unsigned ti):
+    VarOrSeqDef(def), RWIdx(rwi), ProcIdx(pi), TransVecIdx(ti) {}
+};
+
+// Associate a predicate with the SchedReadWrite that it guards.
+// RWIdx is the index of the read/write variant.
+struct PredCheck {
+  bool IsRead;
+  unsigned RWIdx;
+  Record *Predicate;
+
+  PredCheck(bool r, unsigned w, Record *p): IsRead(r), RWIdx(w), Predicate(p) {}
+};
+
+// A Predicate transition is a list of RW sequences guarded by a PredTerm.
+struct PredTransition {
+  // A predicate term is a conjunction of PredChecks.
+  SmallVector<PredCheck, 4> PredTerm;
+  // One inner vector of RW indices per write (resp. read) operand.
+  SmallVector<SmallVector<unsigned,4>, 16> WriteSequences;
+  SmallVector<SmallVector<unsigned,4>, 16> ReadSequences;
+  // Processor this transition applies to; 0 (the default) means not yet
+  // assigned (see makePerProcessorTransitions).
+  unsigned ProcIndex = 0;
+
+  PredTransition() = default;
+  PredTransition(ArrayRef<PredCheck> PT, unsigned ProcId) {
+    PredTerm.assign(PT.begin(), PT.end());
+    ProcIndex = ProcId;
+  }
+};
+
+// Encapsulate a set of partially constructed transitions.
+// The results are built by repeated calls to substituteVariants.
+class PredTransitions {
+  CodeGenSchedModels &SchedModels;
+
+public:
+  std::vector<PredTransition> TransVec;
+
+  PredTransitions(CodeGenSchedModels &sm): SchedModels(sm) {}
+
+  // Apply RWSeq to all transitions from StartIdx to the end of TransVec.
+  // Returns true if any variant substitution occurred.
+  bool substituteVariantOperand(const SmallVectorImpl<unsigned> &RWSeq,
+                                bool IsRead, unsigned StartIdx);
+
+  // One breadth-first expansion step for the variants of Trans.
+  bool substituteVariants(const PredTransition &Trans);
+
+#ifndef NDEBUG
+  void dump() const;
+#endif
+
+private:
+  bool mutuallyExclusive(Record *PredDef, ArrayRef<Record *> Preds,
+                         ArrayRef<PredCheck> Term);
+  void getIntersectingVariants(
+      const CodeGenSchedRW &SchedRW, unsigned TransIdx,
+      std::vector<TransVariant> &IntersectingVariants);
+  void pushVariant(const TransVariant &VInfo, bool IsRead);
+};
+
+} // end anonymous namespace
+
+// Return true if this predicate is mutually exclusive with a PredTerm. This
+// degenerates into checking if the predicate is mutually exclusive with any
+// predicate in the Term's conjunction.
+//
+// All predicates associated with a given SchedRW are considered mutually
+// exclusive. This should work even if the conditions expressed by the
+// predicates are not exclusive because the predicates for a given SchedWrite
+// are always checked in the order they are defined in the .td file. Later
+// conditions implicitly negate any prior condition.
+bool PredTransitions::mutuallyExclusive(Record *PredDef,
+                                        ArrayRef<Record *> Preds,
+                                        ArrayRef<PredCheck> Term) {
+  for (const PredCheck &PC: Term) {
+    // A predicate is never exclusive with itself.
+    if (PC.Predicate == PredDef)
+      return false;
+
+    const CodeGenSchedRW &SchedRW = SchedModels.getSchedRW(PC.RWIdx, PC.IsRead);
+    assert(SchedRW.HasVariants && "PredCheck must refer to a SchedVariant");
+    RecVec Variants = SchedRW.TheDef->getValueAsListOfDefs("Variants");
+    // Is PredDef one of the predicates guarding a variant of this SchedRW?
+    if (any_of(Variants, [PredDef](const Record *R) {
+          return R->getValueAsDef("Predicate") == PredDef;
+        })) {
+      // To check if PredDef is mutually exclusive with PC we also need to
+      // check that PC.Predicate is exclusive with all predicates from variant
+      // we're expanding. Consider following RW sequence with two variants
+      // (1 & 2), where A, B and C are predicates from corresponding SchedVars:
+      //
+      // 1:A/B - 2:C/B
+      //
+      // Here C is not mutually exclusive with variant (1), because A doesn't
+      // exist in variant (2). This means we have possible transitions from A
+      // to C and from A to B, and fully expanded sequence would look like:
+      //
+      // if (A & C) return ...;
+      // if (A & B) return ...;
+      // if (B) return ...;
+      //
+      // Now let's consider another sequence:
+      //
+      // 1:A/B - 2:A/B
+      //
+      // Here A in variant (2) is mutually exclusive with variant (1), because
+      // A also exists in (2). This means A->B transition is impossible and
+      // expanded sequence would look like:
+      //
+      // if (A) return ...;
+      // if (B) return ...;
+      if (!llvm::is_contained(Preds, PC.Predicate))
+        continue;
+      return true;
+    }
+  }
+  return false;
+}
+
+// Collect the predicate records guarding every SchedVar in Variants.
+// Sequence entries (non-SchedVar defs) carry no predicate and are skipped.
+// The second (processor-index) parameter is currently unused; it is kept,
+// unnamed, so the existing call site stays unchanged while avoiding an
+// unused-parameter warning.
+static std::vector<Record *> getAllPredicates(ArrayRef<TransVariant> Variants,
+                                              unsigned /*ProcId*/) {
+  std::vector<Record *> Preds;
+  for (const TransVariant &Variant : Variants) {
+    // Only SchedVar defs have a "Predicate" field.
+    if (!Variant.VarOrSeqDef->isSubClassOf("SchedVar"))
+      continue;
+    Preds.push_back(Variant.VarOrSeqDef->getValueAsDef("Predicate"));
+  }
+  return Preds;
+}
+
+// Populate IntersectingVariants with any variants or aliased sequences of the
+// given SchedRW whose processor indices and predicates are not mutually
+// exclusive with the given transition.
+void PredTransitions::getIntersectingVariants(
+    const CodeGenSchedRW &SchedRW, unsigned TransIdx,
+    std::vector<TransVariant> &IntersectingVariants) {
+
+  // Set when a variant with processor index 0 (i.e. valid on any processor)
+  // was seen; used for the diagnostic at the bottom.
+  bool GenericRW = false;
+
+  std::vector<TransVariant> Variants;
+  if (SchedRW.HasVariants) {
+    unsigned VarProcIdx = 0;
+    if (SchedRW.TheDef->getValueInit("SchedModel")->isComplete()) {
+      Record *ModelDef = SchedRW.TheDef->getValueAsDef("SchedModel");
+      VarProcIdx = SchedModels.getProcModel(ModelDef).Index;
+    }
+    // Only keep variants valid for this transition's processor.
+    if (VarProcIdx == 0 || VarProcIdx == TransVec[TransIdx].ProcIndex) {
+      // Push each variant. Assign TransVecIdx later.
+      const RecVec VarDefs = SchedRW.TheDef->getValueAsListOfDefs("Variants");
+      for (Record *VarDef : VarDefs)
+        Variants.emplace_back(VarDef, SchedRW.Index, VarProcIdx, 0);
+      if (VarProcIdx == 0)
+        GenericRW = true;
+    }
+  }
+  for (RecIter AI = SchedRW.Aliases.begin(), AE = SchedRW.Aliases.end();
+       AI != AE; ++AI) {
+    // If either the SchedAlias itself or the SchedReadWrite that it aliases
+    // to is defined within a processor model, constrain all variants to
+    // that processor.
+    unsigned AliasProcIdx = 0;
+    if ((*AI)->getValueInit("SchedModel")->isComplete()) {
+      Record *ModelDef = (*AI)->getValueAsDef("SchedModel");
+      AliasProcIdx = SchedModels.getProcModel(ModelDef).Index;
+    }
+    if (AliasProcIdx && AliasProcIdx != TransVec[TransIdx].ProcIndex)
+      continue;
+    // A processor-specific alias may not coexist with other variants.
+    if (!Variants.empty()) {
+      const CodeGenProcModel &PM =
+          *(SchedModels.procModelBegin() + AliasProcIdx);
+      PrintFatalError((*AI)->getLoc(),
+                      "Multiple variants defined for processor " +
+                          PM.ModelName +
+                          " Ensure only one SchedAlias exists per RW.");
+    }
+
+    const CodeGenSchedRW &AliasRW =
+        SchedModels.getSchedRW((*AI)->getValueAsDef("AliasRW"));
+
+    if (AliasRW.HasVariants) {
+      const RecVec VarDefs = AliasRW.TheDef->getValueAsListOfDefs("Variants");
+      for (Record *VD : VarDefs)
+        Variants.emplace_back(VD, AliasRW.Index, AliasProcIdx, 0);
+    }
+    // An aliased WriteSequence counts as a single unconditional variant.
+    if (AliasRW.IsSequence)
+      Variants.emplace_back(AliasRW.TheDef, SchedRW.Index, AliasProcIdx, 0);
+    if (AliasProcIdx == 0)
+      GenericRW = true;
+  }
+  std::vector<Record *> AllPreds =
+      getAllPredicates(Variants, TransVec[TransIdx].ProcIndex);
+  for (TransVariant &Variant : Variants) {
+    // Don't expand variants if the processor models don't intersect.
+    // A zero processor index means any processor.
+    if (Variant.VarOrSeqDef->isSubClassOf("SchedVar")) {
+      Record *PredDef = Variant.VarOrSeqDef->getValueAsDef("Predicate");
+      if (mutuallyExclusive(PredDef, AllPreds, TransVec[TransIdx].PredTerm))
+        continue;
+    }
+
+    if (IntersectingVariants.empty()) {
+      // The first variant builds on the existing transition.
+      Variant.TransVecIdx = TransIdx;
+      IntersectingVariants.push_back(Variant);
+    }
+    else {
+      // Push another copy of the current transition for more variants.
+      Variant.TransVecIdx = TransVec.size();
+      IntersectingVariants.push_back(Variant);
+      TransVec.push_back(TransVec[TransIdx]);
+    }
+  }
+  if (GenericRW && IntersectingVariants.empty()) {
+    PrintFatalError(SchedRW.TheDef->getLoc(), "No variant of this type has "
+                    "a matching predicate on any processor");
+  }
+}
+
+// Push the Reads/Writes selected by this variant onto the PredTransition
+// specified by VInfo.
+void PredTransitions::
+pushVariant(const TransVariant &VInfo, bool IsRead) {
+  PredTransition &Trans = TransVec[VInfo.TransVecIdx];
+
+  // If this operand transition is reached through a processor-specific alias,
+  // then the whole transition is specific to this processor.
+  IdxVec SelectedRWs;
+  if (VInfo.VarOrSeqDef->isSubClassOf("SchedVar")) {
+    // SchedVar: record its predicate on the transition and expand its
+    // "Selected" RW list.
+    Record *PredDef = VInfo.VarOrSeqDef->getValueAsDef("Predicate");
+    Trans.PredTerm.emplace_back(IsRead, VInfo.RWIdx,PredDef);
+    RecVec SelectedDefs = VInfo.VarOrSeqDef->getValueAsListOfDefs("Selected");
+    SchedModels.findRWs(SelectedDefs, SelectedRWs, IsRead);
+  }
+  else {
+    // Aliased WriteSequence: unconditional, selects the sequence itself.
+    assert(VInfo.VarOrSeqDef->isSubClassOf("WriteSequence") &&
+           "variant must be a SchedVariant or aliased WriteSequence");
+    SelectedRWs.push_back(SchedModels.getSchedRWIdx(VInfo.VarOrSeqDef, IsRead));
+  }
+
+  const CodeGenSchedRW &SchedRW = SchedModels.getSchedRW(VInfo.RWIdx, IsRead);
+
+  SmallVectorImpl<SmallVector<unsigned,4>> &RWSequences = IsRead
+    ? Trans.ReadSequences : Trans.WriteSequences;
+  if (SchedRW.IsVariadic) {
+    unsigned OperIdx = RWSequences.size()-1;
+    // Make N-1 copies of this transition's last sequence.
+    RWSequences.reserve(RWSequences.size() + SelectedRWs.size() - 1);
+    RWSequences.insert(RWSequences.end(), SelectedRWs.size() - 1,
+                       RWSequences[OperIdx]);
+    // Push each of the N elements of the SelectedRWs onto a copy of the last
+    // sequence (split the current operand into N operands).
+    // Note that write sequences should be expanded within this loop--the entire
+    // sequence belongs to a single operand.
+    for (IdxIter RWI = SelectedRWs.begin(), RWE = SelectedRWs.end();
+         RWI != RWE; ++RWI, ++OperIdx) {
+      IdxVec ExpandedRWs;
+      if (IsRead)
+        ExpandedRWs.push_back(*RWI);
+      else
+        SchedModels.expandRWSequence(*RWI, ExpandedRWs, IsRead);
+      llvm::append_range(RWSequences[OperIdx], ExpandedRWs);
+    }
+    assert(OperIdx == RWSequences.size() && "missed a sequence");
+  }
+  else {
+    // Push this transition's expanded sequence onto this transition's last
+    // sequence (add to the current operand's sequence).
+    SmallVectorImpl<unsigned> &Seq = RWSequences.back();
+    IdxVec ExpandedRWs;
+    for (unsigned int SelectedRW : SelectedRWs) {
+      // Reads are used as-is; writes may expand into WriteSequences.
+      if (IsRead)
+        ExpandedRWs.push_back(SelectedRW);
+      else
+        SchedModels.expandRWSequence(SelectedRW, ExpandedRWs, IsRead);
+    }
+    llvm::append_range(Seq, ExpandedRWs);
+  }
+}
+
+// RWSeq is a sequence of all Reads or all Writes for the next read or write
+// operand. StartIdx is an index into TransVec where partial results
+// starts. RWSeq must be applied to all transitions between StartIdx and the end
+// of TransVec. Returns true if any variant substitution took place.
+bool PredTransitions::substituteVariantOperand(
+    const SmallVectorImpl<unsigned> &RWSeq, bool IsRead, unsigned StartIdx) {
+  bool Subst = false;
+  // Visit each original RW within the current sequence.
+  for (unsigned int RWI : RWSeq) {
+    const CodeGenSchedRW &SchedRW = SchedModels.getSchedRW(RWI, IsRead);
+    // Push this RW on all partial PredTransitions or distribute variants.
+    // New PredTransitions may be pushed within this loop which should not be
+    // revisited (TransEnd must be loop invariant).
+    for (unsigned TransIdx = StartIdx, TransEnd = TransVec.size();
+         TransIdx != TransEnd; ++TransIdx) {
+      // Distribute this partial PredTransition across intersecting variants.
+      // This will push copies of TransVec[TransIdx] on the back of TransVec.
+      std::vector<TransVariant> IntersectingVariants;
+      getIntersectingVariants(SchedRW, TransIdx, IntersectingVariants);
+      // Now expand each variant on top of its copy of the transition.
+      for (const TransVariant &IV : IntersectingVariants)
+        pushVariant(IV, IsRead);
+      if (IntersectingVariants.empty()) {
+        // No variant applies: append the RW unchanged to the last sequence.
+        if (IsRead)
+          TransVec[TransIdx].ReadSequences.back().push_back(RWI);
+        else
+          TransVec[TransIdx].WriteSequences.back().push_back(RWI);
+        continue;
+      } else {
+        Subst = true;
+      }
+    }
+  }
+  return Subst;
+}
+
+// For each variant of a Read/Write in Trans, substitute the sequence of
+// Read/Writes guarded by the variant. This is exponential in the number of
+// variant Read/Writes, but in practice detection of mutually exclusive
+// predicates should result in linear growth in the total number variants.
+//
+// This is one step in a breadth-first search of nested variants.
+bool PredTransitions::substituteVariants(const PredTransition &Trans) {
+  // Build up a set of partial results starting at the back of
+  // PredTransitions. Remember the first new transition.
+  unsigned StartIdx = TransVec.size();
+  bool Subst = false;
+  assert(Trans.ProcIndex != 0);
+  TransVec.emplace_back(Trans.PredTerm, Trans.ProcIndex);
+
+  // Visit each original write sequence.
+  for (const auto &WriteSequence : Trans.WriteSequences) {
+    // Start a fresh (empty) write sequence on every partial transition.
+    for (PredTransition &PT :
+         make_range(TransVec.begin() + StartIdx, TransVec.end()))
+      PT.WriteSequences.emplace_back();
+    Subst |=
+        substituteVariantOperand(WriteSequence, /*IsRead=*/false, StartIdx);
+  }
+  // Visit each original read sequence.
+  for (const auto &ReadSequence : Trans.ReadSequences) {
+    // Start a fresh (empty) read sequence on every partial transition.
+    for (PredTransition &PT :
+         make_range(TransVec.begin() + StartIdx, TransVec.end()))
+      PT.ReadSequences.emplace_back();
+    Subst |= substituteVariantOperand(ReadSequence, /*IsRead=*/true, StartIdx);
+  }
+  return Subst;
+}
+
+// Convert each non-empty RW sequence in Seqs into a single RW index
+// (creating a new SchedWrite/SchedRead if needed) and append it to Result.
+static void addSequences(CodeGenSchedModels &SchedModels,
+                         const SmallVectorImpl<SmallVector<unsigned, 4>> &Seqs,
+                         IdxVec &Result, bool IsRead) {
+  for (const auto &Seq : Seqs) {
+    if (Seq.empty())
+      continue;
+    Result.push_back(SchedModels.findOrInsertRW(Seq, IsRead));
+  }
+}
+
+#ifndef NDEBUG
+// Debug helper: print the name of every record in RV to the debug stream,
+// each followed by ", " (including a trailing separator).
+static void dumpRecVec(const RecVec &RV) {
+  for (const Record *Rec : RV)
+    dbgs() << Rec->getName() << ", ";
+}
+#endif
+
+// Debug-only trace (via LLVM_DEBUG) of a transition being added from FromSC
+// to SCTrans.ToClassIdx under the predicate term Preds.
+static void dumpTransition(const CodeGenSchedModels &SchedModels,
+                           const CodeGenSchedClass &FromSC,
+                           const CodeGenSchedTransition &SCTrans,
+                           const RecVec &Preds) {
+  LLVM_DEBUG(dbgs() << "Adding transition from " << FromSC.Name << "("
+                    << FromSC.Index << ") to "
+                    << SchedModels.getSchedClass(SCTrans.ToClassIdx).Name << "("
+                    << SCTrans.ToClassIdx << ") on pred term: (";
+             dumpRecVec(Preds);
+             dbgs() << ") on processor (" << SCTrans.ProcIndex << ")\n");
+}
+// Create a new SchedClass for each variant found by inferFromRW. Pass over all
+// fully-substituted transitions and record them on the originating class.
+static void inferFromTransitions(ArrayRef<PredTransition> LastTransitions,
+                                 unsigned FromClassIdx,
+                                 CodeGenSchedModels &SchedModels) {
+  // For each PredTransition, create a new CodeGenSchedTransition, which usually
+  // requires creating a new SchedClass.
+  for (const auto &LastTransition : LastTransitions) {
+    // Variant expansion (substituteVariants) may create unconditional
+    // transitions. We don't need to build sched classes for them.
+    if (LastTransition.PredTerm.empty())
+      continue;
+    IdxVec OperWritesVariant, OperReadsVariant;
+    addSequences(SchedModels, LastTransition.WriteSequences, OperWritesVariant,
+                 false);
+    addSequences(SchedModels, LastTransition.ReadSequences, OperReadsVariant,
+                 true);
+    CodeGenSchedTransition SCTrans;
+
+    // Transition should not contain processor indices already assigned to
+    // InstRWs in this scheduling class.
+    const CodeGenSchedClass &FromSC = SchedModels.getSchedClass(FromClassIdx);
+    if (FromSC.InstRWProcIndices.count(LastTransition.ProcIndex))
+      continue;
+    SCTrans.ProcIndex = LastTransition.ProcIndex;
+    SCTrans.ToClassIdx =
+        SchedModels.addSchedClass(/*ItinClassDef=*/nullptr, OperWritesVariant,
+                                  OperReadsVariant, LastTransition.ProcIndex);
+
+    // The final PredTerm is unique set of predicates guarding the transition.
+    RecVec Preds;
+    transform(LastTransition.PredTerm, std::back_inserter(Preds),
+              [](const PredCheck &P) { return P.Predicate; });
+    // NOTE(review): std::unique removes only adjacent duplicates; this
+    // assumes duplicate predicates appear adjacently in PredTerm — confirm.
+    Preds.erase(std::unique(Preds.begin(), Preds.end()), Preds.end());
+    dumpTransition(SchedModels, FromSC, SCTrans, Preds);
+    SCTrans.PredTerm = std::move(Preds);
+    SchedModels.getSchedClass(FromClassIdx)
+        .Transitions.push_back(std::move(SCTrans));
+  }
+}
+
+// Return every concrete processor index known to the map, sorted.
+std::vector<unsigned> CodeGenSchedModels::getAllProcIndices() const {
+  std::vector<unsigned> ProcIdVec;
+  for (const auto &Entry : ProcModelMap) {
+    // A zero index denotes "any processor"; only collect concrete ones.
+    if (Entry.second != 0)
+      ProcIdVec.push_back(Entry.second);
+  }
+  // Map iteration order over Record pointers is not stable across runs, so
+  // sort the indices to make the result deterministic.
+  llvm::sort(ProcIdVec);
+  return ProcIdVec;
+}
+
+// Clone the seed transition Trans once per concrete processor index.
+static std::vector<PredTransition>
+makePerProcessorTransitions(const PredTransition &Trans,
+                            ArrayRef<unsigned> ProcIndices) {
+  std::vector<PredTransition> PerCpuTransVec;
+  PerCpuTransVec.reserve(ProcIndices.size());
+  for (unsigned ProcId : ProcIndices) {
+    assert(ProcId != 0);
+    PredTransition PerCpuTrans = Trans;
+    PerCpuTrans.ProcIndex = ProcId;
+    PerCpuTransVec.push_back(std::move(PerCpuTrans));
+  }
+  return PerCpuTransVec;
+}
+
+// Create new SchedClasses for the given ReadWrite list. If any of the
+// ReadWrites refers to a SchedVariant, create a new SchedClass for each variant
+// of the ReadWrite list, following Aliases if necessary.
+// A ProcIndices entry of 0 expands to all processors (see below).
+void CodeGenSchedModels::inferFromRW(ArrayRef<unsigned> OperWrites,
+                                     ArrayRef<unsigned> OperReads,
+                                     unsigned FromClassIdx,
+                                     ArrayRef<unsigned> ProcIndices) {
+  LLVM_DEBUG(dbgs() << "INFER RW proc("; dumpIdxVec(ProcIndices);
+             dbgs() << ") ");
+  // Create a seed transition with an empty PredTerm and the expanded sequences
+  // of SchedWrites for the current SchedClass.
+  std::vector<PredTransition> LastTransitions;
+  LastTransitions.emplace_back();
+
+  for (unsigned WriteIdx : OperWrites) {
+    IdxVec WriteSeq;
+    expandRWSequence(WriteIdx, WriteSeq, /*IsRead=*/false);
+    LastTransitions[0].WriteSequences.emplace_back();
+    SmallVectorImpl<unsigned> &Seq = LastTransitions[0].WriteSequences.back();
+    Seq.append(WriteSeq.begin(), WriteSeq.end());
+    LLVM_DEBUG(dbgs() << "("; dumpIdxVec(Seq); dbgs() << ") ");
+  }
+  LLVM_DEBUG(dbgs() << " Reads: ");
+  for (unsigned ReadIdx : OperReads) {
+    IdxVec ReadSeq;
+    expandRWSequence(ReadIdx, ReadSeq, /*IsRead=*/true);
+    LastTransitions[0].ReadSequences.emplace_back();
+    SmallVectorImpl<unsigned> &Seq = LastTransitions[0].ReadSequences.back();
+    Seq.append(ReadSeq.begin(), ReadSeq.end());
+    LLVM_DEBUG(dbgs() << "("; dumpIdxVec(Seq); dbgs() << ") ");
+  }
+  LLVM_DEBUG(dbgs() << '\n');
+
+  // A 0 in ProcIndices means "all processors": expand it to the full list.
+  LastTransitions = makePerProcessorTransitions(
+      LastTransitions[0], llvm::is_contained(ProcIndices, 0)
+                              ? ArrayRef<unsigned>(getAllProcIndices())
+                              : ProcIndices);
+  // Collect all PredTransitions for individual operands.
+  // Iterate until no variant writes remain.
+  bool SubstitutedAny;
+  do {
+    SubstitutedAny = false;
+    PredTransitions Transitions(*this);
+    for (const PredTransition &Trans : LastTransitions)
+      SubstitutedAny |= Transitions.substituteVariants(Trans);
+    LLVM_DEBUG(Transitions.dump());
+    LastTransitions.swap(Transitions.TransVec);
+  } while (SubstitutedAny);
+
+  // WARNING: We are about to mutate the SchedClasses vector. Do not refer to
+  // OperWrites, OperReads, or ProcIndices after calling inferFromTransitions.
+  inferFromTransitions(LastTransitions, FromClassIdx, *this);
+}
+
+// Check if any processor resource group contains all resource records in
+// SubUnits.
+bool CodeGenSchedModels::hasSuperGroup(RecVec &SubUnits, CodeGenProcModel &PM) {
+  for (Record *ProcResourceDef : PM.ProcResourceDefs) {
+    if (!ProcResourceDef->isSubClassOf("ProcResGroup"))
+      continue;
+    RecVec SuperUnits = ProcResourceDef->getValueAsListOfDefs("Resources");
+    // This group is a supergroup iff every SubUnit appears in its Resources.
+    if (all_of(SubUnits, [&SuperUnits](Record *SubUnit) {
+          return is_contained(SuperUnits, SubUnit);
+        }))
+      return true;
+  }
+  return false;
+}
+
+// Verify that overlapping groups have a common supergroup.
+void CodeGenSchedModels::verifyProcResourceGroups(CodeGenProcModel &PM) {
+  // Compare every pair of ProcResGroups in this model.
+  for (unsigned i = 0, e = PM.ProcResourceDefs.size(); i < e; ++i) {
+    if (!PM.ProcResourceDefs[i]->isSubClassOf("ProcResGroup"))
+      continue;
+    RecVec CheckUnits =
+        PM.ProcResourceDefs[i]->getValueAsListOfDefs("Resources");
+    for (unsigned j = i+1; j < e; ++j) {
+      if (!PM.ProcResourceDefs[j]->isSubClassOf("ProcResGroup"))
+        continue;
+      RecVec OtherUnits =
+          PM.ProcResourceDefs[j]->getValueAsListOfDefs("Resources");
+      // Overlap check: any unit present in both groups.
+      if (std::find_first_of(CheckUnits.begin(), CheckUnits.end(),
+                             OtherUnits.begin(), OtherUnits.end())
+          != CheckUnits.end()) {
+        // CheckUnits and OtherUnits overlap
+        // Form the union in the local OtherUnits copy and require some
+        // group to contain all of it.
+        llvm::append_range(OtherUnits, CheckUnits);
+        if (!hasSuperGroup(OtherUnits, PM)) {
+          PrintFatalError((PM.ProcResourceDefs[i])->getLoc(),
+                          "proc resource group overlaps with "
+                          + PM.ProcResourceDefs[j]->getName()
+                          + " but no supergroup contains both.");
+        }
+      }
+    }
+  }
+}
+
+// Collect all the RegisterFile definitions available in this target.
+void CodeGenSchedModels::collectRegisterFiles() {
+  RecVec RegisterFileDefs = Records.getAllDerivedDefinitions("RegisterFile");
+
+  // RegisterFiles is the vector of CodeGenRegisterFile.
+  for (Record *RF : RegisterFileDefs) {
+    // For each register file definition, construct a CodeGenRegisterFile object
+    // and add it to the appropriate scheduling model.
+    CodeGenProcModel &PM = getProcModel(RF->getValueAsDef("SchedModel"));
+    // Construct in place; wrapping the arguments in a temporary
+    // CodeGenRegisterFile would defeat the purpose of emplace_back.
+    PM.RegisterFiles.emplace_back(RF->getName(), RF);
+    CodeGenRegisterFile &CGRF = PM.RegisterFiles.back();
+    CGRF.MaxMovesEliminatedPerCycle =
+        RF->getValueAsInt("MaxMovesEliminatedPerCycle");
+    CGRF.AllowZeroMoveEliminationOnly =
+        RF->getValueAsBit("AllowZeroMoveEliminationOnly");
+
+    // Now set the number of physical registers as well as the cost of registers
+    // in each register class.
+    CGRF.NumPhysRegs = RF->getValueAsInt("NumPhysRegs");
+    if (!CGRF.NumPhysRegs) {
+      PrintFatalError(RF->getLoc(),
+                      "Invalid RegisterFile with zero physical registers");
+    }
+
+    RecVec RegisterClasses = RF->getValueAsListOfDefs("RegClasses");
+    std::vector<int64_t> RegisterCosts = RF->getValueAsListOfInts("RegCosts");
+    ListInit *MoveElimInfo = RF->getValueAsListInit("AllowMoveElimination");
+    for (unsigned I = 0, E = RegisterClasses.size(); I < E; ++I) {
+      // Cost defaults to 1 when RegCosts is shorter than RegClasses.
+      int Cost = RegisterCosts.size() > I ? RegisterCosts[I] : 1;
+
+      // Move elimination defaults to disallowed when the list is short.
+      bool AllowMoveElim = false;
+      if (MoveElimInfo->size() > I) {
+        BitInit *Val = cast<BitInit>(MoveElimInfo->getElement(I));
+        AllowMoveElim = Val->getValue();
+      }
+
+      CGRF.Costs.emplace_back(RegisterClasses[I], Cost, AllowMoveElim);
+    }
+  }
+}
+
+// Collect and sort WriteRes, ReadAdvance, and ProcResources.
+void CodeGenSchedModels::collectProcResources() {
+  ProcResourceDefs = Records.getAllDerivedDefinitions("ProcResourceUnits");
+  ProcResGroups = Records.getAllDerivedDefinitions("ProcResGroup");
+
+  // Add any subtarget-specific SchedReadWrites that are directly associated
+  // with processor resources. Refer to the parent SchedClass's ProcIndices to
+  // determine which processors they apply to.
+  for (const CodeGenSchedClass &SC :
+       make_range(schedClassBegin(), schedClassEnd())) {
+    // Itinerary-based classes gather resources via the itinerary path.
+    if (SC.ItinClassDef) {
+      collectItinProcResources(SC.ItinClassDef);
+      continue;
+    }
+
+    // This class may have a default ReadWrite list which can be overridden by
+    // InstRW definitions.
+    for (Record *RW : SC.InstRWs) {
+      Record *RWModelDef = RW->getValueAsDef("SchedModel");
+      unsigned PIdx = getProcModel(RWModelDef).Index;
+      IdxVec Writes, Reads;
+      findRWs(RW->getValueAsListOfDefs("OperandReadWrites"), Writes, Reads);
+      collectRWResources(Writes, Reads, PIdx);
+    }
+
+    collectRWResources(SC.Writes, SC.Reads, SC.ProcIndices);
+  }
+  // Add resources separately defined by each subtarget.
+  RecVec WRDefs = Records.getAllDerivedDefinitions("WriteRes");
+  for (Record *WR : WRDefs) {
+    Record *ModelDef = WR->getValueAsDef("SchedModel");
+    addWriteRes(WR, getProcModel(ModelDef).Index);
+  }
+  RecVec SWRDefs = Records.getAllDerivedDefinitions("SchedWriteRes");
+  for (Record *SWR : SWRDefs) {
+    Record *ModelDef = SWR->getValueAsDef("SchedModel");
+    addWriteRes(SWR, getProcModel(ModelDef).Index);
+  }
+  RecVec RADefs = Records.getAllDerivedDefinitions("ReadAdvance");
+  for (Record *RA : RADefs) {
+    Record *ModelDef = RA->getValueAsDef("SchedModel");
+    addReadAdvance(RA, getProcModel(ModelDef).Index);
+  }
+  RecVec SRADefs = Records.getAllDerivedDefinitions("SchedReadAdvance");
+  for (Record *SRA : SRADefs) {
+    // Unlike the defs above, a SchedReadAdvance's SchedModel may be unset;
+    // only complete ones are attached here.
+    if (SRA->getValueInit("SchedModel")->isComplete()) {
+      Record *ModelDef = SRA->getValueAsDef("SchedModel");
+      addReadAdvance(SRA, getProcModel(ModelDef).Index);
+    }
+  }
+  // Add ProcResGroups that are defined within this processor model, which may
+  // not be directly referenced but may directly specify a buffer size.
+  // NOTE(review): this local shadows the ProcResGroups member assigned at the
+  // top of this function — presumably intentional since the member is cleared
+  // below; confirm.
+  RecVec ProcResGroups = Records.getAllDerivedDefinitions("ProcResGroup");
+  for (Record *PRG : ProcResGroups) {
+    if (!PRG->getValueInit("SchedModel")->isComplete())
+      continue;
+    CodeGenProcModel &PM = getProcModel(PRG->getValueAsDef("SchedModel"));
+    if (!is_contained(PM.ProcResourceDefs, PRG))
+      PM.ProcResourceDefs.push_back(PRG);
+  }
+  // Add ProcResourceUnits unconditionally.
+  for (Record *PRU : Records.getAllDerivedDefinitions("ProcResourceUnits")) {
+    if (!PRU->getValueInit("SchedModel")->isComplete())
+      continue;
+    CodeGenProcModel &PM = getProcModel(PRU->getValueAsDef("SchedModel"));
+    if (!is_contained(PM.ProcResourceDefs, PRU))
+      PM.ProcResourceDefs.push_back(PRU);
+  }
+  // Finalize each ProcModel by sorting the record arrays.
+  for (CodeGenProcModel &PM : ProcModels) {
+    llvm::sort(PM.WriteResDefs, LessRecord());
+    llvm::sort(PM.ReadAdvanceDefs, LessRecord());
+    llvm::sort(PM.ProcResourceDefs, LessRecord());
+    LLVM_DEBUG(
+        PM.dump(); dbgs() << "WriteResDefs: "; for (auto WriteResDef
+                                                    : PM.WriteResDefs) {
+          if (WriteResDef->isSubClassOf("WriteRes"))
+            dbgs() << WriteResDef->getValueAsDef("WriteType")->getName() << " ";
+          else
+            dbgs() << WriteResDef->getName() << " ";
+        } dbgs() << "\nReadAdvanceDefs: ";
+        for (Record *ReadAdvanceDef
+             : PM.ReadAdvanceDefs) {
+          if (ReadAdvanceDef->isSubClassOf("ReadAdvance"))
+            dbgs() << ReadAdvanceDef->getValueAsDef("ReadType")->getName()
+                   << " ";
+          else
+            dbgs() << ReadAdvanceDef->getName() << " ";
+        } dbgs()
+        << "\nProcResourceDefs: ";
+        for (Record *ProcResourceDef
+             : PM.ProcResourceDefs) {
+          dbgs() << ProcResourceDef->getName() << " ";
+        } dbgs()
+        << '\n');
+    verifyProcResourceGroups(PM);
+  }
+
+  ProcResourceDefs.clear();
+  ProcResGroups.clear();
+}
+
+// Verify that every instruction of every CompleteModel processor has
+// scheduling information; emit collected errors and abort otherwise.
+void CodeGenSchedModels::checkCompleteness() {
+  bool Complete = true;
+  for (const CodeGenProcModel &ProcModel : procModels()) {
+    const bool HasItineraries = ProcModel.hasItineraries();
+    // Only models that claim to be complete are checked.
+    if (!ProcModel.ModelDef->getValueAsBit("CompleteModel"))
+      continue;
+    for (const CodeGenInstruction *Inst : Target.getInstructionsByEnumValue()) {
+      if (Inst->hasNoSchedulingInfo)
+        continue;
+      if (ProcModel.isUnsupported(*Inst))
+        continue;
+      unsigned SCIdx = getSchedClassIdx(*Inst);
+      // A zero sched class index means no scheduling info was attached.
+      if (!SCIdx) {
+        if (Inst->TheDef->isValueUnset("SchedRW")) {
+          PrintError(Inst->TheDef->getLoc(),
+                     "No schedule information for instruction '" +
+                         Inst->TheDef->getName() + "' in SchedMachineModel '" +
+                         ProcModel.ModelDef->getName() + "'");
+          Complete = false;
+        }
+        continue;
+      }
+
+      // The class is covered if it has SchedWrites, a usable itinerary, or a
+      // model-specific InstRW entry (checked in that order below).
+      const CodeGenSchedClass &SC = getSchedClass(SCIdx);
+      if (!SC.Writes.empty())
+        continue;
+      if (HasItineraries && SC.ItinClassDef != nullptr &&
+          SC.ItinClassDef->getName() != "NoItinerary")
+        continue;
+
+      const RecVec &InstRWs = SC.InstRWs;
+      auto I = find_if(InstRWs, [&ProcModel](const Record *R) {
+        return R->getValueAsDef("SchedModel") == ProcModel.ModelDef;
+      });
+      if (I == InstRWs.end()) {
+        PrintError(Inst->TheDef->getLoc(), "'" + ProcModel.ModelName +
+                                               "' lacks information for '" +
+                                               Inst->TheDef->getName() + "'");
+        Complete = false;
+      }
+    }
+  }
+  // All errors were collected above; abort once with guidance.
+  if (!Complete) {
+    errs() << "\n\nIncomplete schedule models found.\n"
+      << "- Consider setting 'CompleteModel = 0' while developing new models.\n"
+      << "- Pseudo instructions can be marked with 'hasNoSchedulingInfo = 1'.\n"
+      << "- Instructions should usually have Sched<[...]> as a superclass, "
+         "you may temporarily use an empty list.\n"
+      << "- Instructions related to unsupported features can be excluded with "
+         "list<Predicate> UnsupportedFeatures = [HasA,..,HasY]; in the "
+         "processor model.\n\n";
+    PrintFatalError("Incomplete schedule model");
+  }
+}
+
+// Collect itinerary class resources for each processor.
+void CodeGenSchedModels::collectItinProcResources(Record *ItinClassDef) {
+  for (unsigned ModelIdx = 0, NumModels = ProcModels.size();
+       ModelIdx != NumModels; ++ModelIdx) {
+    const CodeGenProcModel &ProcModel = ProcModels[ModelIdx];
+    // Scan this processor's ItinRW entries for ones that name ItinClassDef.
+    bool SeenMatch = false;
+    for (Record *ItinRW : ProcModel.ItinRWDefs) {
+      RecVec MatchedClasses =
+          ItinRW->getValueAsListOfDefs("MatchedItinClasses");
+      if (!llvm::is_contained(MatchedClasses, ItinClassDef))
+        continue;
+      // At most one ItinRW per model may claim an itinerary class.
+      if (SeenMatch)
+        PrintFatalError(ItinRW->getLoc(), "Duplicate itinerary class "
+                        + ItinClassDef->getName()
+                        + " in ItinResources for " + ProcModel.ModelName);
+      SeenMatch = true;
+      // Record the resources implied by the mapped reads and writes.
+      IdxVec Writes, Reads;
+      findRWs(ItinRW->getValueAsListOfDefs("OperandReadWrites"), Writes, Reads);
+      collectRWResources(Writes, Reads, ModelIdx);
+    }
+  }
+}
+
+// Collect the processor resources implied by a single SchedReadWrite, and
+// recursively by any aliases that resolve to it.
+void CodeGenSchedModels::collectRWResources(unsigned RWIdx, bool IsRead,
+                                            ArrayRef<unsigned> ProcIndices) {
+  const CodeGenSchedRW &SchedRW = getSchedRW(RWIdx, IsRead);
+  if (SchedRW.TheDef) {
+    // A concrete SchedWriteRes / SchedReadAdvance contributes directly to
+    // each requested processor model.
+    if (!IsRead && SchedRW.TheDef->isSubClassOf("SchedWriteRes")) {
+      for (unsigned Idx : ProcIndices)
+        addWriteRes(SchedRW.TheDef, Idx);
+    }
+    else if (IsRead && SchedRW.TheDef->isSubClassOf("SchedReadAdvance")) {
+      for (unsigned Idx : ProcIndices)
+        addReadAdvance(SchedRW.TheDef, Idx);
+    }
+  }
+  for (auto *Alias : SchedRW.Aliases) {
+    // An alias bound to a specific SchedModel applies only to that processor;
+    // otherwise it inherits the caller's processor set.
+    IdxVec AliasProcIndices;
+    if (Alias->getValueInit("SchedModel")->isComplete()) {
+      AliasProcIndices.push_back(
+          getProcModel(Alias->getValueAsDef("SchedModel")).Index);
+    } else
+      AliasProcIndices = ProcIndices;
+    const CodeGenSchedRW &AliasRW = getSchedRW(Alias->getValueAsDef("AliasRW"));
+    assert(AliasRW.IsRead == IsRead && "cannot alias reads to writes");
+
+    // Expand write sequences first so every element's resources are visited.
+    IdxVec ExpandedRWs;
+    expandRWSequence(AliasRW.Index, ExpandedRWs, IsRead);
+    for (unsigned int ExpandedRW : ExpandedRWs) {
+      collectRWResources(ExpandedRW, IsRead, AliasProcIndices);
+    }
+  }
+}
+
+// Collect resources for a set of read/write types and processor indices.
+void CodeGenSchedModels::collectRWResources(ArrayRef<unsigned> Writes,
+                                            ArrayRef<unsigned> Reads,
+                                            ArrayRef<unsigned> ProcIndices) {
+  // Share one traversal helper between the write and read lists.
+  auto CollectAll = [&](ArrayRef<unsigned> Idxs, bool IsRead) {
+    for (unsigned Idx : Idxs)
+      collectRWResources(Idx, IsRead, ProcIndices);
+  };
+  CollectAll(Writes, /*IsRead=*/false);
+  CollectAll(Reads, /*IsRead=*/true);
+}
+
+// Find the processor's resource units for this kind of resource.
+Record *CodeGenSchedModels::findProcResUnits(Record *ProcResKind,
+                                             const CodeGenProcModel &PM,
+                                             ArrayRef<SMLoc> Loc) const {
+  // A concrete ProcResourceUnits def already names its own units.
+  if (ProcResKind->isSubClassOf("ProcResourceUnits"))
+    return ProcResKind;
+
+  Record *ProcUnitDef = nullptr;
+  assert(!ProcResourceDefs.empty());
+  assert(!ProcResGroups.empty());
+
+  // Search this model's resource defs whose "Kind" matches; the match must be
+  // unique within the model.
+  for (Record *ProcResDef : ProcResourceDefs) {
+    if (ProcResDef->getValueAsDef("Kind") == ProcResKind
+        && ProcResDef->getValueAsDef("SchedModel") == PM.ModelDef) {
+      if (ProcUnitDef) {
+        PrintFatalError(Loc,
+                        "Multiple ProcessorResourceUnits associated with "
+                        + ProcResKind->getName());
+      }
+      ProcUnitDef = ProcResDef;
+    }
+  }
+  // A ProcResGroup may itself serve as the resource kind for this model.
+  for (Record *ProcResGroup : ProcResGroups) {
+    if (ProcResGroup == ProcResKind
+        && ProcResGroup->getValueAsDef("SchedModel") == PM.ModelDef) {
+      if (ProcUnitDef) {
+        PrintFatalError(Loc,
+                        "Multiple ProcessorResourceUnits associated with "
+                        + ProcResKind->getName());
+      }
+      ProcUnitDef = ProcResGroup;
+    }
+  }
+  if (!ProcUnitDef) {
+    PrintFatalError(Loc,
+                    "No ProcessorResources associated with "
+                    + ProcResKind->getName());
+  }
+  return ProcUnitDef;
+}
+
+// Iteratively add a resource and its super resources.
+void CodeGenSchedModels::addProcResource(Record *ProcResKind,
+                                         CodeGenProcModel &PM,
+                                         ArrayRef<SMLoc> Loc) {
+  while (true) {
+    Record *ProcResUnits = findProcResUnits(ProcResKind, PM, Loc);
+
+    // See if this ProcResource is already associated with this processor.
+    if (is_contained(PM.ProcResourceDefs, ProcResUnits))
+      return;
+
+    PM.ProcResourceDefs.push_back(ProcResUnits);
+    // Groups have no "Super" chain to follow.
+    if (ProcResUnits->isSubClassOf("ProcResGroup"))
+      return;
+
+    // Stop once there is no super resource left.
+    if (!ProcResUnits->getValueInit("Super")->isComplete())
+      return;
+
+    // Otherwise climb to the super resource and repeat.
+    ProcResKind = ProcResUnits->getValueAsDef("Super");
+  }
+}
+
+// Add resources for a SchedWrite to this processor if they don't exist.
+void CodeGenSchedModels::addWriteRes(Record *ProcWriteResDef, unsigned PIdx) {
+  assert(PIdx && "don't add resources to an invalid Processor model");
+
+  CodeGenProcModel &ProcModel = ProcModels[PIdx];
+  if (is_contained(ProcModel.WriteResDefs, ProcWriteResDef))
+    return;
+  ProcModel.WriteResDefs.push_back(ProcWriteResDef);
+
+  // Pull in every ProcResource kind this WriteRes refers to.
+  for (Record *ResKind : ProcWriteResDef->getValueAsListOfDefs("ProcResources"))
+    addProcResource(ResKind, ProcModel, ProcWriteResDef->getLoc());
+}
+
+// Add resources for a ReadAdvance to this processor if they don't exist.
+void CodeGenSchedModels::addReadAdvance(Record *ProcReadAdvanceDef,
+                                        unsigned PIdx) {
+  RecVec &Defs = ProcModels[PIdx].ReadAdvanceDefs;
+  // Record each ReadAdvance at most once per processor.
+  if (!is_contained(Defs, ProcReadAdvanceDef))
+    Defs.push_back(ProcReadAdvanceDef);
+}
+
+// Translate a ProcResource def into this model's 1-based resource index.
+unsigned CodeGenProcModel::getProcResourceIdx(Record *PRDef) const {
+  auto Pos = find(ProcResourceDefs, PRDef);
+  if (Pos == ProcResourceDefs.end())
+    PrintFatalError(PRDef->getLoc(), "ProcResource def is not included in "
+                    "the ProcResources list for " + ModelName);
+  // Index 0 is reserved for the invalid resource, so entries start at 1.
+  return (Pos - ProcResourceDefs.begin()) + 1;
+}
+
+// An instruction is unsupported on this model when any of its Predicates
+// matches (by name) one of the model's UnsupportedFeatures.
+bool CodeGenProcModel::isUnsupported(const CodeGenInstruction &Inst) const {
+  for (const Record *PredDef : Inst.TheDef->getValueAsListOfDefs("Predicates"))
+    if (llvm::any_of(UnsupportedFeaturesDefs, [&](const Record *Feature) {
+          return Feature->getName() == PredDef->getName();
+        }))
+      return true;
+  return false;
+}
+
+#ifndef NDEBUG
+// Debug-only dump helpers; compiled out of release (NDEBUG) builds.
+void CodeGenProcModel::dump() const {
+  dbgs() << Index << ": " << ModelName << " "
+         << (ModelDef ? ModelDef->getName() : "inferred") << " "
+         << (ItinsDef ? ItinsDef->getName() : "no itinerary") << '\n';
+}
+
+void CodeGenSchedRW::dump() const {
+  dbgs() << Name << (IsVariadic ? " (V) " : " ");
+  if (IsSequence) {
+    // Print the expanded write sequence in parentheses.
+    dbgs() << "(";
+    dumpIdxVec(Sequence);
+    dbgs() << ")";
+  }
+}
+
+void CodeGenSchedClass::dump(const CodeGenSchedModels* SchedModels) const {
+  dbgs() << "SCHEDCLASS " << Index << ":" << Name << '\n'
+         << " Writes: ";
+  for (unsigned i = 0, N = Writes.size(); i < N; ++i) {
+    SchedModels->getSchedWrite(Writes[i]).dump();
+    if (i < N-1) {
+      dbgs() << '\n';
+      dbgs().indent(10);
+    }
+  }
+  dbgs() << "\n Reads: ";
+  for (unsigned i = 0, N = Reads.size(); i < N; ++i) {
+    SchedModels->getSchedRead(Reads[i]).dump();
+    if (i < N-1) {
+      dbgs() << '\n';
+      dbgs().indent(10);
+    }
+  }
+  dbgs() << "\n ProcIdx: "; dumpIdxVec(ProcIndices);
+  if (!Transitions.empty()) {
+    dbgs() << "\n Transitions for Proc ";
+    for (const CodeGenSchedTransition &Transition : Transitions) {
+      dbgs() << Transition.ProcIndex << ", ";
+    }
+  }
+  dbgs() << '\n';
+}
+
+void PredTransitions::dump() const {
+  dbgs() << "Expanded Variants:\n";
+  for (const auto &TI : TransVec) {
+    // Print the predicate term, then the write sequences it selects.
+    dbgs() << "{";
+    ListSeparator LS;
+    for (const PredCheck &PC : TI.PredTerm)
+      dbgs() << LS << SchedModels.getSchedRW(PC.RWIdx, PC.IsRead).Name << ":"
+             << PC.Predicate->getName();
+    dbgs() << "},\n => {";
+    for (SmallVectorImpl<SmallVector<unsigned, 4>>::const_iterator
+             WSI = TI.WriteSequences.begin(),
+             WSE = TI.WriteSequences.end();
+         WSI != WSE; ++WSI) {
+      dbgs() << "(";
+      ListSeparator LS;
+      for (unsigned N : *WSI)
+        dbgs() << LS << SchedModels.getSchedWrite(N).Name;
+      dbgs() << "),";
+    }
+    dbgs() << "}\n";
+  }
+}
+#endif // NDEBUG
diff --git a/contrib/libs/llvm16/utils/TableGen/CodeGenSchedule.h b/contrib/libs/llvm16/utils/TableGen/CodeGenSchedule.h
new file mode 100644
index 0000000000..bbf5381ad0
--- /dev/null
+++ b/contrib/libs/llvm16/utils/TableGen/CodeGenSchedule.h
@@ -0,0 +1,646 @@
+//===- CodeGenSchedule.h - Scheduling Machine Models ------------*- C++ -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// This file defines structures to encapsulate the machine model as described in
+// the target description.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_UTILS_TABLEGEN_CODEGENSCHEDULE_H
+#define LLVM_UTILS_TABLEGEN_CODEGENSCHEDULE_H
+
+#include "llvm/ADT/APInt.h"
+#include "llvm/ADT/DenseMap.h"
+#include "llvm/ADT/STLExtras.h"
+#include "llvm/TableGen/Record.h"
+#include "llvm/TableGen/SetTheory.h"
+
+namespace llvm {
+
+class CodeGenTarget;
+class CodeGenSchedModels;
+class CodeGenInstruction;
+
+using RecVec = std::vector<Record*>;
+using RecIter = std::vector<Record*>::const_iterator;
+
+using IdxVec = std::vector<unsigned>;
+using IdxIter = std::vector<unsigned>::const_iterator;
+
+/// We have two kinds of SchedReadWrites. Explicitly defined and inferred
+/// sequences. TheDef is nonnull for explicit SchedWrites, but Sequence may or
+/// may not be empty. TheDef is null for inferred sequences, and Sequence must
+/// be nonempty.
+///
+/// IsVariadic controls whether the variants are expanded into multiple operands
+/// or a sequence of writes on one operand.
+struct CodeGenSchedRW {
+  unsigned Index;   // Position in the SchedWrites/SchedReads vector.
+  std::string Name;
+  Record *TheDef;   // Null for inferred sequences (see class comment above).
+  bool IsRead;
+  bool IsAlias;
+  bool HasVariants; // SchedVariant: expansion selected by predicates.
+  bool IsVariadic;  // Variants expand across variadic operands.
+  bool IsSequence;  // WriteSequence record or inferred sequence.
+  IdxVec Sequence;  // The RW indices this sequence expands to.
+  RecVec Aliases;   // SchedAlias records that target this RW.
+
+  CodeGenSchedRW()
+      : Index(0), TheDef(nullptr), IsRead(false), IsAlias(false),
+        HasVariants(false), IsVariadic(false), IsSequence(false) {}
+
+  // Construct from an explicit SchedReadWrite record.
+  CodeGenSchedRW(unsigned Idx, Record *Def)
+      : Index(Idx), TheDef(Def), IsAlias(false), IsVariadic(false) {
+    Name = std::string(Def->getName());
+    IsRead = Def->isSubClassOf("SchedRead");
+    HasVariants = Def->isSubClassOf("SchedVariant");
+    if (HasVariants)
+      IsVariadic = Def->getValueAsBit("Variadic");
+
+    // Read records don't currently have sequences, but it can be easily
+    // added. Note that implicit Reads (from ReadVariant) may have a Sequence
+    // (but no record).
+    IsSequence = Def->isSubClassOf("WriteSequence");
+  }
+
+  // Construct an inferred sequence that has no backing record.
+  CodeGenSchedRW(unsigned Idx, bool Read, ArrayRef<unsigned> Seq,
+                 const std::string &Name)
+      : Index(Idx), Name(Name), TheDef(nullptr), IsRead(Read), IsAlias(false),
+        HasVariants(false), IsVariadic(false), IsSequence(true), Sequence(Seq) {
+    assert(Sequence.size() > 1 && "implied sequence needs >1 RWs");
+  }
+
+  // Check the representation invariants; true if this RW is usable.
+  bool isValid() const {
+    assert((!HasVariants || TheDef) && "Variant write needs record def");
+    assert((!IsVariadic || HasVariants) && "Variadic write needs variants");
+    assert((!IsSequence || !HasVariants) && "Sequence can't have variant");
+    assert((!IsSequence || !Sequence.empty()) && "Sequence should be nonempty");
+    assert((!IsAlias || Aliases.empty()) && "Alias cannot have aliases");
+    return TheDef || !Sequence.empty();
+  }
+
+#ifndef NDEBUG
+  void dump() const;
+#endif
+};
+
+/// Represent a transition between SchedClasses induced by SchedVariant.
+struct CodeGenSchedTransition {
+  unsigned ToClassIdx; // Index of the destination SchedClass.
+  unsigned ProcIndex;  // Processor model this transition applies to.
+  RecVec PredTerm;     // Predicate records guarding the transition.
+};
+
+/// Scheduling class.
+///
+/// Each instruction description will be mapped to a scheduling class. There are
+/// four types of classes:
+///
+/// 1) An explicitly defined itinerary class with ItinClassDef set.
+/// Writes and ReadDefs are empty. ProcIndices contains 0 for any processor.
+///
+/// 2) An implied class with a list of SchedWrites and SchedReads that are
+/// defined in an instruction definition and which are common across all
+/// subtargets. ProcIndices contains 0 for any processor.
+///
+/// 3) An implied class with a list of InstRW records that map instructions to
+/// SchedWrites and SchedReads per-processor. InstrClassMap should map the same
+/// instructions to this class. ProcIndices contains all the processors that
+/// provided InstrRW records for this class. ItinClassDef or Writes/Reads may
+/// still be defined for processors with no InstRW entry.
+///
+/// 4) An inferred class represents a variant of another class that may be
+/// resolved at runtime. ProcIndices contains the set of processors that may
+/// require the class. ProcIndices are propagated through SchedClasses as
+/// variants are expanded. Multiple SchedClasses may be inferred from an
+/// itinerary class. Each inherits the processor index from the ItinRW record
+/// that mapped the itinerary class to the variant Writes or Reads.
+struct CodeGenSchedClass {
+  unsigned Index;
+  std::string Name;
+  Record *ItinClassDef; // Null for inferred classes (case 4 above).
+
+  IdxVec Writes;
+  IdxVec Reads;
+  // Sorted list of ProcIdx, where ProcIdx==0 implies any processor.
+  IdxVec ProcIndices;
+
+  std::vector<CodeGenSchedTransition> Transitions;
+
+  // InstRW records associated with this class. These records may refer to an
+  // Instruction no longer mapped to this class by InstrClassMap. These
+  // Instructions should be ignored by this class because they have been split
+  // off to join another inferred class.
+  RecVec InstRWs;
+  // InstRWs processor indices. Filled in inferFromInstRWs
+  DenseSet<unsigned> InstRWProcIndices;
+
+  CodeGenSchedClass(unsigned Index, std::string Name, Record *ItinClassDef)
+    : Index(Index), Name(std::move(Name)), ItinClassDef(ItinClassDef) {}
+
+  // Compare this class's identifying key (itinerary, writes, reads) against
+  // a candidate key.
+  bool isKeyEqual(Record *IC, ArrayRef<unsigned> W,
+                  ArrayRef<unsigned> R) const {
+    return ItinClassDef == IC && ArrayRef(Writes) == W && ArrayRef(Reads) == R;
+  }
+
+  // Is this class generated from a variant of existing classes? Instructions
+  // are never mapped directly to inferred scheduling classes.
+  bool isInferred() const { return !ItinClassDef; }
+
+#ifndef NDEBUG
+  void dump(const CodeGenSchedModels *SchedModels) const;
+#endif
+};
+
+/// Represent the cost of allocating a register of register class RCDef.
+///
+/// The cost of allocating a register is equivalent to the number of physical
+/// registers used by the register renamer. Register costs are defined at
+/// register class granularity.
+struct CodeGenRegisterCost {
+  Record *RCDef;             // Register class this cost applies to.
+  unsigned Cost;             // Number of physical registers consumed.
+  bool AllowMoveElimination; // Whether move elimination is allowed.
+  CodeGenRegisterCost(Record *RC, unsigned RegisterCost, bool AllowMoveElim = false)
+      : RCDef(RC), Cost(RegisterCost), AllowMoveElimination(AllowMoveElim) {}
+  CodeGenRegisterCost(const CodeGenRegisterCost &) = default;
+  CodeGenRegisterCost &operator=(const CodeGenRegisterCost &) = delete;
+};
+
+/// A processor register file.
+///
+/// This class describes a processor register file. Register file information is
+/// currently consumed by external tools like llvm-mca to predict dispatch
+/// stalls due to register pressure.
+struct CodeGenRegisterFile {
+  std::string Name;
+  Record *RegisterFileDef;
+  unsigned MaxMovesEliminatedPerCycle;
+  bool AllowZeroMoveEliminationOnly;
+
+  unsigned NumPhysRegs;
+  std::vector<CodeGenRegisterCost> Costs; // Per-register-class costs.
+
+  CodeGenRegisterFile(StringRef name, Record *def, unsigned MaxMoveElimPerCy = 0,
+                      bool AllowZeroMoveElimOnly = false)
+      : Name(name), RegisterFileDef(def),
+        MaxMovesEliminatedPerCycle(MaxMoveElimPerCy),
+        AllowZeroMoveEliminationOnly(AllowZeroMoveElimOnly),
+        NumPhysRegs(0) {}
+
+  // True when no explicit per-class costs were specified.
+  bool hasDefaultCosts() const { return Costs.empty(); }
+};
+
+// Processor model.
+//
+// ModelName is a unique name used to name an instantiation of MCSchedModel.
+//
+// ModelDef is NULL for inferred Models. This happens when a processor defines
+// an itinerary but no machine model. If the processor defines neither a machine
+// model nor itinerary, then ModelDef remains pointing to NoModel. NoModel has
+// the special "NoModel" field set to true.
+//
+// ItinsDef always points to a valid record definition, but may point to the
+// default NoItineraries. NoItineraries has an empty list of InstrItinData
+// records.
+//
+// ItinDefList orders this processor's InstrItinData records by SchedClass idx.
+struct CodeGenProcModel {
+  unsigned Index;
+  std::string ModelName;
+  Record *ModelDef; // Null for inferred models (see comment block above).
+  Record *ItinsDef; // Always valid; may be the default NoItineraries.
+
+  // Derived members...
+
+  // Array of InstrItinData records indexed by a CodeGenSchedClass index.
+  // This list is empty if the Processor has no value for Itineraries.
+  // Initialized by collectProcItins().
+  RecVec ItinDefList;
+
+  // Map itinerary classes to per-operand resources.
+  // This list is empty if no ItinRW refers to this Processor.
+  RecVec ItinRWDefs;
+
+  // List of unsupported features.
+  // This list is empty if the Processor has no UnsupportedFeatures.
+  RecVec UnsupportedFeaturesDefs;
+
+  // All read/write resources associated with this processor.
+  RecVec WriteResDefs;
+  RecVec ReadAdvanceDefs;
+
+  // Per-operand machine model resources associated with this processor.
+  RecVec ProcResourceDefs;
+
+  // List of Register Files.
+  std::vector<CodeGenRegisterFile> RegisterFiles;
+
+  // Optional Retire Control Unit definition.
+  Record *RetireControlUnit;
+
+  // Load/Store queue descriptors.
+  Record *LoadQueue;
+  Record *StoreQueue;
+
+  CodeGenProcModel(unsigned Idx, std::string Name, Record *MDef,
+                   Record *IDef) :
+    Index(Idx), ModelName(std::move(Name)), ModelDef(MDef), ItinsDef(IDef),
+    RetireControlUnit(nullptr), LoadQueue(nullptr), StoreQueue(nullptr) {}
+
+  // True if this processor provides InstrItinData records.
+  bool hasItineraries() const {
+    return !ItinsDef->getValueAsListOfDefs("IID").empty();
+  }
+
+  // True if this processor provides per-operand machine-model data.
+  bool hasInstrSchedModel() const {
+    return !WriteResDefs.empty() || !ItinRWDefs.empty();
+  }
+
+  // True if any llvm-mca style extensions (register files, queues, retire
+  // control unit) are present.
+  bool hasExtraProcessorInfo() const {
+    return RetireControlUnit || LoadQueue || StoreQueue ||
+           !RegisterFiles.empty();
+  }
+
+  unsigned getProcResourceIdx(Record *PRDef) const;
+
+  bool isUnsupported(const CodeGenInstruction &Inst) const;
+
+#ifndef NDEBUG
+  void dump() const;
+#endif
+};
+
+/// Used to correlate instructions to MCInstPredicates specified by
+/// InstructionEquivalentClass tablegen definitions.
+///
+/// Example: a XOR of a register with self, is a known zero-idiom for most
+/// X86 processors.
+///
+/// Each processor can use a (potentially different) InstructionEquivalenceClass
+/// definition to classify zero-idioms. That means, XORrr is likely to appear
+/// in more than one equivalence class (where each class definition is
+/// contributed by a different processor).
+///
+/// There is no guarantee that the same MCInstPredicate will be used to describe
+/// equivalence classes that identify XORrr as a zero-idiom.
+///
+/// To be more specific, the requirements for being a zero-idiom XORrr may be
+/// different for different processors.
+///
+/// Class PredicateInfo identifies a subset of processors that specify the same
+/// requirements (i.e. same MCInstPredicate and OperandMask) for an instruction
+/// opcode.
+///
+/// Back to the example. Field `ProcModelMask` will have one bit set for every
+/// processor model that sees XORrr as a zero-idiom, and that specifies the same
+/// set of constraints.
+///
+/// By construction, there can be multiple instances of PredicateInfo associated
+/// with a same instruction opcode. For example, different processors may define
+/// different constraints on the same opcode.
+///
+/// Field OperandMask can be used as an extra constraint.
+/// It may be used to describe conditions that appy only to a subset of the
+/// operands of a machine instruction, and the operands subset may not be the
+/// same for all processor models.
+struct PredicateInfo {
+  llvm::APInt ProcModelMask; // A set of processor model indices.
+  llvm::APInt OperandMask;   // An operand mask.
+  const Record *Predicate;   // MCInstrPredicate definition.
+  PredicateInfo(llvm::APInt CpuMask, llvm::APInt Operands, const Record *Pred)
+      : ProcModelMask(CpuMask), OperandMask(Operands), Predicate(Pred) {}
+
+  // Two entries are equal only when all three constraints match.
+  bool operator==(const PredicateInfo &Other) const {
+    return ProcModelMask == Other.ProcModelMask &&
+           OperandMask == Other.OperandMask && Predicate == Other.Predicate;
+  }
+};
+
+/// A collection of PredicateInfo objects.
+///
+/// There is at least one OpcodeInfo object for every opcode specified by a
+/// TIPredicate definition.
+class OpcodeInfo {
+  std::vector<PredicateInfo> Predicates;
+
+  // Move-only: copying is explicitly disabled.
+  OpcodeInfo(const OpcodeInfo &Other) = delete;
+  OpcodeInfo &operator=(const OpcodeInfo &Other) = delete;
+
+public:
+  OpcodeInfo() = default;
+  OpcodeInfo &operator=(OpcodeInfo &&Other) = default;
+  OpcodeInfo(OpcodeInfo &&Other) = default;
+
+  ArrayRef<PredicateInfo> getPredicates() const { return Predicates; }
+
+  // Record that Predicate (with OperandMask) applies to the processor models
+  // selected by CpuMask.
+  void addPredicateForProcModel(const llvm::APInt &CpuMask,
+                                const llvm::APInt &OperandMask,
+                                const Record *Predicate);
+};
+
+/// Used to group together tablegen instruction definitions that are subject
+/// to a same set of constraints (identified by an instance of OpcodeInfo).
+class OpcodeGroup {
+  OpcodeInfo Info;                    // Shared constraints for this group.
+  std::vector<const Record *> Opcodes; // Instruction defs in the group.
+
+  // Move-only: copying is explicitly disabled.
+  OpcodeGroup(const OpcodeGroup &Other) = delete;
+  OpcodeGroup &operator=(const OpcodeGroup &Other) = delete;
+
+public:
+  OpcodeGroup(OpcodeInfo &&OpInfo) : Info(std::move(OpInfo)) {}
+  OpcodeGroup(OpcodeGroup &&Other) = default;
+
+  void addOpcode(const Record *Opcode) {
+    assert(!llvm::is_contained(Opcodes, Opcode) && "Opcode already in set!");
+    Opcodes.push_back(Opcode);
+  }
+
+  ArrayRef<const Record *> getOpcodes() const { return Opcodes; }
+  const OpcodeInfo &getOpcodeInfo() const { return Info; }
+};
+
+/// An STIPredicateFunction descriptor used by tablegen backends to
+/// auto-generate the body of a predicate function as a member of tablegen'd
+/// class XXXGenSubtargetInfo.
+class STIPredicateFunction {
+  const Record *FunctionDeclaration;
+
+  std::vector<const Record *> Definitions; // Per-processor STIPredicate defs.
+  std::vector<OpcodeGroup> Groups;         // Opcodes bucketed by constraints.
+
+  // Move-only: copying is explicitly disabled.
+  STIPredicateFunction(const STIPredicateFunction &Other) = delete;
+  STIPredicateFunction &operator=(const STIPredicateFunction &Other) = delete;
+
+public:
+  STIPredicateFunction(const Record *Rec) : FunctionDeclaration(Rec) {}
+  STIPredicateFunction(STIPredicateFunction &&Other) = default;
+
+  // Two functions can be merged when they share the same declaration.
+  bool isCompatibleWith(const STIPredicateFunction &Other) const {
+    return FunctionDeclaration == Other.FunctionDeclaration;
+  }
+
+  void addDefinition(const Record *Def) { Definitions.push_back(Def); }
+
+  // Append an opcode, starting a new group only when its predicates differ
+  // from the last group's.
+  void addOpcode(const Record *OpcodeRec, OpcodeInfo &&Info) {
+    if (Groups.empty() ||
+        Groups.back().getOpcodeInfo().getPredicates() != Info.getPredicates())
+      Groups.emplace_back(std::move(Info));
+    Groups.back().addOpcode(OpcodeRec);
+  }
+
+  StringRef getName() const {
+    return FunctionDeclaration->getValueAsString("Name");
+  }
+  const Record *getDefaultReturnPredicate() const {
+    return FunctionDeclaration->getValueAsDef("DefaultReturnValue");
+  }
+
+  const Record *getDeclaration() const { return FunctionDeclaration; }
+  ArrayRef<const Record *> getDefinitions() const { return Definitions; }
+  ArrayRef<OpcodeGroup> getGroups() const { return Groups; }
+};
+
+using ProcModelMapTy = DenseMap<const Record *, unsigned>;
+
+/// Top level container for machine model data.
+class CodeGenSchedModels {
+  RecordKeeper &Records;
+  const CodeGenTarget &Target;
+
+  // Map dag expressions to Instruction lists.
+  SetTheory Sets;
+
+  // List of unique processor models.
+  std::vector<CodeGenProcModel> ProcModels;
+
+  // Map Processor's MachineModel or ProcItin to a CodeGenProcModel index.
+  ProcModelMapTy ProcModelMap;
+
+  // Per-operand SchedReadWrite types.
+  std::vector<CodeGenSchedRW> SchedWrites;
+  std::vector<CodeGenSchedRW> SchedReads;
+
+  // List of unique SchedClasses.
+  std::vector<CodeGenSchedClass> SchedClasses;
+
+  // Any inferred SchedClass has an index greater than NumInstrSchedClasses.
+  unsigned NumInstrSchedClasses;
+
+  RecVec ProcResourceDefs;
+  RecVec ProcResGroups;
+
+  // Map each instruction to its unique SchedClass index considering the
+  // combination of its itinerary class, SchedRW list, and InstRW records.
+  using InstClassMapTy = DenseMap<Record*, unsigned>;
+  InstClassMapTy InstrClassMap;
+
+  std::vector<STIPredicateFunction> STIPredicates;
+  std::vector<unsigned> getAllProcIndices() const;
+
+public:
+  CodeGenSchedModels(RecordKeeper& RK, const CodeGenTarget &TGT);
+
+  // iterator access to the scheduling classes.
+  using class_iterator = std::vector<CodeGenSchedClass>::iterator;
+  using const_class_iterator = std::vector<CodeGenSchedClass>::const_iterator;
+  class_iterator classes_begin() { return SchedClasses.begin(); }
+  const_class_iterator classes_begin() const { return SchedClasses.begin(); }
+  class_iterator classes_end() { return SchedClasses.end(); }
+  const_class_iterator classes_end() const { return SchedClasses.end(); }
+  iterator_range<class_iterator> classes() {
+    return make_range(classes_begin(), classes_end());
+  }
+  iterator_range<const_class_iterator> classes() const {
+    return make_range(classes_begin(), classes_end());
+  }
+  // Classes written explicitly in the target description, excluding inferred
+  // variants (which occupy indices >= NumInstrSchedClasses).
+  iterator_range<class_iterator> explicit_classes() {
+    return make_range(classes_begin(), classes_begin() + NumInstrSchedClasses);
+  }
+  iterator_range<const_class_iterator> explicit_classes() const {
+    return make_range(classes_begin(), classes_begin() + NumInstrSchedClasses);
+  }
+
+  // Return the processor's SchedMachineModel def, or its ProcItin def when
+  // only itineraries are provided.
+  Record *getModelOrItinDef(Record *ProcDef) const {
+    Record *ModelDef = ProcDef->getValueAsDef("SchedModel");
+    Record *ItinsDef = ProcDef->getValueAsDef("ProcItin");
+    if (!ItinsDef->getValueAsListOfDefs("IID").empty()) {
+      assert(ModelDef->getValueAsBit("NoModel")
+             && "Itineraries must be defined within SchedMachineModel");
+      return ItinsDef;
+    }
+    return ModelDef;
+  }
+
+  const CodeGenProcModel &getModelForProc(Record *ProcDef) const {
+    Record *ModelDef = getModelOrItinDef(ProcDef);
+    ProcModelMapTy::const_iterator I = ProcModelMap.find(ModelDef);
+    assert(I != ProcModelMap.end() && "missing machine model");
+    return ProcModels[I->second];
+  }
+
+  CodeGenProcModel &getProcModel(Record *ModelDef) {
+    ProcModelMapTy::const_iterator I = ProcModelMap.find(ModelDef);
+    assert(I != ProcModelMap.end() && "missing machine model");
+    return ProcModels[I->second];
+  }
+  const CodeGenProcModel &getProcModel(Record *ModelDef) const {
+    return const_cast<CodeGenSchedModels*>(this)->getProcModel(ModelDef);
+  }
+
+  // Iterate over the unique processor models.
+  using ProcIter = std::vector<CodeGenProcModel>::const_iterator;
+  ProcIter procModelBegin() const { return ProcModels.begin(); }
+  ProcIter procModelEnd() const { return ProcModels.end(); }
+  ArrayRef<CodeGenProcModel> procModels() const { return ProcModels; }
+
+  // Return true if any processors have itineraries.
+  bool hasItineraries() const;
+
+  // Get a SchedWrite from its index.
+  const CodeGenSchedRW &getSchedWrite(unsigned Idx) const {
+    assert(Idx < SchedWrites.size() && "bad SchedWrite index");
+    assert(SchedWrites[Idx].isValid() && "invalid SchedWrite");
+    return SchedWrites[Idx];
+  }
+  // Get a SchedRead from its index.
+  const CodeGenSchedRW &getSchedRead(unsigned Idx) const {
+    assert(Idx < SchedReads.size() && "bad SchedRead index");
+    assert(SchedReads[Idx].isValid() && "invalid SchedRead");
+    return SchedReads[Idx];
+  }
+
+  const CodeGenSchedRW &getSchedRW(unsigned Idx, bool IsRead) const {
+    return IsRead ? getSchedRead(Idx) : getSchedWrite(Idx);
+  }
+  CodeGenSchedRW &getSchedRW(Record *Def) {
+    bool IsRead = Def->isSubClassOf("SchedRead");
+    unsigned Idx = getSchedRWIdx(Def, IsRead);
+    return const_cast<CodeGenSchedRW&>(
+        IsRead ? getSchedRead(Idx) : getSchedWrite(Idx));
+  }
+  const CodeGenSchedRW &getSchedRW(Record *Def) const {
+    return const_cast<CodeGenSchedModels&>(*this).getSchedRW(Def);
+  }
+
+  unsigned getSchedRWIdx(const Record *Def, bool IsRead) const;
+
+  // Return true if the given write record is referenced by a ReadAdvance.
+  bool hasReadOfWrite(Record *WriteDef) const;
+
+  // Get a SchedClass from its index.
+  CodeGenSchedClass &getSchedClass(unsigned Idx) {
+    assert(Idx < SchedClasses.size() && "bad SchedClass index");
+    return SchedClasses[Idx];
+  }
+  const CodeGenSchedClass &getSchedClass(unsigned Idx) const {
+    assert(Idx < SchedClasses.size() && "bad SchedClass index");
+    return SchedClasses[Idx];
+  }
+
+  // Get the SchedClass index for an instruction. Instructions with no
+  // itinerary, no SchedReadWrites, and no InstrReadWrites references return 0
+  // for NoItinerary.
+  unsigned getSchedClassIdx(const CodeGenInstruction &Inst) const;
+
+  using SchedClassIter = std::vector<CodeGenSchedClass>::const_iterator;
+  SchedClassIter schedClassBegin() const { return SchedClasses.begin(); }
+  SchedClassIter schedClassEnd() const { return SchedClasses.end(); }
+  ArrayRef<CodeGenSchedClass> schedClasses() const { return SchedClasses; }
+
+  unsigned numInstrSchedClasses() const { return NumInstrSchedClasses; }
+
+  void findRWs(const RecVec &RWDefs, IdxVec &Writes, IdxVec &Reads) const;
+  void findRWs(const RecVec &RWDefs, IdxVec &RWs, bool IsRead) const;
+  void expandRWSequence(unsigned RWIdx, IdxVec &RWSeq, bool IsRead) const;
+  void expandRWSeqForProc(unsigned RWIdx, IdxVec &RWSeq, bool IsRead,
+                          const CodeGenProcModel &ProcModel) const;
+
+  unsigned addSchedClass(Record *ItinDef, ArrayRef<unsigned> OperWrites,
+                         ArrayRef<unsigned> OperReads,
+                         ArrayRef<unsigned> ProcIndices);
+
+  unsigned findOrInsertRW(ArrayRef<unsigned> Seq, bool IsRead);
+
+  Record *findProcResUnits(Record *ProcResKind, const CodeGenProcModel &PM,
+                           ArrayRef<SMLoc> Loc) const;
+
+  ArrayRef<STIPredicateFunction> getSTIPredicates() const {
+    return STIPredicates;
+  }
+private:
+  void collectProcModels();
+
+  // Initialize a new processor model if it is unique.
+  void addProcModel(Record *ProcDef);
+
+  void collectSchedRW();
+
+  std::string genRWName(ArrayRef<unsigned> Seq, bool IsRead);
+  unsigned findRWForSequence(ArrayRef<unsigned> Seq, bool IsRead);
+
+  void collectSchedClasses();
+
+  void collectRetireControlUnits();
+
+  void collectRegisterFiles();
+
+  void collectOptionalProcessorInfo();
+
+  std::string createSchedClassName(Record *ItinClassDef,
+                                   ArrayRef<unsigned> OperWrites,
+                                   ArrayRef<unsigned> OperReads);
+  std::string createSchedClassName(const RecVec &InstDefs);
+  void createInstRWClass(Record *InstRWDef);
+
+  void collectProcItins();
+
+  void collectProcItinRW();
+
+  void collectProcUnsupportedFeatures();
+
+  void inferSchedClasses();
+
+  void checkMCInstPredicates() const;
+
+  void checkSTIPredicates() const;
+
+  void collectSTIPredicates();
+
+  void collectLoadStoreQueueInfo();
+
+  void checkCompleteness();
+
+  void inferFromRW(ArrayRef<unsigned> OperWrites, ArrayRef<unsigned> OperReads,
+                   unsigned FromClassIdx, ArrayRef<unsigned> ProcIndices);
+  void inferFromItinClass(Record *ItinClassDef, unsigned FromClassIdx);
+  void inferFromInstRWs(unsigned SCIdx);
+
+  bool hasSuperGroup(RecVec &SubUnits, CodeGenProcModel &PM);
+  void verifyProcResourceGroups(CodeGenProcModel &PM);
+
+  void collectProcResources();
+
+  void collectItinProcResources(Record *ItinClassDef);
+
+  void collectRWResources(unsigned RWIdx, bool IsRead,
+                          ArrayRef<unsigned> ProcIndices);
+
+  void collectRWResources(ArrayRef<unsigned> Writes, ArrayRef<unsigned> Reads,
+                          ArrayRef<unsigned> ProcIndices);
+
+  void addProcResource(Record *ProcResourceKind, CodeGenProcModel &PM,
+                       ArrayRef<SMLoc> Loc);
+
+  void addWriteRes(Record *ProcWriteResDef, unsigned PIdx);
+
+  void addReadAdvance(Record *ProcReadAdvanceDef, unsigned PIdx);
+};
+
+} // namespace llvm
+
+#endif
diff --git a/contrib/libs/llvm16/utils/TableGen/CodeGenTarget.cpp b/contrib/libs/llvm16/utils/TableGen/CodeGenTarget.cpp
new file mode 100644
index 0000000000..b7240f0130
--- /dev/null
+++ b/contrib/libs/llvm16/utils/TableGen/CodeGenTarget.cpp
@@ -0,0 +1,952 @@
+//===- CodeGenTarget.cpp - CodeGen Target Class Wrapper -------------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// This class wraps target description classes used by the various code
+// generation TableGen backends. This makes it easier to access the data and
+// provides a single place that needs to check it for validity. All of these
+// classes abort on error conditions.
+//
+//===----------------------------------------------------------------------===//
+
+#include "CodeGenTarget.h"
+#include "CodeGenInstruction.h"
+#include "CodeGenIntrinsics.h"
+#include "CodeGenSchedule.h"
+#include "llvm/ADT/STLExtras.h"
+#include "llvm/Support/CommandLine.h"
+#include "llvm/TableGen/Error.h"
+#include "llvm/TableGen/Record.h"
+#include <algorithm>
+using namespace llvm;
+
+cl::OptionCategory AsmParserCat("Options for -gen-asm-parser");
+cl::OptionCategory AsmWriterCat("Options for -gen-asm-writer");
+
+// Selects which entry of the target's AssemblyParsers list to emit
+// (bounds-checked in CodeGenTarget::getAsmParser; default is entry #0).
+static cl::opt<unsigned>
+    AsmParserNum("asmparsernum", cl::init(0),
+                 cl::desc("Make -gen-asm-parser emit assembly parser #N"),
+                 cl::cat(AsmParserCat));
+
+// Selects which entry of the target's AssemblyWriters list to emit
+// (bounds-checked in CodeGenTarget::getAsmWriter; default is entry #0).
+static cl::opt<unsigned>
+    AsmWriterNum("asmwriternum", cl::init(0),
+                 cl::desc("Make -gen-asm-writer emit assembly writer #N"),
+                 cl::cat(AsmWriterCat));
+
+/// getValueType - Return the MVT::SimpleValueType that the specified TableGen
+/// record corresponds to.  The record's integer "Value" field is assumed to
+/// hold the raw enum value.
+MVT::SimpleValueType llvm::getValueType(Record *Rec) {
+  return (MVT::SimpleValueType)Rec->getValueAsInt("Value");
+}
+
+/// Render a value type as it should appear in generated C++ source.  Pointer
+/// types are spelled as a TLI query because their width is target-dependent;
+/// everything else falls through to the plain MVT enumerator name.
+StringRef llvm::getName(MVT::SimpleValueType T) {
+  switch (T) {
+  case MVT::Other: return "UNKNOWN";
+  case MVT::iPTR: return "TLI.getPointerTy()";
+  case MVT::iPTRAny: return "TLI.getPointerTy()";
+  default: return getEnumName(T);
+  }
+}
+
+/// Map a SimpleValueType to the exact "MVT::..." enumerator spelling used in
+/// generated source.  Purely mechanical; any type not listed here is a hard
+/// error, which keeps this table in sync with MachineValueType.h.
+StringRef llvm::getEnumName(MVT::SimpleValueType T) {
+  // clang-format off
+  switch (T) {
+  case MVT::Other: return "MVT::Other";
+  case MVT::i1: return "MVT::i1";
+  case MVT::i2: return "MVT::i2";
+  case MVT::i4: return "MVT::i4";
+  case MVT::i8: return "MVT::i8";
+  case MVT::i16: return "MVT::i16";
+  case MVT::i32: return "MVT::i32";
+  case MVT::i64: return "MVT::i64";
+  case MVT::i128: return "MVT::i128";
+  case MVT::Any: return "MVT::Any";
+  case MVT::iAny: return "MVT::iAny";
+  case MVT::fAny: return "MVT::fAny";
+  case MVT::vAny: return "MVT::vAny";
+  case MVT::f16: return "MVT::f16";
+  case MVT::bf16: return "MVT::bf16";
+  case MVT::f32: return "MVT::f32";
+  case MVT::f64: return "MVT::f64";
+  case MVT::f80: return "MVT::f80";
+  case MVT::f128: return "MVT::f128";
+  case MVT::ppcf128: return "MVT::ppcf128";
+  case MVT::x86mmx: return "MVT::x86mmx";
+  case MVT::x86amx: return "MVT::x86amx";
+  case MVT::i64x8: return "MVT::i64x8";
+  case MVT::Glue: return "MVT::Glue";
+  case MVT::isVoid: return "MVT::isVoid";
+  case MVT::v1i1: return "MVT::v1i1";
+  case MVT::v2i1: return "MVT::v2i1";
+  case MVT::v4i1: return "MVT::v4i1";
+  case MVT::v8i1: return "MVT::v8i1";
+  case MVT::v16i1: return "MVT::v16i1";
+  case MVT::v32i1: return "MVT::v32i1";
+  case MVT::v64i1: return "MVT::v64i1";
+  case MVT::v128i1: return "MVT::v128i1";
+  case MVT::v256i1: return "MVT::v256i1";
+  case MVT::v512i1: return "MVT::v512i1";
+  case MVT::v1024i1: return "MVT::v1024i1";
+  case MVT::v2048i1: return "MVT::v2048i1";
+  case MVT::v128i2: return "MVT::v128i2";
+  case MVT::v256i2: return "MVT::v256i2";
+  case MVT::v64i4: return "MVT::v64i4";
+  case MVT::v128i4: return "MVT::v128i4";
+  case MVT::v1i8: return "MVT::v1i8";
+  case MVT::v2i8: return "MVT::v2i8";
+  case MVT::v4i8: return "MVT::v4i8";
+  case MVT::v8i8: return "MVT::v8i8";
+  case MVT::v16i8: return "MVT::v16i8";
+  case MVT::v32i8: return "MVT::v32i8";
+  case MVT::v64i8: return "MVT::v64i8";
+  case MVT::v128i8: return "MVT::v128i8";
+  case MVT::v256i8: return "MVT::v256i8";
+  case MVT::v512i8: return "MVT::v512i8";
+  case MVT::v1024i8: return "MVT::v1024i8";
+  case MVT::v1i16: return "MVT::v1i16";
+  case MVT::v2i16: return "MVT::v2i16";
+  case MVT::v3i16: return "MVT::v3i16";
+  case MVT::v4i16: return "MVT::v4i16";
+  case MVT::v8i16: return "MVT::v8i16";
+  case MVT::v16i16: return "MVT::v16i16";
+  case MVT::v32i16: return "MVT::v32i16";
+  case MVT::v64i16: return "MVT::v64i16";
+  case MVT::v128i16: return "MVT::v128i16";
+  case MVT::v256i16: return "MVT::v256i16";
+  case MVT::v512i16: return "MVT::v512i16";
+  case MVT::v1i32: return "MVT::v1i32";
+  case MVT::v2i32: return "MVT::v2i32";
+  case MVT::v3i32: return "MVT::v3i32";
+  case MVT::v4i32: return "MVT::v4i32";
+  case MVT::v5i32: return "MVT::v5i32";
+  case MVT::v6i32: return "MVT::v6i32";
+  case MVT::v7i32: return "MVT::v7i32";
+  case MVT::v8i32: return "MVT::v8i32";
+  case MVT::v9i32: return "MVT::v9i32";
+  case MVT::v10i32: return "MVT::v10i32";
+  case MVT::v11i32: return "MVT::v11i32";
+  case MVT::v12i32: return "MVT::v12i32";
+  case MVT::v16i32: return "MVT::v16i32";
+  case MVT::v32i32: return "MVT::v32i32";
+  case MVT::v64i32: return "MVT::v64i32";
+  case MVT::v128i32: return "MVT::v128i32";
+  case MVT::v256i32: return "MVT::v256i32";
+  case MVT::v512i32: return "MVT::v512i32";
+  case MVT::v1024i32: return "MVT::v1024i32";
+  case MVT::v2048i32: return "MVT::v2048i32";
+  case MVT::v1i64: return "MVT::v1i64";
+  case MVT::v2i64: return "MVT::v2i64";
+  case MVT::v3i64: return "MVT::v3i64";
+  case MVT::v4i64: return "MVT::v4i64";
+  case MVT::v8i64: return "MVT::v8i64";
+  case MVT::v16i64: return "MVT::v16i64";
+  case MVT::v32i64: return "MVT::v32i64";
+  case MVT::v64i64: return "MVT::v64i64";
+  case MVT::v128i64: return "MVT::v128i64";
+  case MVT::v256i64: return "MVT::v256i64";
+  case MVT::v1i128: return "MVT::v1i128";
+  case MVT::v1f16: return "MVT::v1f16";
+  case MVT::v2f16: return "MVT::v2f16";
+  case MVT::v3f16: return "MVT::v3f16";
+  case MVT::v4f16: return "MVT::v4f16";
+  case MVT::v8f16: return "MVT::v8f16";
+  case MVT::v16f16: return "MVT::v16f16";
+  case MVT::v32f16: return "MVT::v32f16";
+  case MVT::v64f16: return "MVT::v64f16";
+  case MVT::v128f16: return "MVT::v128f16";
+  case MVT::v256f16: return "MVT::v256f16";
+  case MVT::v512f16: return "MVT::v512f16";
+  case MVT::v2bf16: return "MVT::v2bf16";
+  case MVT::v3bf16: return "MVT::v3bf16";
+  case MVT::v4bf16: return "MVT::v4bf16";
+  case MVT::v8bf16: return "MVT::v8bf16";
+  case MVT::v16bf16: return "MVT::v16bf16";
+  case MVT::v32bf16: return "MVT::v32bf16";
+  case MVT::v64bf16: return "MVT::v64bf16";
+  case MVT::v128bf16: return "MVT::v128bf16";
+  case MVT::v1f32: return "MVT::v1f32";
+  case MVT::v2f32: return "MVT::v2f32";
+  case MVT::v3f32: return "MVT::v3f32";
+  case MVT::v4f32: return "MVT::v4f32";
+  case MVT::v5f32: return "MVT::v5f32";
+  case MVT::v6f32: return "MVT::v6f32";
+  case MVT::v7f32: return "MVT::v7f32";
+  case MVT::v8f32: return "MVT::v8f32";
+  case MVT::v9f32: return "MVT::v9f32";
+  case MVT::v10f32: return "MVT::v10f32";
+  case MVT::v11f32: return "MVT::v11f32";
+  case MVT::v12f32: return "MVT::v12f32";
+  case MVT::v16f32: return "MVT::v16f32";
+  case MVT::v32f32: return "MVT::v32f32";
+  case MVT::v64f32: return "MVT::v64f32";
+  case MVT::v128f32: return "MVT::v128f32";
+  case MVT::v256f32: return "MVT::v256f32";
+  case MVT::v512f32: return "MVT::v512f32";
+  case MVT::v1024f32: return "MVT::v1024f32";
+  case MVT::v2048f32: return "MVT::v2048f32";
+  case MVT::v1f64: return "MVT::v1f64";
+  case MVT::v2f64: return "MVT::v2f64";
+  case MVT::v3f64: return "MVT::v3f64";
+  case MVT::v4f64: return "MVT::v4f64";
+  case MVT::v8f64: return "MVT::v8f64";
+  case MVT::v16f64: return "MVT::v16f64";
+  case MVT::v32f64: return "MVT::v32f64";
+  case MVT::v64f64: return "MVT::v64f64";
+  case MVT::v128f64: return "MVT::v128f64";
+  case MVT::v256f64: return "MVT::v256f64";
+  case MVT::nxv1i1: return "MVT::nxv1i1";
+  case MVT::nxv2i1: return "MVT::nxv2i1";
+  case MVT::nxv4i1: return "MVT::nxv4i1";
+  case MVT::nxv8i1: return "MVT::nxv8i1";
+  case MVT::nxv16i1: return "MVT::nxv16i1";
+  case MVT::nxv32i1: return "MVT::nxv32i1";
+  case MVT::nxv64i1: return "MVT::nxv64i1";
+  case MVT::nxv1i8: return "MVT::nxv1i8";
+  case MVT::nxv2i8: return "MVT::nxv2i8";
+  case MVT::nxv4i8: return "MVT::nxv4i8";
+  case MVT::nxv8i8: return "MVT::nxv8i8";
+  case MVT::nxv16i8: return "MVT::nxv16i8";
+  case MVT::nxv32i8: return "MVT::nxv32i8";
+  case MVT::nxv64i8: return "MVT::nxv64i8";
+  case MVT::nxv1i16: return "MVT::nxv1i16";
+  case MVT::nxv2i16: return "MVT::nxv2i16";
+  case MVT::nxv4i16: return "MVT::nxv4i16";
+  case MVT::nxv8i16: return "MVT::nxv8i16";
+  case MVT::nxv16i16: return "MVT::nxv16i16";
+  case MVT::nxv32i16: return "MVT::nxv32i16";
+  case MVT::nxv1i32: return "MVT::nxv1i32";
+  case MVT::nxv2i32: return "MVT::nxv2i32";
+  case MVT::nxv4i32: return "MVT::nxv4i32";
+  case MVT::nxv8i32: return "MVT::nxv8i32";
+  case MVT::nxv16i32: return "MVT::nxv16i32";
+  case MVT::nxv32i32: return "MVT::nxv32i32";
+  case MVT::nxv1i64: return "MVT::nxv1i64";
+  case MVT::nxv2i64: return "MVT::nxv2i64";
+  case MVT::nxv4i64: return "MVT::nxv4i64";
+  case MVT::nxv8i64: return "MVT::nxv8i64";
+  case MVT::nxv16i64: return "MVT::nxv16i64";
+  case MVT::nxv32i64: return "MVT::nxv32i64";
+  case MVT::nxv1f16: return "MVT::nxv1f16";
+  case MVT::nxv2f16: return "MVT::nxv2f16";
+  case MVT::nxv4f16: return "MVT::nxv4f16";
+  case MVT::nxv8f16: return "MVT::nxv8f16";
+  case MVT::nxv16f16: return "MVT::nxv16f16";
+  case MVT::nxv32f16: return "MVT::nxv32f16";
+  case MVT::nxv1bf16: return "MVT::nxv1bf16";
+  case MVT::nxv2bf16: return "MVT::nxv2bf16";
+  case MVT::nxv4bf16: return "MVT::nxv4bf16";
+  case MVT::nxv8bf16: return "MVT::nxv8bf16";
+  case MVT::nxv16bf16: return "MVT::nxv16bf16";
+  case MVT::nxv32bf16: return "MVT::nxv32bf16";
+  case MVT::nxv1f32: return "MVT::nxv1f32";
+  case MVT::nxv2f32: return "MVT::nxv2f32";
+  case MVT::nxv4f32: return "MVT::nxv4f32";
+  case MVT::nxv8f32: return "MVT::nxv8f32";
+  case MVT::nxv16f32: return "MVT::nxv16f32";
+  case MVT::nxv1f64: return "MVT::nxv1f64";
+  case MVT::nxv2f64: return "MVT::nxv2f64";
+  case MVT::nxv4f64: return "MVT::nxv4f64";
+  case MVT::nxv8f64: return "MVT::nxv8f64";
+  case MVT::token: return "MVT::token";
+  case MVT::Metadata: return "MVT::Metadata";
+  case MVT::iPTR: return "MVT::iPTR";
+  case MVT::iPTRAny: return "MVT::iPTRAny";
+  case MVT::Untyped: return "MVT::Untyped";
+  case MVT::funcref: return "MVT::funcref";
+  case MVT::externref: return "MVT::externref";
+  default: llvm_unreachable("ILLEGAL VALUE TYPE!");
+  }
+  // clang-format on
+}
+
+/// getQualifiedName - Return the name of the specified record, with a
+/// namespace qualifier if the record contains one.  Records without a
+/// "Namespace" field (or with an empty one) yield just the bare name.
+///
+std::string llvm::getQualifiedName(const Record *R) {
+  std::string Namespace;
+  if (R->getValue("Namespace"))
+    Namespace = std::string(R->getValueAsString("Namespace"));
+  if (Namespace.empty())
+    return std::string(R->getName());
+  return Namespace + "::" + R->getName().str();
+}
+
+
+/// getTarget - Return the current instance of the Target class.
+///
+// A TableGen input must define exactly one concrete 'Target'; zero or
+// multiple definitions are fatal input errors.
+CodeGenTarget::CodeGenTarget(RecordKeeper &records)
+    : Records(records), CGH(records) {
+  std::vector<Record*> Targets = Records.getAllDerivedDefinitions("Target");
+  if (Targets.size() == 0)
+    PrintFatalError("No 'Target' subclasses defined!");
+  if (Targets.size() != 1)
+    PrintFatalError("Multiple subclasses of Target defined!");
+  TargetRec = Targets[0];
+}
+
+// Out-of-line so the unique_ptr members' deleters are instantiated here,
+// where the owned types are complete.
+CodeGenTarget::~CodeGenTarget() {
+}
+
+StringRef CodeGenTarget::getName() const { return TargetRec->getName(); }
+
+/// getInstNamespace - Find and return the target machine's instruction
+/// namespace. The namespace is cached because it is requested multiple times.
+StringRef CodeGenTarget::getInstNamespace() const {
+  if (InstNamespace.empty()) {
+    for (const CodeGenInstruction *Inst : getInstructionsByEnumValue()) {
+      // We are not interested in the "TargetOpcode" namespace.
+      if (Inst->Namespace != "TargetOpcode") {
+        InstNamespace = Inst->Namespace;
+        break;
+      }
+    }
+  }
+
+  return InstNamespace;
+}
+
+// Namespace of the target's registers, taken from the first register class;
+// empty when the target defines no register classes.
+StringRef CodeGenTarget::getRegNamespace() const {
+  auto &RegClasses = RegBank->getRegClasses();
+  return RegClasses.size() > 0 ? RegClasses.front().Namespace : "";
+}
+
+// The InstrInfo record attached to this target.
+Record *CodeGenTarget::getInstructionSet() const {
+  return TargetRec->getValueAsDef("InstructionSet");
+}
+
+// Whether the target opted in to post-RA register renaming.
+bool CodeGenTarget::getAllowRegisterRenaming() const {
+  return TargetRec->getValueAsInt("AllowRegisterRenaming");
+}
+
+/// getAsmParser - Return the AssemblyParser definition for this target.
+/// The entry is chosen by the -asmparsernum command-line option; an
+/// out-of-range selection is a fatal error.
+///
+Record *CodeGenTarget::getAsmParser() const {
+  std::vector<Record*> LI = TargetRec->getValueAsListOfDefs("AssemblyParsers");
+  if (AsmParserNum >= LI.size())
+    PrintFatalError("Target does not have an AsmParser #" +
+                    Twine(AsmParserNum) + "!");
+  return LI[AsmParserNum];
+}
+
+/// getAsmParserVariant - Return the AssemblyParserVariant definition for
+/// this target.  \p i indexes into the AssemblyParserVariants list; an
+/// out-of-range index is a fatal error.
+///
+Record *CodeGenTarget::getAsmParserVariant(unsigned i) const {
+  std::vector<Record*> LI =
+      TargetRec->getValueAsListOfDefs("AssemblyParserVariants");
+  if (i >= LI.size())
+    PrintFatalError("Target does not have an AsmParserVariant #" + Twine(i) +
+                    "!");
+  return LI[i];
+}
+
+/// getAsmParserVariantCount - Return the AssemblyParserVariant definition
+/// available for this target.
+///
+unsigned CodeGenTarget::getAsmParserVariantCount() const {
+  std::vector<Record*> LI =
+      TargetRec->getValueAsListOfDefs("AssemblyParserVariants");
+  return LI.size();
+}
+
+/// getAsmWriter - Return the AssemblyWriter definition for this target.
+/// The entry is chosen by the -asmwriternum command-line option; an
+/// out-of-range selection is a fatal error.
+///
+Record *CodeGenTarget::getAsmWriter() const {
+  std::vector<Record*> LI = TargetRec->getValueAsListOfDefs("AssemblyWriters");
+  if (AsmWriterNum >= LI.size())
+    PrintFatalError("Target does not have an AsmWriter #" +
+                    Twine(AsmWriterNum) + "!");
+  return LI[AsmWriterNum];
+}
+
+// Lazily construct the register bank on first use; subsequent calls return
+// the cached instance.
+CodeGenRegBank &CodeGenTarget::getRegBank() const {
+  if (!RegBank)
+    RegBank = std::make_unique<CodeGenRegBank>(Records, getHwModes());
+  return *RegBank;
+}
+
+/// Find the largest register class that (a) has a subclass supporting
+/// \p ValueTy, (b) contains the subregister index \p SubIdx, and
+/// (c) optionally is allocatable.  Returns std::nullopt when no class
+/// qualifies.  Ties on member count are broken by class name so the result
+/// is deterministic.
+std::optional<CodeGenRegisterClass *> CodeGenTarget::getSuperRegForSubReg(
+    const ValueTypeByHwMode &ValueTy, CodeGenRegBank &RegBank,
+    const CodeGenSubRegIndex *SubIdx, bool MustBeAllocatable) const {
+  std::vector<CodeGenRegisterClass *> Candidates;
+  auto &RegClasses = RegBank.getRegClasses();
+
+  // Try to find a register class which supports ValueTy, and also contains
+  // SubIdx.
+  for (CodeGenRegisterClass &RC : RegClasses) {
+    // Is there a subclass of this class which contains this subregister index?
+    CodeGenRegisterClass *SubClassWithSubReg = RC.getSubClassWithSubReg(SubIdx);
+    if (!SubClassWithSubReg)
+      continue;
+
+    // We have a class. Check if it supports this value type.
+    if (!llvm::is_contained(SubClassWithSubReg->VTs, ValueTy))
+      continue;
+
+    // If necessary, check that it is allocatable.
+    if (MustBeAllocatable && !SubClassWithSubReg->Allocatable)
+      continue;
+
+    // We have a register class which supports both the value type and
+    // subregister index. Remember it.
+    Candidates.push_back(SubClassWithSubReg);
+  }
+
+  // If we didn't find anything, we're done.
+  if (Candidates.empty())
+    return std::nullopt;
+
+  // Find and return the largest of our candidate classes.
+  llvm::stable_sort(Candidates, [&](const CodeGenRegisterClass *A,
+                                    const CodeGenRegisterClass *B) {
+    if (A->getMembers().size() > B->getMembers().size())
+      return true;
+
+    if (A->getMembers().size() < B->getMembers().size())
+      return false;
+
+    // Order by name as a tie-breaker.
+    return StringRef(A->getName()) < B->getName();
+  });
+
+  return Candidates[0];
+}
+
+// Cache all RegAltNameIndex defs, sorted by record order for stable output.
+void CodeGenTarget::ReadRegAltNameIndices() const {
+  RegAltNameIndices = Records.getAllDerivedDefinitions("RegAltNameIndex");
+  llvm::sort(RegAltNameIndices, LessRecord());
+}
+
+/// getRegisterByName - If there is a register with the specific AsmName,
+/// return it.  Returns null when no register has that name.
+const CodeGenRegister *CodeGenTarget::getRegisterByName(StringRef Name) const {
+  return getRegBank().getRegistersByName().lookup(Name);
+}
+
+/// Collect the value types of every register class containing register \p R,
+/// deduplicated and sorted.
+std::vector<ValueTypeByHwMode> CodeGenTarget::getRegisterVTs(Record *R)
+    const {
+  const CodeGenRegister *Reg = getRegBank().getReg(R);
+  std::vector<ValueTypeByHwMode> Result;
+  for (const auto &RC : getRegBank().getRegClasses()) {
+    if (RC.contains(Reg)) {
+      ArrayRef<ValueTypeByHwMode> InVTs = RC.getValueTypes();
+      llvm::append_range(Result, InVTs);
+    }
+  }
+
+  // Remove duplicates.
+  llvm::sort(Result);
+  Result.erase(std::unique(Result.begin(), Result.end()), Result.end());
+  return Result;
+}
+
+
+// Gather the union of value types over all register classes into
+// LegalValueTypes, sorted with duplicates removed.
+void CodeGenTarget::ReadLegalValueTypes() const {
+  for (const auto &RC : getRegBank().getRegClasses())
+    llvm::append_range(LegalValueTypes, RC.VTs);
+
+  // Remove duplicates.
+  llvm::sort(LegalValueTypes);
+  LegalValueTypes.erase(std::unique(LegalValueTypes.begin(),
+                                    LegalValueTypes.end()),
+                        LegalValueTypes.end());
+}
+
+// Lazily construct the scheduling models on first use.
+CodeGenSchedModels &CodeGenTarget::getSchedModels() const {
+  if (!SchedModels)
+    SchedModels = std::make_unique<CodeGenSchedModels>(Records, *this);
+  return *SchedModels;
+}
+
+// Populate the Instructions map from every 'Instruction' def in the .td
+// input.  Fewer than three instructions means the target defined none of its
+// own (the builtin TargetOpcode pseudos alone exceed that), so bail out.
+void CodeGenTarget::ReadInstructions() const {
+  std::vector<Record*> Insts = Records.getAllDerivedDefinitions("Instruction");
+  if (Insts.size() <= 2)
+    PrintFatalError("No 'Instruction' subclasses defined!");
+
+  // Parse the instructions defined in the .td file.
+  for (unsigned i = 0, e = Insts.size(); i != e; ++i)
+    Instructions[Insts[i]] = std::make_unique<CodeGenInstruction>(Insts[i]);
+}
+
+// Look up a parsed CodeGenInstruction by def name; missing entries are a
+// fatal error because callers pass only the fixed target-independent opcodes.
+static const CodeGenInstruction *
+GetInstByName(const char *Name,
+              const DenseMap<const Record*,
+                             std::unique_ptr<CodeGenInstruction>> &Insts,
+              RecordKeeper &Records) {
+  const Record *Rec = Records.getDef(Name);
+
+  const auto I = Insts.find(Rec);
+  if (!Rec || I == Insts.end())
+    PrintFatalError(Twine("Could not find '") + Name + "' instruction!");
+  return I->second.get();
+}
+
+// Names of the target-independent opcodes, in their fixed enum order,
+// terminated by a null sentinel.
+static const char *FixedInstrs[] = {
+#define HANDLE_TARGET_OPCODE(OPC) #OPC,
+#include "llvm/Support/TargetOpcodes.def"
+    nullptr};
+
+// Count excludes the trailing null sentinel.
+unsigned CodeGenTarget::getNumFixedInstructions() {
+  return std::size(FixedInstrs) - 1;
+}
+
+/// Return all of the instructions defined by the target, ordered by
+/// their enum value.  The fixed target-independent opcodes come first in
+/// their predefined order; target instructions follow, pseudos before real
+/// instructions, each group sorted by name.
+void CodeGenTarget::ComputeInstrsByEnum() const {
+  const auto &Insts = getInstructions();
+  for (const char *const *p = FixedInstrs; *p; ++p) {
+    const CodeGenInstruction *Instr = GetInstByName(*p, Insts, Records);
+    assert(Instr && "Missing target independent instruction");
+    assert(Instr->Namespace == "TargetOpcode" && "Bad namespace");
+    InstrsByEnum.push_back(Instr);
+  }
+  unsigned EndOfPredefines = InstrsByEnum.size();
+  assert(EndOfPredefines == getNumFixedInstructions() &&
+         "Missing generic opcode");
+
+  for (const auto &I : Insts) {
+    const CodeGenInstruction *CGI = I.second.get();
+    if (CGI->Namespace != "TargetOpcode") {
+      InstrsByEnum.push_back(CGI);
+      if (CGI->TheDef->getValueAsBit("isPseudo"))
+        ++NumPseudoInstructions;
+    }
+  }
+
+  assert(InstrsByEnum.size() == Insts.size() && "Missing predefined instr");
+
+  // All of the instructions are now in random order based on the map iteration.
+  // Sort deterministically: pseudos first (the negated bit makes true sort
+  // ahead), then lexicographically by def name.
+  llvm::sort(
+      InstrsByEnum.begin() + EndOfPredefines, InstrsByEnum.end(),
+      [](const CodeGenInstruction *Rec1, const CodeGenInstruction *Rec2) {
+        const auto &D1 = *Rec1->TheDef;
+        const auto &D2 = *Rec2->TheDef;
+        return std::make_tuple(!D1.getValueAsBit("isPseudo"), D1.getName()) <
+               std::make_tuple(!D2.getValueAsBit("isPseudo"), D2.getName());
+      });
+}
+
+
+/// isLittleEndianEncoding - Return whether this target encodes its instruction
+/// in little-endian format, i.e. bits laid out in the order [0..n]
+///
+bool CodeGenTarget::isLittleEndianEncoding() const {
+  return getInstructionSet()->getValueAsBit("isLittleEndianEncoding");
+}
+
+/// reverseBitsForLittleEndianEncoding - For little-endian instruction bit
+/// encodings, reverse the bit order of all instructions.  No-op unless the
+/// target's InstrInfo sets isLittleEndianEncoding; pseudos and TargetOpcode
+/// records have no real encoding and are skipped.
+void CodeGenTarget::reverseBitsForLittleEndianEncoding() {
+  if (!isLittleEndianEncoding())
+    return;
+
+  std::vector<Record *> Insts =
+      Records.getAllDerivedDefinitions("InstructionEncoding");
+  for (Record *R : Insts) {
+    if (R->getValueAsString("Namespace") == "TargetOpcode" ||
+        R->getValueAsBit("isPseudo"))
+      continue;
+
+    BitsInit *BI = R->getValueAsBitsInit("Inst");
+
+    unsigned numBits = BI->getNumBits();
+
+    SmallVector<Init *, 16> NewBits(numBits);
+
+    // Swap symmetric bit pairs around the middle of the word.
+    for (unsigned bit = 0, end = numBits / 2; bit != end; ++bit) {
+      unsigned bitSwapIdx = numBits - bit - 1;
+      Init *OrigBit = BI->getBit(bit);
+      Init *BitSwap = BI->getBit(bitSwapIdx);
+      NewBits[bit] = BitSwap;
+      NewBits[bitSwapIdx] = OrigBit;
+    }
+    // Odd widths leave one untouched bit in the middle.
+    if (numBits % 2) {
+      unsigned middle = (numBits + 1) / 2;
+      NewBits[middle] = BI->getBit(middle);
+    }
+
+    BitsInit *NewBI = BitsInit::get(Records, NewBits);
+
+    // Update the bits in reversed order so that emitInstrOpBits will get the
+    // correct endianness.
+    R->getValue("Inst")->setValue(NewBI);
+  }
+}
+
+/// guessInstructionProperties - Return true if it's OK to guess instruction
+/// properties instead of raising an error.
+///
+/// This is configurable as a temporary migration aid. It will eventually be
+/// permanently false.
+bool CodeGenTarget::guessInstructionProperties() const {
+  return getInstructionSet()->getValueAsBit("guessInstructionProperties");
+}
+
+//===----------------------------------------------------------------------===//
+// ComplexPattern implementation
+//
+// Parse a ComplexPattern record: result type, operand count, C++ selection
+// function, root-node restrictions, complexity, and SDNode properties.
+ComplexPattern::ComplexPattern(Record *R) {
+  Ty = R->getValueAsDef("Ty");
+  NumOperands = R->getValueAsInt("NumOperands");
+  SelectFunc = std::string(R->getValueAsString("SelectFunc"));
+  RootNodes = R->getValueAsListOfDefs("RootNodes");
+
+  // FIXME: This is a hack to statically increase the priority of patterns which
+  // maps a sub-dag to a complex pattern. e.g. favors LEA over ADD. To get best
+  // possible pattern match we'll need to dynamically calculate the complexity
+  // of all patterns a dag can potentially map to.
+  // -1 means "unset"; default to three per operand.
+  int64_t RawComplexity = R->getValueAsInt("Complexity");
+  if (RawComplexity == -1)
+    Complexity = NumOperands * 3;
+  else
+    Complexity = RawComplexity;
+
+  // FIXME: Why is this different from parseSDPatternOperatorProperties?
+  // Parse the properties.  Only the whitelisted SDNP* flags are legal on a
+  // ComplexPattern; anything else is a fatal input error.
+  Properties = 0;
+  std::vector<Record*> PropList = R->getValueAsListOfDefs("Properties");
+  for (unsigned i = 0, e = PropList.size(); i != e; ++i)
+    if (PropList[i]->getName() == "SDNPHasChain") {
+      Properties |= 1 << SDNPHasChain;
+    } else if (PropList[i]->getName() == "SDNPOptInGlue") {
+      Properties |= 1 << SDNPOptInGlue;
+    } else if (PropList[i]->getName() == "SDNPMayStore") {
+      Properties |= 1 << SDNPMayStore;
+    } else if (PropList[i]->getName() == "SDNPMayLoad") {
+      Properties |= 1 << SDNPMayLoad;
+    } else if (PropList[i]->getName() == "SDNPSideEffect") {
+      Properties |= 1 << SDNPSideEffect;
+    } else if (PropList[i]->getName() == "SDNPMemOperand") {
+      Properties |= 1 << SDNPMemOperand;
+    } else if (PropList[i]->getName() == "SDNPVariadic") {
+      Properties |= 1 << SDNPVariadic;
+    } else if (PropList[i]->getName() == "SDNPWantRoot") {
+      Properties |= 1 << SDNPWantRoot;
+    } else if (PropList[i]->getName() == "SDNPWantParent") {
+      Properties |= 1 << SDNPWantParent;
+    } else {
+      PrintFatalError(R->getLoc(), "Unsupported SD Node property '" +
+                                       PropList[i]->getName() +
+                                       "' on ComplexPattern '" + R->getName() +
+                                       "'!");
+    }
+}
+
+//===----------------------------------------------------------------------===//
+// CodeGenIntrinsic Implementation
+//===----------------------------------------------------------------------===//
+
+// Build the table of all intrinsics: parse every 'Intrinsic' def (applying
+// the default IntrinsicProperty records), sort by (target prefix, name), and
+// record per-target-prefix (offset, count) slices into Targets.
+CodeGenIntrinsicTable::CodeGenIntrinsicTable(const RecordKeeper &RC) {
+  std::vector<Record *> IntrProperties =
+      RC.getAllDerivedDefinitions("IntrinsicProperty");
+
+  std::vector<Record *> DefaultProperties;
+  for (Record *Rec : IntrProperties)
+    if (Rec->getValueAsBit("IsDefault"))
+      DefaultProperties.push_back(Rec);
+
+  std::vector<Record *> Defs = RC.getAllDerivedDefinitions("Intrinsic");
+  Intrinsics.reserve(Defs.size());
+
+  for (unsigned I = 0, e = Defs.size(); I != e; ++I)
+    Intrinsics.push_back(CodeGenIntrinsic(Defs[I], DefaultProperties));
+
+  llvm::sort(Intrinsics,
+             [](const CodeGenIntrinsic &LHS, const CodeGenIntrinsic &RHS) {
+               return std::tie(LHS.TargetPrefix, LHS.Name) <
+                      std::tie(RHS.TargetPrefix, RHS.Name);
+             });
+  // The first slice ("" prefix) holds target-independent intrinsics; a new
+  // slice starts whenever the prefix changes in the sorted order.
+  Targets.push_back({"", 0, 0});
+  for (size_t I = 0, E = Intrinsics.size(); I < E; ++I)
+    if (Intrinsics[I].TargetPrefix != Targets.back().Name) {
+      Targets.back().Count = I - Targets.back().Offset;
+      Targets.push_back({Intrinsics[I].TargetPrefix, I, 0});
+    }
+  Targets.back().Count = Intrinsics.size() - Targets.back().Offset;
+}
+
+// Parse one 'Intrinsic' def into a CodeGenIntrinsic: validate its names,
+// resolve return/parameter value types (including LLVMMatchType references to
+// earlier overloaded types), and apply both explicit and default properties.
+CodeGenIntrinsic::CodeGenIntrinsic(Record *R,
+                                   std::vector<Record *> DefaultProperties) {
+  TheDef = R;
+  std::string DefName = std::string(R->getName());
+  ArrayRef<SMLoc> DefLoc = R->getLoc();
+  Properties = 0;
+  isOverloaded = false;
+  isCommutative = false;
+  canThrow = false;
+  isNoReturn = false;
+  isNoCallback = false;
+  isNoSync = false;
+  isNoFree = false;
+  isWillReturn = false;
+  isCold = false;
+  isNoDuplicate = false;
+  isNoMerge = false;
+  isConvergent = false;
+  isSpeculatable = false;
+  hasSideEffects = false;
+
+  // Every intrinsic def must be named "int_<something>".
+  if (DefName.size() <= 4 || DefName.substr(0, 4) != "int_")
+    PrintFatalError(DefLoc,
+                    "Intrinsic '" + DefName + "' does not start with 'int_'!");
+
+  EnumName = DefName.substr(4);
+
+  if (R->getValue("ClangBuiltinName")) // Ignore a missing ClangBuiltinName field.
+    ClangBuiltinName = std::string(R->getValueAsString("ClangBuiltinName"));
+  if (R->getValue("MSBuiltinName")) // Ignore a missing MSBuiltinName field.
+    MSBuiltinName = std::string(R->getValueAsString("MSBuiltinName"));
+
+  TargetPrefix = std::string(R->getValueAsString("TargetPrefix"));
+  Name = std::string(R->getValueAsString("LLVMName"));
+
+  if (Name == "") {
+    // If an explicit name isn't specified, derive one from the DefName.
+    Name = "llvm.";
+
+    for (unsigned i = 0, e = EnumName.size(); i != e; ++i)
+      Name += (EnumName[i] == '_') ? '.' : EnumName[i];
+  } else {
+    // Verify it starts with "llvm.".
+    if (Name.size() <= 5 || Name.substr(0, 5) != "llvm.")
+      PrintFatalError(DefLoc, "Intrinsic '" + DefName +
+                                  "'s name does not start with 'llvm.'!");
+  }
+
+  // If TargetPrefix is specified, make sure that Name starts with
+  // "llvm.<targetprefix>.".
+  if (!TargetPrefix.empty()) {
+    if (Name.size() < 6+TargetPrefix.size() ||
+        Name.substr(5, 1 + TargetPrefix.size()) != (TargetPrefix + "."))
+      PrintFatalError(DefLoc, "Intrinsic '" + DefName +
+                                  "' does not start with 'llvm." +
+                                  TargetPrefix + ".'!");
+  }
+
+  ListInit *RetTypes = R->getValueAsListInit("RetTypes");
+  ListInit *ParamTypes = R->getValueAsListInit("ParamTypes");
+
+  // First collate a list of overloaded types.  LLVMMatchType entries refer to
+  // positions in this list, so it must be gathered over returns then params
+  // before either list is resolved.
+  std::vector<MVT::SimpleValueType> OverloadedVTs;
+  for (ListInit *TypeList : {RetTypes, ParamTypes}) {
+    for (unsigned i = 0, e = TypeList->size(); i != e; ++i) {
+      Record *TyEl = TypeList->getElementAsRecord(i);
+      assert(TyEl->isSubClassOf("LLVMType") && "Expected a type!");
+
+      if (TyEl->isSubClassOf("LLVMMatchType"))
+        continue;
+
+      MVT::SimpleValueType VT = getValueType(TyEl->getValueAsDef("VT"));
+      if (MVT(VT).isOverloaded()) {
+        OverloadedVTs.push_back(VT);
+        isOverloaded = true;
+      }
+    }
+  }
+
+  // Parse the list of return types.
+  ListInit *TypeList = RetTypes;
+  for (unsigned i = 0, e = TypeList->size(); i != e; ++i) {
+    Record *TyEl = TypeList->getElementAsRecord(i);
+    assert(TyEl->isSubClassOf("LLVMType") && "Expected a type!");
+    MVT::SimpleValueType VT;
+    if (TyEl->isSubClassOf("LLVMMatchType")) {
+      unsigned MatchTy = TyEl->getValueAsInt("Number");
+      assert(MatchTy < OverloadedVTs.size() &&
+             "Invalid matching number!");
+      VT = OverloadedVTs[MatchTy];
+      // It only makes sense to use the extended and truncated vector element
+      // variants with iAny types; otherwise, if the intrinsic is not
+      // overloaded, all the types can be specified directly.
+      assert(((!TyEl->isSubClassOf("LLVMExtendedType") &&
+               !TyEl->isSubClassOf("LLVMTruncatedType")) ||
+              VT == MVT::iAny || VT == MVT::vAny) &&
+             "Expected iAny or vAny type");
+    } else {
+      VT = getValueType(TyEl->getValueAsDef("VT"));
+    }
+
+    // Reject invalid types.
+    if (VT == MVT::isVoid)
+      PrintFatalError(DefLoc, "Intrinsic '" + DefName +
+                                  " has void in result type list!");
+
+    IS.RetVTs.push_back(VT);
+    IS.RetTypeDefs.push_back(TyEl);
+  }
+
+  // Parse the list of parameter types.
+  TypeList = ParamTypes;
+  for (unsigned i = 0, e = TypeList->size(); i != e; ++i) {
+    Record *TyEl = TypeList->getElementAsRecord(i);
+    assert(TyEl->isSubClassOf("LLVMType") && "Expected a type!");
+    MVT::SimpleValueType VT;
+    if (TyEl->isSubClassOf("LLVMMatchType")) {
+      unsigned MatchTy = TyEl->getValueAsInt("Number");
+      if (MatchTy >= OverloadedVTs.size()) {
+        PrintError(R->getLoc(),
+                   "Parameter #" + Twine(i) + " has out of bounds matching "
+                   "number " + Twine(MatchTy));
+        PrintFatalError(DefLoc,
+                        Twine("ParamTypes is ") + TypeList->getAsString());
+      }
+      VT = OverloadedVTs[MatchTy];
+      // It only makes sense to use the extended and truncated vector element
+      // variants with iAny types; otherwise, if the intrinsic is not
+      // overloaded, all the types can be specified directly.
+      assert(((!TyEl->isSubClassOf("LLVMExtendedType") &&
+               !TyEl->isSubClassOf("LLVMTruncatedType")) ||
+              VT == MVT::iAny || VT == MVT::vAny) &&
+             "Expected iAny or vAny type");
+    } else
+      VT = getValueType(TyEl->getValueAsDef("VT"));
+
+    // Reject invalid types.
+    if (VT == MVT::isVoid && i != e-1 /*void at end means varargs*/)
+      PrintFatalError(DefLoc, "Intrinsic '" + DefName +
+                                  " has void in result type list!");
+
+    IS.ParamVTs.push_back(VT);
+    IS.ParamTypeDefs.push_back(TyEl);
+  }
+
+  // Parse the intrinsic properties.
+  ListInit *PropList = R->getValueAsListInit("IntrProperties");
+  for (unsigned i = 0, e = PropList->size(); i != e; ++i) {
+    Record *Property = PropList->getElementAsRecord(i);
+    assert(Property->isSubClassOf("IntrinsicProperty") &&
+           "Expected a property!");
+
+    setProperty(Property);
+  }
+
+  // Set default properties to true.
+  setDefaultProperties(R, DefaultProperties);
+
+  // Also record the SDPatternOperator Properties.
+  Properties = parseSDPatternOperatorProperties(R);
+
+  // Sort the argument attributes for later benefit.
+  // NOTE(review): isParamImmArg relies on this ordering for binary_search.
+  for (auto &Attrs : ArgumentAttributes)
+    llvm::sort(Attrs);
+}
+
+// Apply the target-independent default properties unless the intrinsic
+// explicitly opted out via DisableDefaultAttributes.
+void CodeGenIntrinsic::setDefaultProperties(
+    Record *R, std::vector<Record *> DefaultProperties) {
+  // opt-out of using default attributes.
+  if (R->getValueAsBit("DisableDefaultAttributes"))
+    return;
+
+  for (Record *Rec : DefaultProperties)
+    setProperty(Rec);
+}
+
+// Translate a single IntrinsicProperty record into the corresponding flag,
+// memory-effect intersection, or per-argument attribute on this intrinsic.
+// Conflicting read/write annotations are fatal errors; any property not
+// handled below is a backend bug (llvm_unreachable).
+void CodeGenIntrinsic::setProperty(Record *R) {
+  if (R->getName() == "IntrNoMem")
+    ME = MemoryEffects::none();
+  else if (R->getName() == "IntrReadMem") {
+    if (ME.onlyWritesMemory())
+      PrintFatalError(TheDef->getLoc(),
+                      Twine("IntrReadMem cannot be used after IntrNoMem or "
+                            "IntrWriteMem. Default is ReadWrite"));
+    ME &= MemoryEffects::readOnly();
+  } else if (R->getName() == "IntrWriteMem") {
+    if (ME.onlyReadsMemory())
+      PrintFatalError(TheDef->getLoc(),
+                      Twine("IntrWriteMem cannot be used after IntrNoMem or "
+                            "IntrReadMem. Default is ReadWrite"));
+    ME &= MemoryEffects::writeOnly();
+  } else if (R->getName() == "IntrArgMemOnly")
+    ME &= MemoryEffects::argMemOnly();
+  else if (R->getName() == "IntrInaccessibleMemOnly")
+    ME &= MemoryEffects::inaccessibleMemOnly();
+  else if (R->getName() == "IntrInaccessibleMemOrArgMemOnly")
+    ME &= MemoryEffects::inaccessibleOrArgMemOnly();
+  else if (R->getName() == "Commutative")
+    isCommutative = true;
+  else if (R->getName() == "Throws")
+    canThrow = true;
+  else if (R->getName() == "IntrNoDuplicate")
+    isNoDuplicate = true;
+  else if (R->getName() == "IntrNoMerge")
+    isNoMerge = true;
+  else if (R->getName() == "IntrConvergent")
+    isConvergent = true;
+  else if (R->getName() == "IntrNoReturn")
+    isNoReturn = true;
+  else if (R->getName() == "IntrNoCallback")
+    isNoCallback = true;
+  else if (R->getName() == "IntrNoSync")
+    isNoSync = true;
+  else if (R->getName() == "IntrNoFree")
+    isNoFree = true;
+  else if (R->getName() == "IntrWillReturn")
+    // IntrNoReturn takes precedence: a no-return intrinsic never will-return.
+    isWillReturn = !isNoReturn;
+  else if (R->getName() == "IntrCold")
+    isCold = true;
+  else if (R->getName() == "IntrSpeculatable")
+    isSpeculatable = true;
+  else if (R->getName() == "IntrHasSideEffects")
+    hasSideEffects = true;
+  else if (R->isSubClassOf("NoCapture")) {
+    unsigned ArgNo = R->getValueAsInt("ArgNo");
+    addArgAttribute(ArgNo, NoCapture);
+  } else if (R->isSubClassOf("NoAlias")) {
+    unsigned ArgNo = R->getValueAsInt("ArgNo");
+    addArgAttribute(ArgNo, NoAlias);
+  } else if (R->isSubClassOf("NoUndef")) {
+    unsigned ArgNo = R->getValueAsInt("ArgNo");
+    addArgAttribute(ArgNo, NoUndef);
+  } else if (R->isSubClassOf("NonNull")) {
+    unsigned ArgNo = R->getValueAsInt("ArgNo");
+    addArgAttribute(ArgNo, NonNull);
+  } else if (R->isSubClassOf("Returned")) {
+    unsigned ArgNo = R->getValueAsInt("ArgNo");
+    addArgAttribute(ArgNo, Returned);
+  } else if (R->isSubClassOf("ReadOnly")) {
+    unsigned ArgNo = R->getValueAsInt("ArgNo");
+    addArgAttribute(ArgNo, ReadOnly);
+  } else if (R->isSubClassOf("WriteOnly")) {
+    unsigned ArgNo = R->getValueAsInt("ArgNo");
+    addArgAttribute(ArgNo, WriteOnly);
+  } else if (R->isSubClassOf("ReadNone")) {
+    unsigned ArgNo = R->getValueAsInt("ArgNo");
+    addArgAttribute(ArgNo, ReadNone);
+  } else if (R->isSubClassOf("ImmArg")) {
+    unsigned ArgNo = R->getValueAsInt("ArgNo");
+    addArgAttribute(ArgNo, ImmArg);
+  } else if (R->isSubClassOf("Align")) {
+    unsigned ArgNo = R->getValueAsInt("ArgNo");
+    uint64_t Align = R->getValueAsInt("Align");
+    addArgAttribute(ArgNo, Alignment, Align);
+  } else
+    llvm_unreachable("Unknown property!");
+}
+
+// True when parameter \p ParamIdx is a pointer-typed value; out-of-range
+// indices answer false rather than asserting.
+bool CodeGenIntrinsic::isParamAPointer(unsigned ParamIdx) const {
+  if (ParamIdx >= IS.ParamVTs.size())
+    return false;
+  MVT ParamType = MVT(IS.ParamVTs[ParamIdx]);
+  return ParamType == MVT::iPTR || ParamType == MVT::iPTRAny;
+}
+
+// True when parameter \p ParamIdx carries the ImmArg attribute.  Relies on
+// ArgumentAttributes being sorted (done at the end of the constructor) so
+// binary_search is valid.
+bool CodeGenIntrinsic::isParamImmArg(unsigned ParamIdx) const {
+  // Convert argument index to attribute index starting from `FirstArgIndex`.
+  ++ParamIdx;
+  if (ParamIdx >= ArgumentAttributes.size())
+    return false;
+  ArgAttribute Val{ImmArg, 0};
+  return std::binary_search(ArgumentAttributes[ParamIdx].begin(),
+                            ArgumentAttributes[ParamIdx].end(), Val);
+}
+
+// Record attribute \p AK (with optional integer payload \p V, e.g. an
+// alignment) on argument slot \p Idx, growing the table as needed.
+void CodeGenIntrinsic::addArgAttribute(unsigned Idx, ArgAttrKind AK,
+                                       uint64_t V) {
+  if (Idx >= ArgumentAttributes.size())
+    ArgumentAttributes.resize(Idx + 1);
+  ArgumentAttributes[Idx].emplace_back(AK, V);
+}
diff --git a/contrib/libs/llvm16/utils/TableGen/CodeGenTarget.h b/contrib/libs/llvm16/utils/TableGen/CodeGenTarget.h
new file mode 100644
index 0000000000..6846e6b5c7
--- /dev/null
+++ b/contrib/libs/llvm16/utils/TableGen/CodeGenTarget.h
@@ -0,0 +1,225 @@
+//===- CodeGenTarget.h - Target Class Wrapper -------------------*- C++ -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// This file defines wrappers for the Target class and related global
+// functionality. This makes it easier to access the data and provides a single
+// place that needs to check it for validity. All of these classes abort
+// on error conditions.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_UTILS_TABLEGEN_CODEGENTARGET_H
+#define LLVM_UTILS_TABLEGEN_CODEGENTARGET_H
+
+#include "CodeGenHwModes.h"
+#include "CodeGenRegisters.h"
+#include "InfoByHwMode.h"
+#include "SDNodeProperties.h"
+
+namespace llvm {
+
+class RecordKeeper;
+class Record;
+class CodeGenInstruction;
+struct CodeGenRegister;
+class CodeGenSchedModels;
+class CodeGenTarget;
+
+/// getValueType - Return the MVT::SimpleValueType that the specified TableGen
+/// record corresponds to.
+MVT::SimpleValueType getValueType(Record *Rec);
+
+StringRef getName(MVT::SimpleValueType T);
+StringRef getEnumName(MVT::SimpleValueType T);
+
+/// getQualifiedName - Return the name of the specified record, with a
+/// namespace qualifier if the record contains one.
+std::string getQualifiedName(const Record *R);
+
+/// CodeGenTarget - This class corresponds to the Target class in the .td files.
+///
+class CodeGenTarget {
+ RecordKeeper &Records;
+ Record *TargetRec;
+
+ mutable DenseMap<const Record*,
+ std::unique_ptr<CodeGenInstruction>> Instructions;
+ mutable std::unique_ptr<CodeGenRegBank> RegBank;
+ mutable std::vector<Record*> RegAltNameIndices;
+ mutable SmallVector<ValueTypeByHwMode, 8> LegalValueTypes;
+ CodeGenHwModes CGH;
+ void ReadRegAltNameIndices() const;
+ void ReadInstructions() const;
+ void ReadLegalValueTypes() const;
+
+ mutable std::unique_ptr<CodeGenSchedModels> SchedModels;
+
+ mutable StringRef InstNamespace;
+ mutable std::vector<const CodeGenInstruction*> InstrsByEnum;
+ mutable unsigned NumPseudoInstructions = 0;
+public:
+ CodeGenTarget(RecordKeeper &Records);
+ ~CodeGenTarget();
+
+ Record *getTargetRecord() const { return TargetRec; }
+ StringRef getName() const;
+
+ /// getInstNamespace - Return the target-specific instruction namespace.
+ ///
+ StringRef getInstNamespace() const;
+
+ /// getRegNamespace - Return the target-specific register namespace.
+ StringRef getRegNamespace() const;
+
+ /// getInstructionSet - Return the InstructionSet object.
+ ///
+ Record *getInstructionSet() const;
+
+ /// getAllowRegisterRenaming - Return the AllowRegisterRenaming flag value for
+ /// this target.
+ ///
+ bool getAllowRegisterRenaming() const;
+
+ /// getAsmParser - Return the AssemblyParser definition for this target.
+ ///
+ Record *getAsmParser() const;
+
+ /// getAsmParserVariant - Return the AssemblyParserVariant definition for
+ /// this target.
+ ///
+ Record *getAsmParserVariant(unsigned i) const;
+
+ /// getAsmParserVariantCount - Return the AssemblyParserVariant definition
+ /// available for this target.
+ ///
+ unsigned getAsmParserVariantCount() const;
+
+ /// getAsmWriter - Return the AssemblyWriter definition for this target.
+ ///
+ Record *getAsmWriter() const;
+
+ /// getRegBank - Return the register bank description.
+ CodeGenRegBank &getRegBank() const;
+
+ /// Return the largest register class on \p RegBank which supports \p Ty and
+ /// covers \p SubIdx if it exists.
+ std::optional<CodeGenRegisterClass *>
+ getSuperRegForSubReg(const ValueTypeByHwMode &Ty, CodeGenRegBank &RegBank,
+ const CodeGenSubRegIndex *SubIdx,
+ bool MustBeAllocatable = false) const;
+
+ /// getRegisterByName - If there is a register with the specific AsmName,
+ /// return it.
+ const CodeGenRegister *getRegisterByName(StringRef Name) const;
+
+ const std::vector<Record*> &getRegAltNameIndices() const {
+ if (RegAltNameIndices.empty()) ReadRegAltNameIndices();
+ return RegAltNameIndices;
+ }
+
+ const CodeGenRegisterClass &getRegisterClass(Record *R) const {
+ return *getRegBank().getRegClass(R);
+ }
+
+ /// getRegisterVTs - Find the union of all possible SimpleValueTypes for the
+ /// specified physical register.
+ std::vector<ValueTypeByHwMode> getRegisterVTs(Record *R) const;
+
+ ArrayRef<ValueTypeByHwMode> getLegalValueTypes() const {
+ if (LegalValueTypes.empty())
+ ReadLegalValueTypes();
+ return LegalValueTypes;
+ }
+
+ CodeGenSchedModels &getSchedModels() const;
+
+ const CodeGenHwModes &getHwModes() const { return CGH; }
+
+private:
+ DenseMap<const Record*, std::unique_ptr<CodeGenInstruction>> &
+ getInstructions() const {
+ if (Instructions.empty()) ReadInstructions();
+ return Instructions;
+ }
+public:
+
+ CodeGenInstruction &getInstruction(const Record *InstRec) const {
+ if (Instructions.empty()) ReadInstructions();
+ auto I = Instructions.find(InstRec);
+ assert(I != Instructions.end() && "Not an instruction");
+ return *I->second;
+ }
+
+ /// Returns the number of predefined instructions.
+ static unsigned getNumFixedInstructions();
+
+ /// Returns the number of pseudo instructions.
+ unsigned getNumPseudoInstructions() const {
+ if (InstrsByEnum.empty())
+ ComputeInstrsByEnum();
+ return NumPseudoInstructions;
+ }
+
+ /// Return all of the instructions defined by the target, ordered by their
+ /// enum value.
+ /// The following order of instructions is also guaranteed:
+ /// - fixed / generic instructions as declared in TargetOpcodes.def, in order;
+ /// - pseudo instructions in lexicographical order sorted by name;
+ /// - other instructions in lexicographical order sorted by name.
+ ArrayRef<const CodeGenInstruction *> getInstructionsByEnumValue() const {
+ if (InstrsByEnum.empty())
+ ComputeInstrsByEnum();
+ return InstrsByEnum;
+ }
+
+ typedef ArrayRef<const CodeGenInstruction *>::const_iterator inst_iterator;
+ inst_iterator inst_begin() const{return getInstructionsByEnumValue().begin();}
+ inst_iterator inst_end() const { return getInstructionsByEnumValue().end(); }
+
+
+ /// isLittleEndianEncoding - are instruction bit patterns defined as [0..n]?
+ ///
+ bool isLittleEndianEncoding() const;
+
+ /// reverseBitsForLittleEndianEncoding - For little-endian instruction bit
+ /// encodings, reverse the bit order of all instructions.
+ void reverseBitsForLittleEndianEncoding();
+
+ /// guessInstructionProperties - should we just guess unset instruction
+ /// properties?
+ bool guessInstructionProperties() const;
+
+private:
+ void ComputeInstrsByEnum() const;
+};
+
+/// ComplexPattern - ComplexPattern info, corresponding to the ComplexPattern
+/// tablegen class in TargetSelectionDAG.td
+class ComplexPattern {
+ Record *Ty;
+ unsigned NumOperands;
+ std::string SelectFunc;
+ std::vector<Record*> RootNodes;
+ unsigned Properties; // Node properties
+ unsigned Complexity;
+public:
+ ComplexPattern(Record *R);
+
+ Record *getValueType() const { return Ty; }
+ unsigned getNumOperands() const { return NumOperands; }
+ const std::string &getSelectFunc() const { return SelectFunc; }
+ const std::vector<Record*> &getRootNodes() const {
+ return RootNodes;
+ }
+ bool hasProperty(enum SDNP Prop) const { return Properties & (1 << Prop); }
+ unsigned getComplexity() const { return Complexity; }
+};
+
+} // End llvm namespace
+
+#endif
diff --git a/contrib/libs/llvm16/utils/TableGen/CompressInstEmitter.cpp b/contrib/libs/llvm16/utils/TableGen/CompressInstEmitter.cpp
new file mode 100644
index 0000000000..a18d6a6b88
--- /dev/null
+++ b/contrib/libs/llvm16/utils/TableGen/CompressInstEmitter.cpp
@@ -0,0 +1,911 @@
+//===-------- CompressInstEmitter.cpp - Generator for Compression ---------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+// CompressInstEmitter implements a tablegen-driven CompressPat based
+// Instruction Compression mechanism.
+//
+//===----------------------------------------------------------------------===//
+//
+// CompressInstEmitter implements a tablegen-driven CompressPat Instruction
+// Compression mechanism for generating compressed instructions from the
+// expanded instruction form.
+
+// This tablegen backend processes CompressPat declarations in a
+// td file and generates all the required checks to validate the pattern
+// declarations; validate the input and output operands to generate the correct
+// compressed instructions. The checks include validating different types of
+// operands; register operands, immediate operands, fixed register and fixed
+// immediate inputs.
+//
+// Example:
+// /// Defines a Pat match between compressed and uncompressed instruction.
+// /// The relationship and helper function generation are handled by
+// /// CompressInstEmitter backend.
+// class CompressPat<dag input, dag output, list<Predicate> predicates = []> {
+// /// Uncompressed instruction description.
+// dag Input = input;
+// /// Compressed instruction description.
+// dag Output = output;
+// /// Predicates that must be true for this to match.
+// list<Predicate> Predicates = predicates;
+// /// Duplicate match when tied operand is just different.
+// bit isCompressOnly = false;
+// }
+//
+// let Predicates = [HasStdExtC] in {
+// def : CompressPat<(ADD GPRNoX0:$rs1, GPRNoX0:$rs1, GPRNoX0:$rs2),
+// (C_ADD GPRNoX0:$rs1, GPRNoX0:$rs2)>;
+// }
+//
+// The <TargetName>GenCompressInstEmitter.inc is an auto-generated header
+// file which exports two functions for compressing/uncompressing MCInst
+// instructions, plus some helper functions:
+//
+// bool compressInst(MCInst &OutInst, const MCInst &MI,
+// const MCSubtargetInfo &STI);
+//
+// bool uncompressInst(MCInst &OutInst, const MCInst &MI,
+// const MCSubtargetInfo &STI);
+//
+// In addition, it exports a function for checking whether
+// an instruction is compressible:
+//
+// bool isCompressibleInst(const MachineInstr& MI,
+// const <TargetName>Subtarget &STI);
+//
+// The clients that include this auto-generated header file and
+// invoke these functions can compress an instruction before emitting
+// it in the target-specific ASM or ELF streamer or can uncompress
+// an instruction before printing it when the expanded instruction
+// format aliases is favored.
+
+//===----------------------------------------------------------------------===//
+
+#include "CodeGenInstruction.h"
+#include "CodeGenTarget.h"
+#include "llvm/ADT/IndexedMap.h"
+#include "llvm/ADT/SmallVector.h"
+#include "llvm/ADT/StringMap.h"
+#include "llvm/Support/Debug.h"
+#include "llvm/Support/ErrorHandling.h"
+#include "llvm/TableGen/Error.h"
+#include "llvm/TableGen/Record.h"
+#include "llvm/TableGen/TableGenBackend.h"
+#include <set>
+#include <vector>
+using namespace llvm;
+
+#define DEBUG_TYPE "compress-inst-emitter"
+
+namespace {
+class CompressInstEmitter {
+ struct OpData {
+ enum MapKind { Operand, Imm, Reg };
+ MapKind Kind;
+ union {
+ // Operand number mapped to.
+ unsigned Operand;
+ // Integer immediate value.
+ int64_t Imm;
+ // Physical register.
+ Record *Reg;
+ } Data;
+ // Tied operand index within the instruction.
+ int TiedOpIdx = -1;
+ };
+ struct CompressPat {
+ // The source instruction definition.
+ CodeGenInstruction Source;
+ // The destination instruction to transform to.
+ CodeGenInstruction Dest;
+ // Required target features to enable pattern.
+ std::vector<Record *> PatReqFeatures;
+ // Maps operands in the Source Instruction to
+ IndexedMap<OpData> SourceOperandMap;
+ // the corresponding Dest instruction operand.
+ // Maps operands in the Dest Instruction
+ // to the corresponding Source instruction operand.
+ IndexedMap<OpData> DestOperandMap;
+
+ bool IsCompressOnly;
+ CompressPat(CodeGenInstruction &S, CodeGenInstruction &D,
+ std::vector<Record *> RF, IndexedMap<OpData> &SourceMap,
+ IndexedMap<OpData> &DestMap, bool IsCompressOnly)
+ : Source(S), Dest(D), PatReqFeatures(RF), SourceOperandMap(SourceMap),
+ DestOperandMap(DestMap), IsCompressOnly(IsCompressOnly) {}
+ };
+ enum EmitterType { Compress, Uncompress, CheckCompress };
+ RecordKeeper &Records;
+ CodeGenTarget Target;
+ SmallVector<CompressPat, 4> CompressPatterns;
+
+ void addDagOperandMapping(Record *Rec, DagInit *Dag, CodeGenInstruction &Inst,
+ IndexedMap<OpData> &OperandMap, bool IsSourceInst);
+ void evaluateCompressPat(Record *Compress);
+ void emitCompressInstEmitter(raw_ostream &o, EmitterType EType);
+ bool validateTypes(Record *SubType, Record *Type, bool IsSourceInst);
+ bool validateRegister(Record *Reg, Record *RegClass);
+ void createDagOperandMapping(Record *Rec, StringMap<unsigned> &SourceOperands,
+ StringMap<unsigned> &DestOperands,
+ DagInit *SourceDag, DagInit *DestDag,
+ IndexedMap<OpData> &SourceOperandMap);
+
+ void createInstOperandMapping(Record *Rec, DagInit *SourceDag,
+ DagInit *DestDag,
+ IndexedMap<OpData> &SourceOperandMap,
+ IndexedMap<OpData> &DestOperandMap,
+ StringMap<unsigned> &SourceOperands,
+ CodeGenInstruction &DestInst);
+
+public:
+ CompressInstEmitter(RecordKeeper &R) : Records(R), Target(R) {}
+
+ void run(raw_ostream &o);
+};
+} // End anonymous namespace.
+
+bool CompressInstEmitter::validateRegister(Record *Reg, Record *RegClass) {
+ assert(Reg->isSubClassOf("Register") && "Reg record should be a Register");
+ assert(RegClass->isSubClassOf("RegisterClass") &&
+ "RegClass record should be a RegisterClass");
+ const CodeGenRegisterClass &RC = Target.getRegisterClass(RegClass);
+ const CodeGenRegister *R = Target.getRegisterByName(Reg->getName().lower());
+ assert((R != nullptr) && "Register not defined!!");
+ return RC.contains(R);
+}
+
+bool CompressInstEmitter::validateTypes(Record *DagOpType, Record *InstOpType,
+ bool IsSourceInst) {
+ if (DagOpType == InstOpType)
+ return true;
+ // Only source instruction operands are allowed to not match Input Dag
+ // operands.
+ if (!IsSourceInst)
+ return false;
+
+ if (DagOpType->isSubClassOf("RegisterClass") &&
+ InstOpType->isSubClassOf("RegisterClass")) {
+ const CodeGenRegisterClass &RC = Target.getRegisterClass(InstOpType);
+ const CodeGenRegisterClass &SubRC = Target.getRegisterClass(DagOpType);
+ return RC.hasSubClass(&SubRC);
+ }
+
+ // At this point either or both types are not registers, reject the pattern.
+ if (DagOpType->isSubClassOf("RegisterClass") ||
+ InstOpType->isSubClassOf("RegisterClass"))
+ return false;
+
+ // Let further validation happen when compress()/uncompress() functions are
+ // invoked.
+ LLVM_DEBUG(dbgs() << (IsSourceInst ? "Input" : "Output")
+ << " Dag Operand Type: '" << DagOpType->getName()
+ << "' and "
+ << "Instruction Operand Type: '" << InstOpType->getName()
+ << "' can't be checked at pattern validation time!\n");
+ return true;
+}
+
+/// The patterns in the Dag contain different types of operands:
+/// Register operands, e.g.: GPRC:$rs1; Fixed registers, e.g: X1; Immediate
+/// operands, e.g.: simm6:$imm; Fixed immediate operands, e.g.: 0. This function
+/// maps Dag operands to its corresponding instruction operands. For register
+/// operands and fixed registers it expects the Dag operand type to be contained
+/// in the instantiated instruction operand type. For immediate operands and
+/// immediates no validation checks are enforced at pattern validation time.
+void CompressInstEmitter::addDagOperandMapping(Record *Rec, DagInit *Dag,
+ CodeGenInstruction &Inst,
+ IndexedMap<OpData> &OperandMap,
+ bool IsSourceInst) {
+ // TiedCount keeps track of the number of operands skipped in Inst
+ // operands list to get to the corresponding Dag operand. This is
+ // necessary because the number of operands in Inst might be greater
+ // than number of operands in the Dag due to how tied operands
+ // are represented.
+ unsigned TiedCount = 0;
+ for (unsigned i = 0, e = Inst.Operands.size(); i != e; ++i) {
+ int TiedOpIdx = Inst.Operands[i].getTiedRegister();
+ if (-1 != TiedOpIdx) {
+ // Set the entry in OperandMap for the tied operand we're skipping.
+ OperandMap[i].Kind = OperandMap[TiedOpIdx].Kind;
+ OperandMap[i].Data = OperandMap[TiedOpIdx].Data;
+ TiedCount++;
+ continue;
+ }
+ if (DefInit *DI = dyn_cast<DefInit>(Dag->getArg(i - TiedCount))) {
+ if (DI->getDef()->isSubClassOf("Register")) {
+ // Check if the fixed register belongs to the Register class.
+ if (!validateRegister(DI->getDef(), Inst.Operands[i].Rec))
+ PrintFatalError(Rec->getLoc(),
+ "Error in Dag '" + Dag->getAsString() +
+ "'Register: '" + DI->getDef()->getName() +
+ "' is not in register class '" +
+ Inst.Operands[i].Rec->getName() + "'");
+ OperandMap[i].Kind = OpData::Reg;
+ OperandMap[i].Data.Reg = DI->getDef();
+ continue;
+ }
+ // Validate that Dag operand type matches the type defined in the
+ // corresponding instruction. Operands in the input Dag pattern are
+ // allowed to be a subclass of the type specified in corresponding
+ // instruction operand instead of being an exact match.
+ if (!validateTypes(DI->getDef(), Inst.Operands[i].Rec, IsSourceInst))
+ PrintFatalError(Rec->getLoc(),
+ "Error in Dag '" + Dag->getAsString() + "'. Operand '" +
+ Dag->getArgNameStr(i - TiedCount) + "' has type '" +
+ DI->getDef()->getName() +
+ "' which does not match the type '" +
+ Inst.Operands[i].Rec->getName() +
+ "' in the corresponding instruction operand!");
+
+ OperandMap[i].Kind = OpData::Operand;
+ } else if (IntInit *II = dyn_cast<IntInit>(Dag->getArg(i - TiedCount))) {
+ // Validate that corresponding instruction operand expects an immediate.
+ if (Inst.Operands[i].Rec->isSubClassOf("RegisterClass"))
+ PrintFatalError(
+ Rec->getLoc(),
+ "Error in Dag '" + Dag->getAsString() + "' Found immediate: '" +
+ II->getAsString() +
+ "' but corresponding instruction operand expected a register!");
+ // No pattern validation check possible for values of fixed immediate.
+ OperandMap[i].Kind = OpData::Imm;
+ OperandMap[i].Data.Imm = II->getValue();
+ LLVM_DEBUG(
+ dbgs() << " Found immediate '" << II->getValue() << "' at "
+ << (IsSourceInst ? "input " : "output ")
+ << "Dag. No validation time check possible for values of "
+ "fixed immediate.\n");
+ } else
+ llvm_unreachable("Unhandled CompressPat argument type!");
+ }
+}
+
+// Verify the Dag operand count is enough to build an instruction.
+static bool verifyDagOpCount(CodeGenInstruction &Inst, DagInit *Dag,
+ bool IsSource) {
+ if (Dag->getNumArgs() == Inst.Operands.size())
+ return true;
+ // Source instructions are non compressed instructions and don't have tied
+ // operands.
+ if (IsSource)
+ PrintFatalError(Inst.TheDef->getLoc(),
+ "Input operands for Inst '" + Inst.TheDef->getName() +
+ "' and input Dag operand count mismatch");
+ // The Dag can't have more arguments than the Instruction.
+ if (Dag->getNumArgs() > Inst.Operands.size())
+ PrintFatalError(Inst.TheDef->getLoc(),
+ "Inst '" + Inst.TheDef->getName() +
+ "' and Dag operand count mismatch");
+
+ // The Instruction might have tied operands so the Dag might have
+ // a fewer operand count.
+ unsigned RealCount = Inst.Operands.size();
+ for (const auto &Operand : Inst.Operands)
+ if (Operand.getTiedRegister() != -1)
+ --RealCount;
+
+ if (Dag->getNumArgs() != RealCount)
+ PrintFatalError(Inst.TheDef->getLoc(),
+ "Inst '" + Inst.TheDef->getName() +
+ "' and Dag operand count mismatch");
+ return true;
+}
+
+static bool validateArgsTypes(Init *Arg1, Init *Arg2) {
+ return cast<DefInit>(Arg1)->getDef() == cast<DefInit>(Arg2)->getDef();
+}
+
+// Creates a mapping between the operand name in the Dag (e.g. $rs1) and
+// its index in the list of Dag operands and checks that operands with the same
+// name have the same types. For example in 'C_ADD $rs1, $rs2' we generate the
+// mapping $rs1 --> 0, $rs2 ---> 1. If the operand appears twice in the (tied)
+// same Dag we use the last occurrence for indexing.
+void CompressInstEmitter::createDagOperandMapping(
+ Record *Rec, StringMap<unsigned> &SourceOperands,
+ StringMap<unsigned> &DestOperands, DagInit *SourceDag, DagInit *DestDag,
+ IndexedMap<OpData> &SourceOperandMap) {
+ for (unsigned i = 0; i < DestDag->getNumArgs(); ++i) {
+ // Skip fixed immediates and registers, they were handled in
+ // addDagOperandMapping.
+ if ("" == DestDag->getArgNameStr(i))
+ continue;
+ DestOperands[DestDag->getArgNameStr(i)] = i;
+ }
+
+ for (unsigned i = 0; i < SourceDag->getNumArgs(); ++i) {
+ // Skip fixed immediates and registers, they were handled in
+ // addDagOperandMapping.
+ if ("" == SourceDag->getArgNameStr(i))
+ continue;
+
+ StringMap<unsigned>::iterator it =
+ SourceOperands.find(SourceDag->getArgNameStr(i));
+ if (it != SourceOperands.end()) {
+ // Operand sharing the same name in the Dag should be mapped as tied.
+ SourceOperandMap[i].TiedOpIdx = it->getValue();
+ if (!validateArgsTypes(SourceDag->getArg(it->getValue()),
+ SourceDag->getArg(i)))
+ PrintFatalError(Rec->getLoc(),
+ "Input Operand '" + SourceDag->getArgNameStr(i) +
+ "' has a mismatched tied operand!\n");
+ }
+ it = DestOperands.find(SourceDag->getArgNameStr(i));
+ if (it == DestOperands.end())
+ PrintFatalError(Rec->getLoc(), "Operand " + SourceDag->getArgNameStr(i) +
+ " defined in Input Dag but not used in"
+ " Output Dag!\n");
+ // Input Dag operand types must match output Dag operand type.
+ if (!validateArgsTypes(DestDag->getArg(it->getValue()),
+ SourceDag->getArg(i)))
+ PrintFatalError(Rec->getLoc(), "Type mismatch between Input and "
+ "Output Dag operand '" +
+ SourceDag->getArgNameStr(i) + "'!");
+ SourceOperands[SourceDag->getArgNameStr(i)] = i;
+ }
+}
+
+/// Map operand names in the Dag to their index in both corresponding input and
+/// output instructions. Validate that operands defined in the input are
+/// used in the output pattern while populating the maps.
+void CompressInstEmitter::createInstOperandMapping(
+ Record *Rec, DagInit *SourceDag, DagInit *DestDag,
+ IndexedMap<OpData> &SourceOperandMap, IndexedMap<OpData> &DestOperandMap,
+ StringMap<unsigned> &SourceOperands, CodeGenInstruction &DestInst) {
+ // TiedCount keeps track of the number of operands skipped in Inst
+ // operands list to get to the corresponding Dag operand.
+ unsigned TiedCount = 0;
+ LLVM_DEBUG(dbgs() << " Operand mapping:\n Source Dest\n");
+ for (unsigned i = 0, e = DestInst.Operands.size(); i != e; ++i) {
+ int TiedInstOpIdx = DestInst.Operands[i].getTiedRegister();
+ if (TiedInstOpIdx != -1) {
+ ++TiedCount;
+ DestOperandMap[i].Data = DestOperandMap[TiedInstOpIdx].Data;
+ DestOperandMap[i].Kind = DestOperandMap[TiedInstOpIdx].Kind;
+ if (DestOperandMap[i].Kind == OpData::Operand)
+ // No need to fill the SourceOperandMap here since it was mapped to
+ // destination operand 'TiedInstOpIdx' in a previous iteration.
+ LLVM_DEBUG(dbgs() << " " << DestOperandMap[i].Data.Operand
+ << " ====> " << i
+ << " Dest operand tied with operand '"
+ << TiedInstOpIdx << "'\n");
+ continue;
+ }
+ // Skip fixed immediates and registers, they were handled in
+ // addDagOperandMapping.
+ if (DestOperandMap[i].Kind != OpData::Operand)
+ continue;
+
+ unsigned DagArgIdx = i - TiedCount;
+ StringMap<unsigned>::iterator SourceOp =
+ SourceOperands.find(DestDag->getArgNameStr(DagArgIdx));
+ if (SourceOp == SourceOperands.end())
+ PrintFatalError(Rec->getLoc(),
+ "Output Dag operand '" +
+ DestDag->getArgNameStr(DagArgIdx) +
+ "' has no matching input Dag operand.");
+
+ assert(DestDag->getArgNameStr(DagArgIdx) ==
+ SourceDag->getArgNameStr(SourceOp->getValue()) &&
+ "Incorrect operand mapping detected!\n");
+ DestOperandMap[i].Data.Operand = SourceOp->getValue();
+ SourceOperandMap[SourceOp->getValue()].Data.Operand = i;
+ LLVM_DEBUG(dbgs() << " " << SourceOp->getValue() << " ====> " << i
+ << "\n");
+ }
+}
+
+/// Validates the CompressPattern and create operand mapping.
+/// These are the checks to validate a CompressPat pattern declarations.
+/// Error out with message under these conditions:
+/// - Dag Input opcode is an expanded instruction and Dag Output opcode is a
+/// compressed instruction.
+/// - Operands in Dag Input must be all used in Dag Output.
+/// Register Operand type in Dag Input Type must be contained in the
+/// corresponding Source Instruction type.
+/// - Register Operand type in Dag Input must be the same as in Dag Output.
+/// - Register Operand type in Dag Output must be the same as the
+/// corresponding Destination Inst type.
+/// - Immediate Operand type in Dag Input must be the same as in Dag Output.
+/// - Immediate Operand type in Dag Output must be the same as the corresponding
+/// Destination Instruction type.
+/// - Fixed register must be contained in the corresponding Source Instruction
+/// type.
+/// - Fixed register must be contained in the corresponding Destination
+/// Instruction type. Warning message printed under these conditions:
+/// - Fixed immediate in Dag Input or Dag Output cannot be checked at this time
+/// and generate warning.
+/// - Immediate operand type in Dag Input differs from the corresponding Source
+/// Instruction type and generate a warning.
+void CompressInstEmitter::evaluateCompressPat(Record *Rec) {
+ // Validate input Dag operands.
+ DagInit *SourceDag = Rec->getValueAsDag("Input");
+ assert(SourceDag && "Missing 'Input' in compress pattern!");
+ LLVM_DEBUG(dbgs() << "Input: " << *SourceDag << "\n");
+
+ // Checking we are transforming from compressed to uncompressed instructions.
+ Record *Operator = SourceDag->getOperatorAsDef(Rec->getLoc());
+ CodeGenInstruction SourceInst(Operator);
+ verifyDagOpCount(SourceInst, SourceDag, true);
+
+ // Validate output Dag operands.
+ DagInit *DestDag = Rec->getValueAsDag("Output");
+ assert(DestDag && "Missing 'Output' in compress pattern!");
+ LLVM_DEBUG(dbgs() << "Output: " << *DestDag << "\n");
+
+ Record *DestOperator = DestDag->getOperatorAsDef(Rec->getLoc());
+ CodeGenInstruction DestInst(DestOperator);
+ verifyDagOpCount(DestInst, DestDag, false);
+
+ if (Operator->getValueAsInt("Size") <= DestOperator->getValueAsInt("Size"))
+ PrintFatalError(
+ Rec->getLoc(),
+ "Compressed instruction '" + DestOperator->getName() +
+ "'is not strictly smaller than the uncompressed instruction '" +
+ Operator->getName() + "' !");
+
+ // Fill the mapping from the source to destination instructions.
+
+ IndexedMap<OpData> SourceOperandMap;
+ SourceOperandMap.grow(SourceInst.Operands.size());
+ // Create a mapping between source Dag operands and source Inst operands.
+ addDagOperandMapping(Rec, SourceDag, SourceInst, SourceOperandMap,
+ /*IsSourceInst*/ true);
+
+ IndexedMap<OpData> DestOperandMap;
+ DestOperandMap.grow(DestInst.Operands.size());
+ // Create a mapping between destination Dag operands and destination Inst
+ // operands.
+ addDagOperandMapping(Rec, DestDag, DestInst, DestOperandMap,
+ /*IsSourceInst*/ false);
+
+ StringMap<unsigned> SourceOperands;
+ StringMap<unsigned> DestOperands;
+ createDagOperandMapping(Rec, SourceOperands, DestOperands, SourceDag, DestDag,
+ SourceOperandMap);
+ // Create operand mapping between the source and destination instructions.
+ createInstOperandMapping(Rec, SourceDag, DestDag, SourceOperandMap,
+ DestOperandMap, SourceOperands, DestInst);
+
+ // Get the target features for the CompressPat.
+ std::vector<Record *> PatReqFeatures;
+ std::vector<Record *> RF = Rec->getValueAsListOfDefs("Predicates");
+ copy_if(RF, std::back_inserter(PatReqFeatures), [](Record *R) {
+ return R->getValueAsBit("AssemblerMatcherPredicate");
+ });
+
+ CompressPatterns.push_back(CompressPat(SourceInst, DestInst, PatReqFeatures,
+ SourceOperandMap, DestOperandMap,
+ Rec->getValueAsBit("isCompressOnly")));
+}
+
+static void
+getReqFeatures(std::set<std::pair<bool, StringRef>> &FeaturesSet,
+ std::set<std::set<std::pair<bool, StringRef>>> &AnyOfFeatureSets,
+ const std::vector<Record *> &ReqFeatures) {
+ for (auto &R : ReqFeatures) {
+ const DagInit *D = R->getValueAsDag("AssemblerCondDag");
+ std::string CombineType = D->getOperator()->getAsString();
+ if (CombineType != "any_of" && CombineType != "all_of")
+ PrintFatalError(R->getLoc(), "Invalid AssemblerCondDag!");
+ if (D->getNumArgs() == 0)
+ PrintFatalError(R->getLoc(), "Invalid AssemblerCondDag!");
+ bool IsOr = CombineType == "any_of";
+ std::set<std::pair<bool, StringRef>> AnyOfSet;
+
+ for (auto *Arg : D->getArgs()) {
+ bool IsNot = false;
+ if (auto *NotArg = dyn_cast<DagInit>(Arg)) {
+ if (NotArg->getOperator()->getAsString() != "not" ||
+ NotArg->getNumArgs() != 1)
+ PrintFatalError(R->getLoc(), "Invalid AssemblerCondDag!");
+ Arg = NotArg->getArg(0);
+ IsNot = true;
+ }
+ if (!isa<DefInit>(Arg) ||
+ !cast<DefInit>(Arg)->getDef()->isSubClassOf("SubtargetFeature"))
+ PrintFatalError(R->getLoc(), "Invalid AssemblerCondDag!");
+ if (IsOr)
+ AnyOfSet.insert({IsNot, cast<DefInit>(Arg)->getDef()->getName()});
+ else
+ FeaturesSet.insert({IsNot, cast<DefInit>(Arg)->getDef()->getName()});
+ }
+
+ if (IsOr)
+ AnyOfFeatureSets.insert(AnyOfSet);
+ }
+}
+
+static unsigned getPredicates(DenseMap<const Record *, unsigned> &PredicateMap,
+ std::vector<const Record *> &Predicates,
+ Record *Rec, StringRef Name) {
+ unsigned &Entry = PredicateMap[Rec];
+ if (Entry)
+ return Entry;
+
+ if (!Rec->isValueUnset(Name)) {
+ Predicates.push_back(Rec);
+ Entry = Predicates.size();
+ return Entry;
+ }
+
+ PrintFatalError(Rec->getLoc(), "No " + Name +
+ " predicate on this operand at all: '" +
+ Rec->getName() + "'");
+ return 0;
+}
+
+static void printPredicates(const std::vector<const Record *> &Predicates,
+ StringRef Name, raw_ostream &o) {
+ for (unsigned i = 0; i < Predicates.size(); ++i) {
+ StringRef Pred = Predicates[i]->getValueAsString(Name);
+ o << " case " << i + 1 << ": {\n"
+ << " // " << Predicates[i]->getName() << "\n"
+ << " " << Pred << "\n"
+ << " }\n";
+ }
+}
+
+static void mergeCondAndCode(raw_ostream &CombinedStream, StringRef CondStr,
+ StringRef CodeStr) {
+ // Remove first indentation and last '&&'.
+ CondStr = CondStr.drop_front(6).drop_back(4);
+ CombinedStream.indent(4) << "if (" << CondStr << ") {\n";
+ CombinedStream << CodeStr;
+ CombinedStream.indent(4) << " return true;\n";
+ CombinedStream.indent(4) << "} // if\n";
+}
+
+void CompressInstEmitter::emitCompressInstEmitter(raw_ostream &o,
+ EmitterType EType) {
+ Record *AsmWriter = Target.getAsmWriter();
+ if (!AsmWriter->getValueAsInt("PassSubtarget"))
+ PrintFatalError(AsmWriter->getLoc(),
+ "'PassSubtarget' is false. SubTargetInfo object is needed "
+ "for target features.\n");
+
+ StringRef TargetName = Target.getName();
+
+ // Sort entries in CompressPatterns to handle instructions that can have more
+  // than one candidate for compression/uncompression, e.g. ADD can be
+ // transformed to a C_ADD or a C_MV. When emitting 'uncompress()' function the
+ // source and destination are flipped and the sort key needs to change
+ // accordingly.
+ llvm::stable_sort(CompressPatterns, [EType](const CompressPat &LHS,
+ const CompressPat &RHS) {
+ if (EType == EmitterType::Compress || EType == EmitterType::CheckCompress)
+ return (LHS.Source.TheDef->getName() < RHS.Source.TheDef->getName());
+ else
+ return (LHS.Dest.TheDef->getName() < RHS.Dest.TheDef->getName());
+ });
+
+ // A list of MCOperandPredicates for all operands in use, and the reverse map.
+ std::vector<const Record *> MCOpPredicates;
+ DenseMap<const Record *, unsigned> MCOpPredicateMap;
+ // A list of ImmLeaf Predicates for all operands in use, and the reverse map.
+ std::vector<const Record *> ImmLeafPredicates;
+ DenseMap<const Record *, unsigned> ImmLeafPredicateMap;
+
+ std::string F;
+ std::string FH;
+ raw_string_ostream Func(F);
+ raw_string_ostream FuncH(FH);
+
+ if (EType == EmitterType::Compress)
+ o << "\n#ifdef GEN_COMPRESS_INSTR\n"
+ << "#undef GEN_COMPRESS_INSTR\n\n";
+ else if (EType == EmitterType::Uncompress)
+ o << "\n#ifdef GEN_UNCOMPRESS_INSTR\n"
+ << "#undef GEN_UNCOMPRESS_INSTR\n\n";
+ else if (EType == EmitterType::CheckCompress)
+ o << "\n#ifdef GEN_CHECK_COMPRESS_INSTR\n"
+ << "#undef GEN_CHECK_COMPRESS_INSTR\n\n";
+
+ if (EType == EmitterType::Compress) {
+ FuncH << "static bool compressInst(MCInst &OutInst,\n";
+ FuncH.indent(25) << "const MCInst &MI,\n";
+ FuncH.indent(25) << "const MCSubtargetInfo &STI) {\n";
+ } else if (EType == EmitterType::Uncompress) {
+ FuncH << "static bool uncompressInst(MCInst &OutInst,\n";
+ FuncH.indent(27) << "const MCInst &MI,\n";
+ FuncH.indent(27) << "const MCSubtargetInfo &STI) {\n";
+ } else if (EType == EmitterType::CheckCompress) {
+ FuncH << "static bool isCompressibleInst(const MachineInstr &MI,\n";
+ FuncH.indent(31) << "const " << TargetName << "Subtarget &STI) {\n";
+ }
+
+ if (CompressPatterns.empty()) {
+ o << FuncH.str();
+ o.indent(2) << "return false;\n}\n";
+ if (EType == EmitterType::Compress)
+ o << "\n#endif //GEN_COMPRESS_INSTR\n";
+ else if (EType == EmitterType::Uncompress)
+ o << "\n#endif //GEN_UNCOMPRESS_INSTR\n\n";
+ else if (EType == EmitterType::CheckCompress)
+ o << "\n#endif //GEN_CHECK_COMPRESS_INSTR\n\n";
+ return;
+ }
+
+ std::string CaseString;
+ raw_string_ostream CaseStream(CaseString);
+ StringRef PrevOp;
+ StringRef CurOp;
+ CaseStream << " switch (MI.getOpcode()) {\n";
+ CaseStream << " default: return false;\n";
+
+ bool CompressOrCheck =
+ EType == EmitterType::Compress || EType == EmitterType::CheckCompress;
+ bool CompressOrUncompress =
+ EType == EmitterType::Compress || EType == EmitterType::Uncompress;
+ std::string ValidatorName =
+ CompressOrUncompress
+ ? (TargetName + "ValidateMCOperandFor" +
+ (EType == EmitterType::Compress ? "Compress" : "Uncompress"))
+ .str()
+ : "";
+
+ for (auto &CompressPat : CompressPatterns) {
+ if (EType == EmitterType::Uncompress && CompressPat.IsCompressOnly)
+ continue;
+
+ std::string CondString;
+ std::string CodeString;
+ raw_string_ostream CondStream(CondString);
+ raw_string_ostream CodeStream(CodeString);
+ CodeGenInstruction &Source =
+ CompressOrCheck ? CompressPat.Source : CompressPat.Dest;
+ CodeGenInstruction &Dest =
+ CompressOrCheck ? CompressPat.Dest : CompressPat.Source;
+ IndexedMap<OpData> SourceOperandMap = CompressOrCheck
+ ? CompressPat.SourceOperandMap
+ : CompressPat.DestOperandMap;
+ IndexedMap<OpData> &DestOperandMap = CompressOrCheck
+ ? CompressPat.DestOperandMap
+ : CompressPat.SourceOperandMap;
+
+ CurOp = Source.TheDef->getName();
+ // Check current and previous opcode to decide to continue or end a case.
+ if (CurOp != PrevOp) {
+ if (!PrevOp.empty())
+ CaseStream.indent(6) << "break;\n } // case " + PrevOp + "\n";
+ CaseStream.indent(4) << "case " + TargetName + "::" + CurOp + ": {\n";
+ }
+
+ std::set<std::pair<bool, StringRef>> FeaturesSet;
+ std::set<std::set<std::pair<bool, StringRef>>> AnyOfFeatureSets;
+ // Add CompressPat required features.
+ getReqFeatures(FeaturesSet, AnyOfFeatureSets, CompressPat.PatReqFeatures);
+
+ // Add Dest instruction required features.
+ std::vector<Record *> ReqFeatures;
+ std::vector<Record *> RF = Dest.TheDef->getValueAsListOfDefs("Predicates");
+ copy_if(RF, std::back_inserter(ReqFeatures), [](Record *R) {
+ return R->getValueAsBit("AssemblerMatcherPredicate");
+ });
+ getReqFeatures(FeaturesSet, AnyOfFeatureSets, ReqFeatures);
+
+ // Emit checks for all required features.
+ for (auto &Op : FeaturesSet) {
+ StringRef Not = Op.first ? "!" : "";
+ CondStream.indent(6) << Not << "STI.getFeatureBits()[" << TargetName
+ << "::" << Op.second << "]"
+ << " &&\n";
+ }
+
+ // Emit checks for all required feature groups.
+ for (auto &Set : AnyOfFeatureSets) {
+ CondStream.indent(6) << "(";
+ for (auto &Op : Set) {
+ bool isLast = &Op == &*Set.rbegin();
+ StringRef Not = Op.first ? "!" : "";
+ CondStream << Not << "STI.getFeatureBits()[" << TargetName
+ << "::" << Op.second << "]";
+ if (!isLast)
+ CondStream << " || ";
+ }
+ CondStream << ") &&\n";
+ }
+
+ // Start Source Inst operands validation.
+ unsigned OpNo = 0;
+ for (OpNo = 0; OpNo < Source.Operands.size(); ++OpNo) {
+ if (SourceOperandMap[OpNo].TiedOpIdx != -1) {
+ if (Source.Operands[OpNo].Rec->isSubClassOf("RegisterClass"))
+ CondStream.indent(6)
+ << "(MI.getOperand(" << OpNo << ").isReg()) && (MI.getOperand("
+ << SourceOperandMap[OpNo].TiedOpIdx << ").isReg()) &&\n"
+ << " (MI.getOperand(" << OpNo
+ << ").getReg() == MI.getOperand("
+ << SourceOperandMap[OpNo].TiedOpIdx << ").getReg()) &&\n";
+ else
+ PrintFatalError("Unexpected tied operand types!\n");
+ }
+ // Check for fixed immediates\registers in the source instruction.
+ switch (SourceOperandMap[OpNo].Kind) {
+ case OpData::Operand:
+ // We don't need to do anything for source instruction operand checks.
+ break;
+ case OpData::Imm:
+ CondStream.indent(6)
+ << "(MI.getOperand(" << OpNo << ").isImm()) &&\n"
+ << " (MI.getOperand(" << OpNo
+ << ").getImm() == " << SourceOperandMap[OpNo].Data.Imm << ") &&\n";
+ break;
+ case OpData::Reg: {
+ Record *Reg = SourceOperandMap[OpNo].Data.Reg;
+ CondStream.indent(6)
+ << "(MI.getOperand(" << OpNo << ").isReg()) &&\n"
+ << " (MI.getOperand(" << OpNo << ").getReg() == " << TargetName
+ << "::" << Reg->getName() << ") &&\n";
+ break;
+ }
+ }
+ }
+ CodeStream.indent(6) << "// " << Dest.AsmString << "\n";
+ if (CompressOrUncompress)
+ CodeStream.indent(6) << "OutInst.setOpcode(" << TargetName
+ << "::" << Dest.TheDef->getName() << ");\n";
+ OpNo = 0;
+ for (const auto &DestOperand : Dest.Operands) {
+ CodeStream.indent(6) << "// Operand: " << DestOperand.Name << "\n";
+ switch (DestOperandMap[OpNo].Kind) {
+ case OpData::Operand: {
+ unsigned OpIdx = DestOperandMap[OpNo].Data.Operand;
+ // Check that the operand in the Source instruction fits
+ // the type for the Dest instruction.
+ if (DestOperand.Rec->isSubClassOf("RegisterClass") ||
+ DestOperand.Rec->isSubClassOf("RegisterOperand")) {
+ auto *ClassRec = DestOperand.Rec->isSubClassOf("RegisterClass")
+ ? DestOperand.Rec
+ : DestOperand.Rec->getValueAsDef("RegClass");
+ // This is a register operand. Check the register class.
+ // Don't check register class if this is a tied operand, it was done
+ // for the operand its tied to.
+ if (DestOperand.getTiedRegister() == -1)
+ CondStream.indent(6)
+ << "(MI.getOperand(" << OpIdx << ").isReg()) &&\n"
+ << " (" << TargetName << "MCRegisterClasses["
+ << TargetName << "::" << ClassRec->getName()
+ << "RegClassID].contains(MI.getOperand(" << OpIdx
+ << ").getReg())) &&\n";
+
+ if (CompressOrUncompress)
+ CodeStream.indent(6)
+ << "OutInst.addOperand(MI.getOperand(" << OpIdx << "));\n";
+ } else {
+ // Handling immediate operands.
+ if (CompressOrUncompress) {
+ unsigned Entry =
+ getPredicates(MCOpPredicateMap, MCOpPredicates, DestOperand.Rec,
+ "MCOperandPredicate");
+ CondStream.indent(6)
+ << ValidatorName << "("
+ << "MI.getOperand(" << OpIdx << "), STI, " << Entry << ") &&\n";
+ } else {
+ unsigned Entry =
+ getPredicates(ImmLeafPredicateMap, ImmLeafPredicates,
+ DestOperand.Rec, "ImmediateCode");
+ CondStream.indent(6)
+ << "MI.getOperand(" << OpIdx << ").isImm() &&\n";
+ CondStream.indent(6) << TargetName << "ValidateMachineOperand("
+ << "MI.getOperand(" << OpIdx
+ << "), &STI, " << Entry << ") &&\n";
+ }
+ if (CompressOrUncompress)
+ CodeStream.indent(6)
+ << "OutInst.addOperand(MI.getOperand(" << OpIdx << "));\n";
+ }
+ break;
+ }
+ case OpData::Imm: {
+ if (CompressOrUncompress) {
+ unsigned Entry = getPredicates(MCOpPredicateMap, MCOpPredicates,
+ DestOperand.Rec, "MCOperandPredicate");
+ CondStream.indent(6)
+ << ValidatorName << "("
+ << "MCOperand::createImm(" << DestOperandMap[OpNo].Data.Imm
+ << "), STI, " << Entry << ") &&\n";
+ } else {
+ unsigned Entry = getPredicates(ImmLeafPredicateMap, ImmLeafPredicates,
+ DestOperand.Rec, "ImmediateCode");
+ CondStream.indent(6)
+ << TargetName
+ << "ValidateMachineOperand(MachineOperand::CreateImm("
+ << DestOperandMap[OpNo].Data.Imm << "), &STI, " << Entry
+ << ") &&\n";
+ }
+ if (CompressOrUncompress)
+ CodeStream.indent(6) << "OutInst.addOperand(MCOperand::createImm("
+ << DestOperandMap[OpNo].Data.Imm << "));\n";
+ } break;
+ case OpData::Reg: {
+ if (CompressOrUncompress) {
+ // Fixed register has been validated at pattern validation time.
+ Record *Reg = DestOperandMap[OpNo].Data.Reg;
+ CodeStream.indent(6)
+ << "OutInst.addOperand(MCOperand::createReg(" << TargetName
+ << "::" << Reg->getName() << "));\n";
+ }
+ } break;
+ }
+ ++OpNo;
+ }
+ if (CompressOrUncompress)
+ CodeStream.indent(6) << "OutInst.setLoc(MI.getLoc());\n";
+ mergeCondAndCode(CaseStream, CondStream.str(), CodeStream.str());
+ PrevOp = CurOp;
+ }
+ Func << CaseStream.str() << "\n";
+ // Close brace for the last case.
+ Func.indent(4) << "} // case " << CurOp << "\n";
+ Func.indent(2) << "} // switch\n";
+ Func.indent(2) << "return false;\n}\n";
+
+ if (!MCOpPredicates.empty()) {
+ o << "static bool " << ValidatorName << "(const MCOperand &MCOp,\n"
+ << " const MCSubtargetInfo &STI,\n"
+ << " unsigned PredicateIndex) {\n"
+ << " switch (PredicateIndex) {\n"
+ << " default:\n"
+ << " llvm_unreachable(\"Unknown MCOperandPredicate kind\");\n"
+ << " break;\n";
+
+ printPredicates(MCOpPredicates, "MCOperandPredicate", o);
+
+ o << " }\n"
+ << "}\n\n";
+ }
+
+ if (!ImmLeafPredicates.empty()) {
+ o << "static bool " << TargetName
+ << "ValidateMachineOperand(const MachineOperand &MO,\n"
+ << " const " << TargetName << "Subtarget *Subtarget,\n"
+ << " unsigned PredicateIndex) {\n"
+ << " int64_t Imm = MO.getImm();\n"
+ << " switch (PredicateIndex) {\n"
+ << " default:\n"
+ << " llvm_unreachable(\"Unknown ImmLeaf Predicate kind\");\n"
+ << " break;\n";
+
+ printPredicates(ImmLeafPredicates, "ImmediateCode", o);
+
+ o << " }\n"
+ << "}\n\n";
+ }
+
+ o << FuncH.str();
+ o << Func.str();
+
+ if (EType == EmitterType::Compress)
+ o << "\n#endif //GEN_COMPRESS_INSTR\n";
+ else if (EType == EmitterType::Uncompress)
+ o << "\n#endif //GEN_UNCOMPRESS_INSTR\n\n";
+ else if (EType == EmitterType::CheckCompress)
+ o << "\n#endif //GEN_CHECK_COMPRESS_INSTR\n\n";
+}
+
+// Top-level driver: parse and validate every CompressPat record, then emit
+// the three generated functions into the output stream.
+void CompressInstEmitter::run(raw_ostream &o) {
+  std::vector<Record *> Insts = Records.getAllDerivedDefinitions("CompressPat");
+
+  // Process the CompressPat definitions, validating them as we do so.
+  for (unsigned i = 0, e = Insts.size(); i != e; ++i)
+    evaluateCompressPat(Insts[i]);
+
+  // Emit file header.
+  emitSourceFileHeader("Compress instruction Source Fragment", o);
+  // Generate compressInst() function.
+  emitCompressInstEmitter(o, EmitterType::Compress);
+  // Generate uncompressInst() function.
+  emitCompressInstEmitter(o, EmitterType::Uncompress);
+  // Generate isCompressibleInst() function.
+  emitCompressInstEmitter(o, EmitterType::CheckCompress);
+}
+
+namespace llvm {
+
+// TableGen backend entry point (invoked by the TableGen driver).
+void EmitCompressInst(RecordKeeper &RK, raw_ostream &OS) {
+  CompressInstEmitter(RK).run(OS);
+}
+
+} // namespace llvm
diff --git a/contrib/libs/llvm16/utils/TableGen/DAGISelEmitter.cpp b/contrib/libs/llvm16/utils/TableGen/DAGISelEmitter.cpp
new file mode 100644
index 0000000000..d012a0172a
--- /dev/null
+++ b/contrib/libs/llvm16/utils/TableGen/DAGISelEmitter.cpp
@@ -0,0 +1,195 @@
+//===- DAGISelEmitter.cpp - Generate an instruction selector --------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// This tablegen backend emits a DAG instruction selector.
+//
+//===----------------------------------------------------------------------===//
+
+#include "CodeGenDAGPatterns.h"
+#include "CodeGenInstruction.h"
+#include "DAGISelMatcher.h"
+#include "llvm/Support/Debug.h"
+#include "llvm/TableGen/Record.h"
+#include "llvm/TableGen/TableGenBackend.h"
+using namespace llvm;
+
+#define DEBUG_TYPE "dag-isel-emitter"
+
+namespace {
+/// DAGISelEmitter - The top-level class which coordinates construction
+/// and emission of the instruction selector.
+class DAGISelEmitter {
+  RecordKeeper &Records; // Just so we can get at the timing functions.
+  CodeGenDAGPatterns CGP; // Parsed pattern database for the target.
+public:
+  explicit DAGISelEmitter(RecordKeeper &R) : Records(R), CGP(R) {}
+  void run(raw_ostream &OS);
+};
+} // End anonymous namespace
+
+//===----------------------------------------------------------------------===//
+// DAGISelEmitter Helper methods
+//
+
+/// getResultPatternCost - Compute the number of instructions for this pattern.
+/// This is a temporary hack. We should really include the instruction
+/// latencies in this calculation.
+static unsigned getResultPatternCost(TreePatternNode *P,
+                                     CodeGenDAGPatterns &CGP) {
+  if (P->isLeaf()) return 0;
+
+  unsigned Cost = 0;
+  Record *Op = P->getOperator();
+  if (Op->isSubClassOf("Instruction")) {
+    Cost++;
+    CodeGenInstruction &II = CGP.getTargetInfo().getInstruction(Op);
+    // Instructions expanded via a custom inserter count as expensive.
+    if (II.usesCustomInserter)
+      Cost += 10;
+  }
+  // Recursively sum the cost of the whole result DAG.
+  for (unsigned i = 0, e = P->getNumChildren(); i != e; ++i)
+    Cost += getResultPatternCost(P->getChild(i), CGP);
+  return Cost;
+}
+
+/// getResultPatternSize - Compute the code size of instructions for this
+/// pattern.
+static unsigned getResultPatternSize(TreePatternNode *P,
+                                     CodeGenDAGPatterns &CGP) {
+  if (P->isLeaf()) return 0;
+
+  unsigned Cost = 0;
+  Record *Op = P->getOperator();
+  if (Op->isSubClassOf("Instruction")) {
+    Cost += Op->getValueAsInt("CodeSize");
+  }
+  // Recursively sum the CodeSize of the whole result DAG.
+  for (unsigned i = 0, e = P->getNumChildren(); i != e; ++i)
+    Cost += getResultPatternSize(P->getChild(i), CGP);
+  return Cost;
+}
+
+namespace {
+// PatternSortingPredicate - return true if we prefer to match LHS before RHS.
+// In particular, we want to match maximal patterns first and lowest cost within
+// a particular complexity first.
+struct PatternSortingPredicate {
+  PatternSortingPredicate(CodeGenDAGPatterns &cgp) : CGP(cgp) {}
+  CodeGenDAGPatterns &CGP;
+
+  bool operator()(const PatternToMatch *LHS, const PatternToMatch *RHS) {
+    const TreePatternNode *LT = LHS->getSrcPattern();
+    const TreePatternNode *RT = RHS->getSrcPattern();
+
+    // Vector patterns sort after scalar ones, and floating-point patterns
+    // after integer ones, so the cheaper-to-check cases come first.
+    MVT LHSVT = LT->getNumTypes() != 0 ? LT->getSimpleType(0) : MVT::Other;
+    MVT RHSVT = RT->getNumTypes() != 0 ? RT->getSimpleType(0) : MVT::Other;
+    if (LHSVT.isVector() != RHSVT.isVector())
+      return RHSVT.isVector();
+
+    if (LHSVT.isFloatingPoint() != RHSVT.isFloatingPoint())
+      return RHSVT.isFloatingPoint();
+
+    // Otherwise, if the patterns might both match, sort based on complexity,
+    // which means that we prefer to match patterns that cover more nodes in the
+    // input over nodes that cover fewer.
+    int LHSSize = LHS->getPatternComplexity(CGP);
+    int RHSSize = RHS->getPatternComplexity(CGP);
+    if (LHSSize > RHSSize) return true;   // LHS -> bigger -> less cost
+    if (LHSSize < RHSSize) return false;
+
+    // If the patterns have equal complexity, compare generated instruction cost
+    unsigned LHSCost = getResultPatternCost(LHS->getDstPattern(), CGP);
+    unsigned RHSCost = getResultPatternCost(RHS->getDstPattern(), CGP);
+    if (LHSCost < RHSCost) return true;
+    if (LHSCost > RHSCost) return false;
+
+    unsigned LHSPatSize = getResultPatternSize(LHS->getDstPattern(), CGP);
+    unsigned RHSPatSize = getResultPatternSize(RHS->getDstPattern(), CGP);
+    if (LHSPatSize < RHSPatSize) return true;
+    if (LHSPatSize > RHSPatSize) return false;
+
+    // Sort based on the UID of the pattern, to reflect source order.
+    // Note that this is not guaranteed to be unique, since a single source
+    // pattern may have been resolved into multiple match patterns due to
+    // alternative fragments.  To ensure deterministic output, always use
+    // std::stable_sort with this predicate.
+    return LHS->getID() < RHS->getID();
+  }
+};
+} // End anonymous namespace
+
+
+// Emit the complete DAG instruction selector: header comments, then the
+// optimized matcher table built from every pattern variant.
+void DAGISelEmitter::run(raw_ostream &OS) {
+  emitSourceFileHeader("DAG Instruction Selector for the " +
+                       CGP.getTargetInfo().getName().str() + " target", OS);
+
+  OS << "// *** NOTE: This file is #included into the middle of the target\n"
+     << "// *** instruction selector class. These functions are really "
+     << "methods.\n\n";
+
+  OS << "// If GET_DAGISEL_DECL is #defined with any value, only function\n"
+        "// declarations will be included when this file is included.\n"
+        "// If GET_DAGISEL_BODY is #defined, its value should be the name of\n"
+        "// the instruction selector class. Function bodies will be emitted\n"
+        "// and each function's name will be qualified with the name of the\n"
+        "// class.\n"
+        "//\n"
+        "// When neither of the GET_DAGISEL* macros is defined, the functions\n"
+        "// are emitted inline.\n\n";
+
+  LLVM_DEBUG(errs() << "\n\nALL PATTERNS TO MATCH:\n\n";
+             for (CodeGenDAGPatterns::ptm_iterator I = CGP.ptm_begin(),
+                                                   E = CGP.ptm_end();
+                  I != E; ++I) {
+               errs() << "PATTERN: ";
+               I->getSrcPattern()->dump();
+               errs() << "\nRESULT: ";
+               I->getDstPattern()->dump();
+               errs() << "\n";
+             });
+
+  // Add all the patterns to a temporary list so we can sort them.
+  Records.startTimer("Sort patterns");
+  std::vector<const PatternToMatch*> Patterns;
+  for (const PatternToMatch &PTM : CGP.ptms())
+    Patterns.push_back(&PTM);
+
+  // We want to process the matches in order of minimal cost. Sort the patterns
+  // so the least cost one is at the start.
+  llvm::stable_sort(Patterns, PatternSortingPredicate(CGP));
+
+  // Convert each variant of each pattern into a Matcher.
+  Records.startTimer("Convert to matchers");
+  std::vector<Matcher*> PatternMatchers;
+  for (const PatternToMatch *PTM : Patterns) {
+    // Enumerate variants until ConvertPatternToMatcher signals exhaustion
+    // by returning null.
+    for (unsigned Variant = 0; ; ++Variant) {
+      if (Matcher *M = ConvertPatternToMatcher(*PTM, Variant, CGP))
+        PatternMatchers.push_back(M);
+      else
+        break;
+    }
+  }
+
+  std::unique_ptr<Matcher> TheMatcher =
+      std::make_unique<ScopeMatcher>(PatternMatchers);
+
+  Records.startTimer("Optimize matchers");
+  OptimizeMatcher(TheMatcher, CGP);
+
+  //Matcher->dump();
+
+  Records.startTimer("Emit matcher table");
+  EmitMatcherTable(TheMatcher.get(), CGP, OS);
+}
+
+namespace llvm {
+
+// TableGen backend entry point for the DAG instruction selector.
+void EmitDAGISel(RecordKeeper &RK, raw_ostream &OS) {
+  RK.startTimer("Parse patterns");
+  DAGISelEmitter(RK).run(OS);
+}
+
+} // End llvm namespace
diff --git a/contrib/libs/llvm16/utils/TableGen/DAGISelMatcher.cpp b/contrib/libs/llvm16/utils/TableGen/DAGISelMatcher.cpp
new file mode 100644
index 0000000000..e436a931a9
--- /dev/null
+++ b/contrib/libs/llvm16/utils/TableGen/DAGISelMatcher.cpp
@@ -0,0 +1,435 @@
+//===- DAGISelMatcher.cpp - Representation of DAG pattern matcher ---------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+#include "DAGISelMatcher.h"
+#include "CodeGenDAGPatterns.h"
+#include "CodeGenTarget.h"
+#include "llvm/Support/raw_ostream.h"
+#include "llvm/TableGen/Record.h"
+using namespace llvm;
+
+void Matcher::anchor() { }
+
+void Matcher::dump() const {
+  print(errs(), 0);
+}
+
+// Print this node followed by the rest of the chain at the same indent.
+void Matcher::print(raw_ostream &OS, unsigned indent) const {
+  printImpl(OS, indent);
+  if (Next)
+    return Next->print(OS, indent);
+}
+
+void Matcher::printOne(raw_ostream &OS) const {
+  printImpl(OS, 0);
+}
+
+/// unlinkNode - Unlink the specified node from this chain. If Other == this,
+/// we unlink the next pointer and return it. Otherwise we unlink Other from
+/// the list and return this.
+Matcher *Matcher::unlinkNode(Matcher *Other) {
+  if (this == Other)
+    return takeNext();
+
+  // Scan until we find the predecessor of Other.
+  Matcher *Cur = this;
+  for (; Cur && Cur->getNext() != Other; Cur = Cur->getNext())
+    /*empty*/;
+
+  // Other was not found anywhere in the chain.
+  if (!Cur) return nullptr;
+  Cur->takeNext();
+  Cur->setNext(Other->takeNext());
+  return this;
+}
+
+/// canMoveBefore - Return true if this matcher is the same as Other, or if
+/// we can move this matcher past all of the nodes in-between Other and this
+/// node. Other must be equal to or before this.
+bool Matcher::canMoveBefore(const Matcher *Other) const {
+  for (;; Other = Other->getNext()) {
+    assert(Other && "Other didn't come before 'this'?");
+    if (this == Other) return true;
+
+    // We have to be able to move this node across the Other node.
+    if (!canMoveBeforeNode(Other))
+      return false;
+  }
+}
+
+/// canMoveBeforeNode - Return true if it is safe to move the current matcher
+/// across the specified one.
+bool Matcher::canMoveBeforeNode(const Matcher *Other) const {
+  // We can move simple predicates before record nodes.
+  if (isSimplePredicateNode())
+    return Other->isSimplePredicateOrRecordNode();
+
+  // We can move record nodes across simple predicates.
+  if (isSimplePredicateOrRecordNode())
+    return isSimplePredicateNode();
+
+  // We can't move record nodes across each other etc.
+  return false;
+}
+
+
+// Matchers own their children/cases; release them on destruction.
+ScopeMatcher::~ScopeMatcher() {
+  for (Matcher *C : Children)
+    delete C;
+}
+
+SwitchOpcodeMatcher::~SwitchOpcodeMatcher() {
+  for (auto &C : Cases)
+    delete C.second;
+}
+
+SwitchTypeMatcher::~SwitchTypeMatcher() {
+  for (auto &C : Cases)
+    delete C.second;
+}
+
+CheckPredicateMatcher::CheckPredicateMatcher(
+    const TreePredicateFn &pred, const SmallVectorImpl<unsigned> &Ops)
+    : Matcher(CheckPredicate), Pred(pred.getOrigPatFragRecord()),
+      Operands(Ops.begin(), Ops.end()) {}
+
+TreePredicateFn CheckPredicateMatcher::getPredicate() const {
+  return TreePredicateFn(Pred);
+}
+
+unsigned CheckPredicateMatcher::getNumOperands() const {
+  return Operands.size();
+}
+
+unsigned CheckPredicateMatcher::getOperandNo(unsigned i) const {
+  assert(i < Operands.size());
+  return Operands[i];
+}
+
+
+// printImpl methods.  Each emits a small human-readable dump of one matcher
+// node, used by Matcher::dump()/print() for debugging the matcher chain.
+
+void ScopeMatcher::printImpl(raw_ostream &OS, unsigned indent) const {
+  OS.indent(indent) << "Scope\n";
+  for (const Matcher *C : Children) {
+    if (!C)
+      OS.indent(indent+1) << "NULL POINTER\n";
+    else
+      C->print(OS, indent+2);
+  }
+}
+
+void RecordMatcher::printImpl(raw_ostream &OS, unsigned indent) const {
+  OS.indent(indent) << "Record\n";
+}
+
+void RecordChildMatcher::printImpl(raw_ostream &OS, unsigned indent) const {
+  OS.indent(indent) << "RecordChild: " << ChildNo << '\n';
+}
+
+void RecordMemRefMatcher::printImpl(raw_ostream &OS, unsigned indent) const {
+  OS.indent(indent) << "RecordMemRef\n";
+}
+
+void CaptureGlueInputMatcher::printImpl(raw_ostream &OS, unsigned indent) const{
+  OS.indent(indent) << "CaptureGlueInput\n";
+}
+
+void MoveChildMatcher::printImpl(raw_ostream &OS, unsigned indent) const {
+  OS.indent(indent) << "MoveChild " << ChildNo << '\n';
+}
+
+void MoveParentMatcher::printImpl(raw_ostream &OS, unsigned indent) const {
+  OS.indent(indent) << "MoveParent\n";
+}
+
+void CheckSameMatcher::printImpl(raw_ostream &OS, unsigned indent) const {
+  OS.indent(indent) << "CheckSame " << MatchNumber << '\n';
+}
+
+void CheckChildSameMatcher::printImpl(raw_ostream &OS, unsigned indent) const {
+  OS.indent(indent) << "CheckChild" << ChildNo << "Same\n";
+}
+
+void CheckPatternPredicateMatcher::
+printImpl(raw_ostream &OS, unsigned indent) const {
+  OS.indent(indent) << "CheckPatternPredicate " << Predicate << '\n';
+}
+
+void CheckPredicateMatcher::printImpl(raw_ostream &OS, unsigned indent) const {
+  OS.indent(indent) << "CheckPredicate " << getPredicate().getFnName() << '\n';
+}
+
+void CheckOpcodeMatcher::printImpl(raw_ostream &OS, unsigned indent) const {
+  OS.indent(indent) << "CheckOpcode " << Opcode.getEnumName() << '\n';
+}
+
+void SwitchOpcodeMatcher::printImpl(raw_ostream &OS, unsigned indent) const {
+  OS.indent(indent) << "SwitchOpcode: {\n";
+  for (const auto &C : Cases) {
+    OS.indent(indent) << "case " << C.first->getEnumName() << ":\n";
+    C.second->print(OS, indent+2);
+  }
+  OS.indent(indent) << "}\n";
+}
+
+
+void CheckTypeMatcher::printImpl(raw_ostream &OS, unsigned indent) const {
+  OS.indent(indent) << "CheckType " << getEnumName(Type) << ", ResNo="
+                    << ResNo << '\n';
+}
+
+void SwitchTypeMatcher::printImpl(raw_ostream &OS, unsigned indent) const {
+  OS.indent(indent) << "SwitchType: {\n";
+  for (const auto &C : Cases) {
+    OS.indent(indent) << "case " << getEnumName(C.first) << ":\n";
+    C.second->print(OS, indent+2);
+  }
+  OS.indent(indent) << "}\n";
+}
+
+void CheckChildTypeMatcher::printImpl(raw_ostream &OS, unsigned indent) const {
+  OS.indent(indent) << "CheckChildType " << ChildNo << " "
+                    << getEnumName(Type) << '\n';
+}
+
+
+void CheckIntegerMatcher::printImpl(raw_ostream &OS, unsigned indent) const {
+  OS.indent(indent) << "CheckInteger " << Value << '\n';
+}
+
+void CheckChildIntegerMatcher::printImpl(raw_ostream &OS,
+                                         unsigned indent) const {
+  OS.indent(indent) << "CheckChildInteger " << ChildNo << " " << Value << '\n';
+}
+
+void CheckCondCodeMatcher::printImpl(raw_ostream &OS, unsigned indent) const {
+  OS.indent(indent) << "CheckCondCode ISD::" << CondCodeName << '\n';
+}
+
+void CheckChild2CondCodeMatcher::printImpl(raw_ostream &OS,
+                                           unsigned indent) const {
+  OS.indent(indent) << "CheckChild2CondCode ISD::" << CondCodeName << '\n';
+}
+
+void CheckValueTypeMatcher::printImpl(raw_ostream &OS, unsigned indent) const {
+  OS.indent(indent) << "CheckValueType MVT::" << TypeName << '\n';
+}
+
+void CheckComplexPatMatcher::printImpl(raw_ostream &OS, unsigned indent) const {
+  OS.indent(indent) << "CheckComplexPat " << Pattern.getSelectFunc() << '\n';
+}
+
+void CheckAndImmMatcher::printImpl(raw_ostream &OS, unsigned indent) const {
+  OS.indent(indent) << "CheckAndImm " << Value << '\n';
+}
+
+void CheckOrImmMatcher::printImpl(raw_ostream &OS, unsigned indent) const {
+  OS.indent(indent) << "CheckOrImm " << Value << '\n';
+}
+
+void CheckFoldableChainNodeMatcher::printImpl(raw_ostream &OS,
+                                              unsigned indent) const {
+  OS.indent(indent) << "CheckFoldableChainNode\n";
+}
+
+void CheckImmAllOnesVMatcher::printImpl(raw_ostream &OS,
+                                        unsigned indent) const {
+  OS.indent(indent) << "CheckAllOnesV\n";
+}
+
+void CheckImmAllZerosVMatcher::printImpl(raw_ostream &OS,
+                                         unsigned indent) const {
+  OS.indent(indent) << "CheckAllZerosV\n";
+}
+
+void EmitIntegerMatcher::printImpl(raw_ostream &OS, unsigned indent) const {
+  OS.indent(indent) << "EmitInteger " << Val << " VT=" << getEnumName(VT)
+                    << '\n';
+}
+
+void EmitStringIntegerMatcher::
+printImpl(raw_ostream &OS, unsigned indent) const {
+  OS.indent(indent) << "EmitStringInteger " << Val << " VT=" << getEnumName(VT)
+                    << '\n';
+}
+
+void EmitRegisterMatcher::printImpl(raw_ostream &OS, unsigned indent) const {
+  OS.indent(indent) << "EmitRegister ";
+  if (Reg)
+    OS << Reg->getName();
+  else
+    OS << "zero_reg";
+  OS << " VT=" << getEnumName(VT) << '\n';
+}
+
+void EmitConvertToTargetMatcher::
+printImpl(raw_ostream &OS, unsigned indent) const {
+  OS.indent(indent) << "EmitConvertToTarget " << Slot << '\n';
+}
+
+void EmitMergeInputChainsMatcher::
+printImpl(raw_ostream &OS, unsigned indent) const {
+  OS.indent(indent) << "EmitMergeInputChains <todo: args>\n";
+}
+
+void EmitCopyToRegMatcher::printImpl(raw_ostream &OS, unsigned indent) const {
+  OS.indent(indent) << "EmitCopyToReg <todo: args>\n";
+}
+
+void EmitNodeXFormMatcher::printImpl(raw_ostream &OS, unsigned indent) const {
+  OS.indent(indent) << "EmitNodeXForm " << NodeXForm->getName()
+                    << " Slot=" << Slot << '\n';
+}
+
+
+void EmitNodeMatcherCommon::printImpl(raw_ostream &OS, unsigned indent) const {
+  OS.indent(indent);
+  OS << (isa<MorphNodeToMatcher>(this) ? "MorphNodeTo: " : "EmitNode: ")
+     << OpcodeName << ": <todo flags> ";
+
+  for (unsigned i = 0, e = VTs.size(); i != e; ++i)
+    OS << ' ' << getEnumName(VTs[i]);
+  OS << '(';
+  for (unsigned i = 0, e = Operands.size(); i != e; ++i)
+    OS << Operands[i] << ' ';
+  OS << ")\n";
+}
+
+void CompleteMatchMatcher::printImpl(raw_ostream &OS, unsigned indent) const {
+  OS.indent(indent) << "CompleteMatch <todo args>\n";
+  OS.indent(indent) << "Src = " << *Pattern.getSrcPattern() << "\n";
+  OS.indent(indent) << "Dst = " << *Pattern.getDstPattern() << "\n";
+}
+
+bool CheckOpcodeMatcher::isEqualImpl(const Matcher *M) const {
+  // Note: pointer equality isn't enough here, we have to check the enum names
+  // to ensure that the nodes are for the same opcode.
+  return cast<CheckOpcodeMatcher>(M)->Opcode.getEnumName() ==
+          Opcode.getEnumName();
+}
+
+// Two emit nodes are equal only if every stored attribute matches.
+bool EmitNodeMatcherCommon::isEqualImpl(const Matcher *m) const {
+  const EmitNodeMatcherCommon *M = cast<EmitNodeMatcherCommon>(m);
+  return M->OpcodeName == OpcodeName && M->VTs == VTs &&
+         M->Operands == Operands && M->HasChain == HasChain &&
+         M->HasInGlue == HasInGlue && M->HasOutGlue == HasOutGlue &&
+         M->HasMemRefs == HasMemRefs &&
+         M->NumFixedArityOperands == NumFixedArityOperands;
+}
+
+// Out-of-line anchors pin the vtables to this translation unit.
+void EmitNodeMatcher::anchor() { }
+
+void MorphNodeToMatcher::anchor() { }
+
+// isContradictoryImpl Implementations.  Two matchers are "contradictory" when
+// no node could satisfy both, letting the optimizer prune dead scopes.
+
+// Return true if no value could have both type T1 and type T2 (iPTR is
+// compatible with any scalar integer type).
+static bool TypesAreContradictory(MVT::SimpleValueType T1,
+                                  MVT::SimpleValueType T2) {
+  // If the two types are the same, then they are the same, so they don't
+  // contradict.
+  if (T1 == T2) return false;
+
+  // If either type is about iPtr, then they don't conflict unless the other
+  // one is not a scalar integer type.
+  if (T1 == MVT::iPTR)
+    return !MVT(T2).isInteger() || MVT(T2).isVector();
+
+  if (T2 == MVT::iPTR)
+    return !MVT(T1).isInteger() || MVT(T1).isVector();
+
+  // Otherwise, they are two different non-iPTR types, they conflict.
+  return true;
+}
+
+bool CheckOpcodeMatcher::isContradictoryImpl(const Matcher *M) const {
+  if (const CheckOpcodeMatcher *COM = dyn_cast<CheckOpcodeMatcher>(M)) {
+    // One node can't have two different opcodes!
+    // Note: pointer equality isn't enough here, we have to check the enum names
+    // to ensure that the nodes are for the same opcode.
+    return COM->getOpcode().getEnumName() != getOpcode().getEnumName();
+  }
+
+  // If the node has a known type, and if the type we're checking for is
+  // different, then we know they contradict. For example, a check for
+  // ISD::STORE will never be true at the same time a check for Type i32 is.
+  if (const CheckTypeMatcher *CT = dyn_cast<CheckTypeMatcher>(M)) {
+    // If checking for a result the opcode doesn't have, it can't match.
+    if (CT->getResNo() >= getOpcode().getNumResults())
+      return true;
+
+    MVT::SimpleValueType NodeType = getOpcode().getKnownType(CT->getResNo());
+    if (NodeType != MVT::Other)
+      return TypesAreContradictory(NodeType, CT->getType());
+  }
+
+  return false;
+}
+
+bool CheckTypeMatcher::isContradictoryImpl(const Matcher *M) const {
+  if (const CheckTypeMatcher *CT = dyn_cast<CheckTypeMatcher>(M))
+    return TypesAreContradictory(getType(), CT->getType());
+  return false;
+}
+
+bool CheckChildTypeMatcher::isContradictoryImpl(const Matcher *M) const {
+  if (const CheckChildTypeMatcher *CC = dyn_cast<CheckChildTypeMatcher>(M)) {
+    // If the two checks are about different nodes, we don't know if they
+    // conflict!
+    if (CC->getChildNo() != getChildNo())
+      return false;
+
+    return TypesAreContradictory(getType(), CC->getType());
+  }
+  return false;
+}
+
+bool CheckIntegerMatcher::isContradictoryImpl(const Matcher *M) const {
+  if (const CheckIntegerMatcher *CIM = dyn_cast<CheckIntegerMatcher>(M))
+    return CIM->getValue() != getValue();
+  return false;
+}
+
+bool CheckChildIntegerMatcher::isContradictoryImpl(const Matcher *M) const {
+  if (const CheckChildIntegerMatcher *CCIM = dyn_cast<CheckChildIntegerMatcher>(M)) {
+    // If the two checks are about different nodes, we don't know if they
+    // conflict!
+    if (CCIM->getChildNo() != getChildNo())
+      return false;
+
+    return CCIM->getValue() != getValue();
+  }
+  return false;
+}
+
+bool CheckValueTypeMatcher::isContradictoryImpl(const Matcher *M) const {
+  if (const CheckValueTypeMatcher *CVT = dyn_cast<CheckValueTypeMatcher>(M))
+    return CVT->getTypeName() != getTypeName();
+  return false;
+}
+
+bool CheckImmAllOnesVMatcher::isContradictoryImpl(const Matcher *M) const {
+  // AllZeros is contradictory.
+  return isa<CheckImmAllZerosVMatcher>(M);
+}
+
+bool CheckImmAllZerosVMatcher::isContradictoryImpl(const Matcher *M) const {
+  // AllOnes is contradictory.
+  return isa<CheckImmAllOnesVMatcher>(M);
+}
+
+bool CheckCondCodeMatcher::isContradictoryImpl(const Matcher *M) const {
+  if (const auto *CCCM = dyn_cast<CheckCondCodeMatcher>(M))
+    return CCCM->getCondCodeName() != getCondCodeName();
+  return false;
+}
+
+bool CheckChild2CondCodeMatcher::isContradictoryImpl(const Matcher *M) const {
+  if (const auto *CCCCM = dyn_cast<CheckChild2CondCodeMatcher>(M))
+    return CCCCM->getCondCodeName() != getCondCodeName();
+  return false;
+}
diff --git a/contrib/libs/llvm16/utils/TableGen/DAGISelMatcher.h b/contrib/libs/llvm16/utils/TableGen/DAGISelMatcher.h
new file mode 100644
index 0000000000..77280acaf4
--- /dev/null
+++ b/contrib/libs/llvm16/utils/TableGen/DAGISelMatcher.h
@@ -0,0 +1,1125 @@
+//===- DAGISelMatcher.h - Representation of DAG pattern matcher -*- C++ -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_UTILS_TABLEGEN_DAGISELMATCHER_H
+#define LLVM_UTILS_TABLEGEN_DAGISELMATCHER_H
+
+#include "llvm/ADT/ArrayRef.h"
+#include "llvm/ADT/SmallVector.h"
+#include "llvm/ADT/StringRef.h"
+#include "llvm/Support/Casting.h"
+#include "llvm/Support/MachineValueType.h"
+
+namespace llvm {
+ struct CodeGenRegister;
+ class CodeGenDAGPatterns;
+ class Matcher;
+ class PatternToMatch;
+ class raw_ostream;
+ class ComplexPattern;
+ class Record;
+ class SDNodeInfo;
+ class TreePredicateFn;
+ class TreePattern;
+
+Matcher *ConvertPatternToMatcher(const PatternToMatch &Pattern,unsigned Variant,
+ const CodeGenDAGPatterns &CGP);
+void OptimizeMatcher(std::unique_ptr<Matcher> &Matcher,
+ const CodeGenDAGPatterns &CGP);
+void EmitMatcherTable(Matcher *Matcher, const CodeGenDAGPatterns &CGP,
+ raw_ostream &OS);
+
+
+/// Matcher - Base class for all the DAG ISel Matcher representation
+/// nodes.
+class Matcher {
+  // The next matcher node that is executed after this one. Null if this is the
+  // last stage of a match.
+  std::unique_ptr<Matcher> Next;
+  // NOTE(review): Size is not initialized by the constructor; it is only
+  // meaningful after setSize() has been called by the table-sizing pass —
+  // confirm no caller reads it earlier.
+  size_t Size; // Size in bytes of matcher and all its children (if any).
+  virtual void anchor();
+public:
+  enum KindTy {
+    // Matcher state manipulation.
+    Scope, // Push a checking scope.
+    RecordNode, // Record the current node.
+    RecordChild, // Record a child of the current node.
+    RecordMemRef, // Record the memref in the current node.
+    CaptureGlueInput, // If the current node has an input glue, save it.
+    MoveChild, // Move current node to specified child.
+    MoveParent, // Move current node to parent.
+
+    // Predicate checking.
+    CheckSame, // Fail if not same as prev match.
+    CheckChildSame, // Fail if child not same as prev match.
+    CheckPatternPredicate,
+    CheckPredicate, // Fail if node predicate fails.
+    CheckOpcode, // Fail if not opcode.
+    SwitchOpcode, // Dispatch based on opcode.
+    CheckType, // Fail if not correct type.
+    SwitchType, // Dispatch based on type.
+    CheckChildType, // Fail if child has wrong type.
+    CheckInteger, // Fail if wrong val.
+    CheckChildInteger, // Fail if child is wrong val.
+    CheckCondCode, // Fail if not condcode.
+    CheckChild2CondCode, // Fail if child is wrong condcode.
+    CheckValueType,
+    CheckComplexPat,
+    CheckAndImm,
+    CheckOrImm,
+    CheckImmAllOnesV,
+    CheckImmAllZerosV,
+    CheckFoldableChainNode,
+
+    // Node creation/emisssion.
+    EmitInteger, // Create a TargetConstant
+    EmitStringInteger, // Create a TargetConstant from a string.
+    EmitRegister, // Create a register.
+    EmitConvertToTarget, // Convert a imm/fpimm to target imm/fpimm
+    EmitMergeInputChains, // Merge together a chains for an input.
+    EmitCopyToReg, // Emit a copytoreg into a physreg.
+    EmitNode, // Create a DAG node
+    EmitNodeXForm, // Run a SDNodeXForm
+    CompleteMatch, // Finish a match and update the results.
+    MorphNodeTo, // Build a node, finish a match and update results.
+
+    // Highest enum value; watch out when adding more.
+    HighestKind = MorphNodeTo
+  };
+  const KindTy Kind;
+
+protected:
+  Matcher(KindTy K) : Kind(K) {}
+public:
+  virtual ~Matcher() {}
+
+  // NOTE(review): these accessors narrow size_t <-> unsigned; safe only while
+  // generated matcher tables stay below 4 GiB.
+  unsigned getSize() const { return Size; }
+  void setSize(unsigned sz) { Size = sz; }
+  KindTy getKind() const { return Kind; }
+
+  // setNext takes ownership of C (Next.reset); takeNext releases ownership of
+  // the successor to the caller.
+  Matcher *getNext() { return Next.get(); }
+  const Matcher *getNext() const { return Next.get(); }
+  void setNext(Matcher *C) { Next.reset(C); }
+  Matcher *takeNext() { return Next.release(); }
+
+  std::unique_ptr<Matcher> &getNextPtr() { return Next; }
+
+  // Equality requires identical kinds; kind-specific comparison is delegated
+  // to isEqualImpl.
+  bool isEqual(const Matcher *M) const {
+    if (getKind() != M->getKind()) return false;
+    return isEqualImpl(M);
+  }
+
+  /// isSimplePredicateNode - Return true if this is a simple predicate that
+  /// operates on the node or its children without potential side effects or a
+  /// change of the current node.
+  bool isSimplePredicateNode() const {
+    switch (getKind()) {
+    default: return false;
+    case CheckSame:
+    case CheckChildSame:
+    case CheckPatternPredicate:
+    case CheckPredicate:
+    case CheckOpcode:
+    case CheckType:
+    case CheckChildType:
+    case CheckInteger:
+    case CheckChildInteger:
+    case CheckCondCode:
+    case CheckChild2CondCode:
+    case CheckValueType:
+    case CheckAndImm:
+    case CheckOrImm:
+    case CheckImmAllOnesV:
+    case CheckImmAllZerosV:
+    case CheckFoldableChainNode:
+      return true;
+    }
+  }
+
+  /// isSimplePredicateOrRecordNode - Return true if this is a record node or
+  /// a simple predicate.
+  bool isSimplePredicateOrRecordNode() const {
+    return isSimplePredicateNode() ||
+           getKind() == RecordNode || getKind() == RecordChild;
+  }
+
+  /// unlinkNode - Unlink the specified node from this chain. If Other == this,
+  /// we unlink the next pointer and return it. Otherwise we unlink Other from
+  /// the list and return this.
+  Matcher *unlinkNode(Matcher *Other);
+
+  /// canMoveBefore - Return true if this matcher is the same as Other, or if
+  /// we can move this matcher past all of the nodes in-between Other and this
+  /// node. Other must be equal to or before this.
+  bool canMoveBefore(const Matcher *Other) const;
+
+  /// canMoveBeforeNode - Return true if it is safe to move the current matcher
+  /// across the specified one.
+  bool canMoveBeforeNode(const Matcher *Other) const;
+
+  /// isContradictory - Return true of these two matchers could never match on
+  /// the same node.
+  bool isContradictory(const Matcher *Other) const {
+    // Since this predicate is reflexive, we canonicalize the ordering so that
+    // we always match a node against nodes with kinds that are greater or equal
+    // to them. For example, we'll pass in a CheckType node as an argument to
+    // the CheckOpcode method, not the other way around.
+    if (getKind() < Other->getKind())
+      return isContradictoryImpl(Other);
+    return Other->isContradictoryImpl(this);
+  }
+
+  void print(raw_ostream &OS, unsigned indent = 0) const;
+  void printOne(raw_ostream &OS) const;
+  void dump() const;
+protected:
+  virtual void printImpl(raw_ostream &OS, unsigned indent) const = 0;
+  virtual bool isEqualImpl(const Matcher *M) const = 0;
+  // Default: no contradiction known between different matcher kinds.
+  virtual bool isContradictoryImpl(const Matcher *M) const { return false; }
+};
+
+/// ScopeMatcher - This attempts to match each of its children to find the first
+/// one that successfully matches. If one child fails, it tries the next child.
+/// If none of the children match then this check fails. It never has a 'next'.
+class ScopeMatcher : public Matcher {
+  // Owned raw pointers: deleted by the out-of-line destructor, resetChild()
+  // and setNumChildren().
+  SmallVector<Matcher*, 4> Children;
+public:
+  ScopeMatcher(ArrayRef<Matcher *> children)
+    : Matcher(Scope), Children(children.begin(), children.end()) {
+  }
+  ~ScopeMatcher() override;
+
+  unsigned getNumChildren() const { return Children.size(); }
+
+  Matcher *getChild(unsigned i) { return Children[i]; }
+  const Matcher *getChild(unsigned i) const { return Children[i]; }
+
+  // Replace child i with N, deleting the previous child.
+  void resetChild(unsigned i, Matcher *N) {
+    delete Children[i];
+    Children[i] = N;
+  }
+
+  // Transfer ownership of child i to the caller, leaving a null slot behind.
+  Matcher *takeChild(unsigned i) {
+    Matcher *Res = Children[i];
+    Children[i] = nullptr;
+    return Res;
+  }
+
+  // Shrink the child list to NC entries, deleting any children dropped.
+  void setNumChildren(unsigned NC) {
+    if (NC < Children.size()) {
+      // delete any children we're about to lose pointers to.
+      for (unsigned i = NC, e = Children.size(); i != e; ++i)
+        delete Children[i];
+    }
+    Children.resize(NC);
+  }
+
+  static bool classof(const Matcher *N) {
+    return N->getKind() == Scope;
+  }
+
+private:
+  void printImpl(raw_ostream &OS, unsigned indent) const override;
+  // Scopes are never considered equal to one another.
+  bool isEqualImpl(const Matcher *M) const override { return false; }
+};
+
+/// RecordMatcher - Save the current node in the operand list.
+class RecordMatcher : public Matcher {
+ /// WhatFor - This is a string indicating why we're recording this. This
+ /// should only be used for comment generation not anything semantic.
+ std::string WhatFor;
+
+ /// ResultNo - The slot number in the RecordedNodes vector that this will be,
+ /// just printed as a comment.
+ unsigned ResultNo;
+public:
+ RecordMatcher(const std::string &whatfor, unsigned resultNo)
+ : Matcher(RecordNode), WhatFor(whatfor), ResultNo(resultNo) {}
+
+ const std::string &getWhatFor() const { return WhatFor; }
+ unsigned getResultNo() const { return ResultNo; }
+
+ static bool classof(const Matcher *N) {
+ return N->getKind() == RecordNode;
+ }
+
+private:
+ void printImpl(raw_ostream &OS, unsigned indent) const override;
+ bool isEqualImpl(const Matcher *M) const override { return true; }
+};
+
+/// RecordChildMatcher - Save a numbered child of the current node, or fail
+/// the match if it doesn't exist. This is logically equivalent to:
+/// MoveChild N + RecordNode + MoveParent.
+class RecordChildMatcher : public Matcher {
+ unsigned ChildNo;
+
+ /// WhatFor - This is a string indicating why we're recording this. This
+ /// should only be used for comment generation not anything semantic.
+ std::string WhatFor;
+
+ /// ResultNo - The slot number in the RecordedNodes vector that this will be,
+ /// just printed as a comment.
+ unsigned ResultNo;
+public:
+ RecordChildMatcher(unsigned childno, const std::string &whatfor,
+ unsigned resultNo)
+ : Matcher(RecordChild), ChildNo(childno), WhatFor(whatfor),
+ ResultNo(resultNo) {}
+
+ unsigned getChildNo() const { return ChildNo; }
+ const std::string &getWhatFor() const { return WhatFor; }
+ unsigned getResultNo() const { return ResultNo; }
+
+ static bool classof(const Matcher *N) {
+ return N->getKind() == RecordChild;
+ }
+
+private:
+ void printImpl(raw_ostream &OS, unsigned indent) const override;
+ bool isEqualImpl(const Matcher *M) const override {
+ return cast<RecordChildMatcher>(M)->getChildNo() == getChildNo();
+ }
+};
+
+/// RecordMemRefMatcher - Save the current node's memref.
+class RecordMemRefMatcher : public Matcher {
+public:
+ RecordMemRefMatcher() : Matcher(RecordMemRef) {}
+
+ static bool classof(const Matcher *N) {
+ return N->getKind() == RecordMemRef;
+ }
+
+private:
+ void printImpl(raw_ostream &OS, unsigned indent) const override;
+ bool isEqualImpl(const Matcher *M) const override { return true; }
+};
+
+
+/// CaptureGlueInputMatcher - If the current record has a glue input, record
+/// it so that it is used as an input to the generated code.
+class CaptureGlueInputMatcher : public Matcher {
+public:
+ CaptureGlueInputMatcher() : Matcher(CaptureGlueInput) {}
+
+ static bool classof(const Matcher *N) {
+ return N->getKind() == CaptureGlueInput;
+ }
+
+private:
+ void printImpl(raw_ostream &OS, unsigned indent) const override;
+ bool isEqualImpl(const Matcher *M) const override { return true; }
+};
+
+/// MoveChildMatcher - This tells the interpreter to move into the
+/// specified child node.
+class MoveChildMatcher : public Matcher {
+ unsigned ChildNo;
+public:
+ MoveChildMatcher(unsigned childNo) : Matcher(MoveChild), ChildNo(childNo) {}
+
+ unsigned getChildNo() const { return ChildNo; }
+
+ static bool classof(const Matcher *N) {
+ return N->getKind() == MoveChild;
+ }
+
+private:
+ void printImpl(raw_ostream &OS, unsigned indent) const override;
+ bool isEqualImpl(const Matcher *M) const override {
+ return cast<MoveChildMatcher>(M)->getChildNo() == getChildNo();
+ }
+};
+
+/// MoveParentMatcher - This tells the interpreter to move to the parent
+/// of the current node.
+class MoveParentMatcher : public Matcher {
+public:
+ MoveParentMatcher() : Matcher(MoveParent) {}
+
+ static bool classof(const Matcher *N) {
+ return N->getKind() == MoveParent;
+ }
+
+private:
+ void printImpl(raw_ostream &OS, unsigned indent) const override;
+ bool isEqualImpl(const Matcher *M) const override { return true; }
+};
+
+/// CheckSameMatcher - This checks to see if this node is exactly the same
+/// node as the specified match that was recorded with 'Record'. This is used
+/// when patterns have the same name in them, like '(mul GPR:$in, GPR:$in)'.
+class CheckSameMatcher : public Matcher {
+ unsigned MatchNumber;
+public:
+ CheckSameMatcher(unsigned matchnumber)
+ : Matcher(CheckSame), MatchNumber(matchnumber) {}
+
+ unsigned getMatchNumber() const { return MatchNumber; }
+
+ static bool classof(const Matcher *N) {
+ return N->getKind() == CheckSame;
+ }
+
+private:
+ void printImpl(raw_ostream &OS, unsigned indent) const override;
+ bool isEqualImpl(const Matcher *M) const override {
+ return cast<CheckSameMatcher>(M)->getMatchNumber() == getMatchNumber();
+ }
+};
+
+/// CheckChildSameMatcher - This checks to see if child node is exactly the same
+/// node as the specified match that was recorded with 'Record'. This is used
+/// when patterns have the same name in them, like '(mul GPR:$in, GPR:$in)'.
+class CheckChildSameMatcher : public Matcher {
+ unsigned ChildNo;
+ unsigned MatchNumber;
+public:
+ CheckChildSameMatcher(unsigned childno, unsigned matchnumber)
+ : Matcher(CheckChildSame), ChildNo(childno), MatchNumber(matchnumber) {}
+
+ unsigned getChildNo() const { return ChildNo; }
+ unsigned getMatchNumber() const { return MatchNumber; }
+
+ static bool classof(const Matcher *N) {
+ return N->getKind() == CheckChildSame;
+ }
+
+private:
+ void printImpl(raw_ostream &OS, unsigned indent) const override;
+ bool isEqualImpl(const Matcher *M) const override {
+ return cast<CheckChildSameMatcher>(M)->ChildNo == ChildNo &&
+ cast<CheckChildSameMatcher>(M)->MatchNumber == MatchNumber;
+ }
+};
+
+/// CheckPatternPredicateMatcher - This checks the target-specific predicate
+/// to see if the entire pattern is capable of matching. This predicate does
+/// not take a node as input. This is used for subtarget feature checks etc.
+class CheckPatternPredicateMatcher : public Matcher {
+ std::string Predicate;
+public:
+ CheckPatternPredicateMatcher(StringRef predicate)
+ : Matcher(CheckPatternPredicate), Predicate(predicate) {}
+
+ StringRef getPredicate() const { return Predicate; }
+
+ static bool classof(const Matcher *N) {
+ return N->getKind() == CheckPatternPredicate;
+ }
+
+private:
+ void printImpl(raw_ostream &OS, unsigned indent) const override;
+ bool isEqualImpl(const Matcher *M) const override {
+ return cast<CheckPatternPredicateMatcher>(M)->getPredicate() == Predicate;
+ }
+};
+
+/// CheckPredicateMatcher - This checks the target-specific predicate to
+/// see if the node is acceptable.
+class CheckPredicateMatcher : public Matcher {
+  TreePattern *Pred;
+  const SmallVector<unsigned, 4> Operands;
+public:
+  CheckPredicateMatcher(const TreePredicateFn &pred,
+                        const SmallVectorImpl<unsigned> &Operands);
+
+  TreePredicateFn getPredicate() const;
+  unsigned getNumOperands() const;
+  unsigned getOperandNo(unsigned i) const;
+
+  static bool classof(const Matcher *N) {
+    return N->getKind() == CheckPredicate;
+  }
+
+private:
+  void printImpl(raw_ostream &OS, unsigned indent) const override;
+  // NOTE(review): equality compares only the predicate's TreePattern pointer;
+  // the captured Operands list is ignored — confirm two matchers with the
+  // same predicate but different operands cannot be compared here.
+  bool isEqualImpl(const Matcher *M) const override {
+    return cast<CheckPredicateMatcher>(M)->Pred == Pred;
+  }
+};
+
+
+/// CheckOpcodeMatcher - This checks to see if the current node has the
+/// specified opcode, if not it fails to match.
+class CheckOpcodeMatcher : public Matcher {
+  // Non-owning reference; presumably owned by CodeGenDAGPatterns and must
+  // outlive this matcher — TODO confirm.
+  const SDNodeInfo &Opcode;
+public:
+  CheckOpcodeMatcher(const SDNodeInfo &opcode)
+    : Matcher(CheckOpcode), Opcode(opcode) {}
+
+  const SDNodeInfo &getOpcode() const { return Opcode; }
+
+  static bool classof(const Matcher *N) {
+    return N->getKind() == CheckOpcode;
+  }
+
+private:
+  void printImpl(raw_ostream &OS, unsigned indent) const override;
+  // Both are defined out of line in the corresponding .cpp file.
+  bool isEqualImpl(const Matcher *M) const override;
+  bool isContradictoryImpl(const Matcher *M) const override;
+};
+
+/// SwitchOpcodeMatcher - Switch based on the current node's opcode, dispatching
+/// to one matcher per opcode. If the opcode doesn't match any of the cases,
+/// then the match fails. This is semantically equivalent to a Scope node where
+/// every child does a CheckOpcode, but is much faster.
+class SwitchOpcodeMatcher : public Matcher {
+ SmallVector<std::pair<const SDNodeInfo*, Matcher*>, 8> Cases;
+public:
+ SwitchOpcodeMatcher(ArrayRef<std::pair<const SDNodeInfo*, Matcher*> > cases)
+ : Matcher(SwitchOpcode), Cases(cases.begin(), cases.end()) {}
+ ~SwitchOpcodeMatcher() override;
+
+ static bool classof(const Matcher *N) {
+ return N->getKind() == SwitchOpcode;
+ }
+
+ unsigned getNumCases() const { return Cases.size(); }
+
+ const SDNodeInfo &getCaseOpcode(unsigned i) const { return *Cases[i].first; }
+ Matcher *getCaseMatcher(unsigned i) { return Cases[i].second; }
+ const Matcher *getCaseMatcher(unsigned i) const { return Cases[i].second; }
+
+private:
+ void printImpl(raw_ostream &OS, unsigned indent) const override;
+ bool isEqualImpl(const Matcher *M) const override { return false; }
+};
+
+/// CheckTypeMatcher - This checks to see if the current node has the
+/// specified type at the specified result, if not it fails to match.
+class CheckTypeMatcher : public Matcher {
+  MVT::SimpleValueType Type;
+  unsigned ResNo;
+public:
+  CheckTypeMatcher(MVT::SimpleValueType type, unsigned resno)
+    : Matcher(CheckType), Type(type), ResNo(resno) {}
+
+  MVT::SimpleValueType getType() const { return Type; }
+  unsigned getResNo() const { return ResNo; }
+
+  static bool classof(const Matcher *N) {
+    return N->getKind() == CheckType;
+  }
+
+private:
+  void printImpl(raw_ostream &OS, unsigned indent) const override;
+  // NOTE(review): equality compares only Type and ignores ResNo — verify this
+  // is intentional, since it allows merging checks on different result
+  // numbers.
+  bool isEqualImpl(const Matcher *M) const override {
+    return cast<CheckTypeMatcher>(M)->Type == Type;
+  }
+  bool isContradictoryImpl(const Matcher *M) const override;
+};
+
+/// SwitchTypeMatcher - Switch based on the current node's type, dispatching
+/// to one matcher per case. If the type doesn't match any of the cases,
+/// then the match fails. This is semantically equivalent to a Scope node where
+/// every child does a CheckType, but is much faster.
+class SwitchTypeMatcher : public Matcher {
+ SmallVector<std::pair<MVT::SimpleValueType, Matcher*>, 8> Cases;
+public:
+ SwitchTypeMatcher(ArrayRef<std::pair<MVT::SimpleValueType, Matcher*> > cases)
+ : Matcher(SwitchType), Cases(cases.begin(), cases.end()) {}
+ ~SwitchTypeMatcher() override;
+
+ static bool classof(const Matcher *N) {
+ return N->getKind() == SwitchType;
+ }
+
+ unsigned getNumCases() const { return Cases.size(); }
+
+ MVT::SimpleValueType getCaseType(unsigned i) const { return Cases[i].first; }
+ Matcher *getCaseMatcher(unsigned i) { return Cases[i].second; }
+ const Matcher *getCaseMatcher(unsigned i) const { return Cases[i].second; }
+
+private:
+ void printImpl(raw_ostream &OS, unsigned indent) const override;
+ bool isEqualImpl(const Matcher *M) const override { return false; }
+};
+
+
+/// CheckChildTypeMatcher - This checks to see if a child node has the
+/// specified type, if not it fails to match.
+class CheckChildTypeMatcher : public Matcher {
+ unsigned ChildNo;
+ MVT::SimpleValueType Type;
+public:
+ CheckChildTypeMatcher(unsigned childno, MVT::SimpleValueType type)
+ : Matcher(CheckChildType), ChildNo(childno), Type(type) {}
+
+ unsigned getChildNo() const { return ChildNo; }
+ MVT::SimpleValueType getType() const { return Type; }
+
+ static bool classof(const Matcher *N) {
+ return N->getKind() == CheckChildType;
+ }
+
+private:
+ void printImpl(raw_ostream &OS, unsigned indent) const override;
+ bool isEqualImpl(const Matcher *M) const override {
+ return cast<CheckChildTypeMatcher>(M)->ChildNo == ChildNo &&
+ cast<CheckChildTypeMatcher>(M)->Type == Type;
+ }
+ bool isContradictoryImpl(const Matcher *M) const override;
+};
+
+
+/// CheckIntegerMatcher - This checks to see if the current node is a
+/// ConstantSDNode with the specified integer value, if not it fails to match.
+class CheckIntegerMatcher : public Matcher {
+ int64_t Value;
+public:
+ CheckIntegerMatcher(int64_t value)
+ : Matcher(CheckInteger), Value(value) {}
+
+ int64_t getValue() const { return Value; }
+
+ static bool classof(const Matcher *N) {
+ return N->getKind() == CheckInteger;
+ }
+
+private:
+ void printImpl(raw_ostream &OS, unsigned indent) const override;
+ bool isEqualImpl(const Matcher *M) const override {
+ return cast<CheckIntegerMatcher>(M)->Value == Value;
+ }
+ bool isContradictoryImpl(const Matcher *M) const override;
+};
+
+/// CheckChildIntegerMatcher - This checks to see if the child node is a
+/// ConstantSDNode with a specified integer value, if not it fails to match.
+class CheckChildIntegerMatcher : public Matcher {
+ unsigned ChildNo;
+ int64_t Value;
+public:
+ CheckChildIntegerMatcher(unsigned childno, int64_t value)
+ : Matcher(CheckChildInteger), ChildNo(childno), Value(value) {}
+
+ unsigned getChildNo() const { return ChildNo; }
+ int64_t getValue() const { return Value; }
+
+ static bool classof(const Matcher *N) {
+ return N->getKind() == CheckChildInteger;
+ }
+
+private:
+ void printImpl(raw_ostream &OS, unsigned indent) const override;
+ bool isEqualImpl(const Matcher *M) const override {
+ return cast<CheckChildIntegerMatcher>(M)->ChildNo == ChildNo &&
+ cast<CheckChildIntegerMatcher>(M)->Value == Value;
+ }
+ bool isContradictoryImpl(const Matcher *M) const override;
+};
+
+/// CheckCondCodeMatcher - This checks to see if the current node is a
+/// CondCodeSDNode with the specified condition, if not it fails to match.
+class CheckCondCodeMatcher : public Matcher {
+ StringRef CondCodeName;
+public:
+ CheckCondCodeMatcher(StringRef condcodename)
+ : Matcher(CheckCondCode), CondCodeName(condcodename) {}
+
+ StringRef getCondCodeName() const { return CondCodeName; }
+
+ static bool classof(const Matcher *N) {
+ return N->getKind() == CheckCondCode;
+ }
+
+private:
+ void printImpl(raw_ostream &OS, unsigned indent) const override;
+ bool isEqualImpl(const Matcher *M) const override {
+ return cast<CheckCondCodeMatcher>(M)->CondCodeName == CondCodeName;
+ }
+ bool isContradictoryImpl(const Matcher *M) const override;
+};
+
+/// CheckChild2CondCodeMatcher - This checks to see if child 2 node is a
+/// CondCodeSDNode with the specified condition, if not it fails to match.
+class CheckChild2CondCodeMatcher : public Matcher {
+ StringRef CondCodeName;
+public:
+ CheckChild2CondCodeMatcher(StringRef condcodename)
+ : Matcher(CheckChild2CondCode), CondCodeName(condcodename) {}
+
+ StringRef getCondCodeName() const { return CondCodeName; }
+
+ static bool classof(const Matcher *N) {
+ return N->getKind() == CheckChild2CondCode;
+ }
+
+private:
+ void printImpl(raw_ostream &OS, unsigned indent) const override;
+ bool isEqualImpl(const Matcher *M) const override {
+ return cast<CheckChild2CondCodeMatcher>(M)->CondCodeName == CondCodeName;
+ }
+ bool isContradictoryImpl(const Matcher *M) const override;
+};
+
+/// CheckValueTypeMatcher - This checks to see if the current node is a
+/// VTSDNode with the specified type, if not it fails to match.
+class CheckValueTypeMatcher : public Matcher {
+ StringRef TypeName;
+public:
+ CheckValueTypeMatcher(StringRef type_name)
+ : Matcher(CheckValueType), TypeName(type_name) {}
+
+ StringRef getTypeName() const { return TypeName; }
+
+ static bool classof(const Matcher *N) {
+ return N->getKind() == CheckValueType;
+ }
+
+private:
+ void printImpl(raw_ostream &OS, unsigned indent) const override;
+ bool isEqualImpl(const Matcher *M) const override {
+ return cast<CheckValueTypeMatcher>(M)->TypeName == TypeName;
+ }
+ bool isContradictoryImpl(const Matcher *M) const override;
+};
+
+
+
+/// CheckComplexPatMatcher - This node runs the specified ComplexPattern on
+/// the current node.
+class CheckComplexPatMatcher : public Matcher {
+  // Non-owning reference to the complex pattern to run.
+  const ComplexPattern &Pattern;
+
+  /// MatchNumber - This is the recorded nodes slot that contains the node we
+  /// want to match against.
+  unsigned MatchNumber;
+
+  /// Name - The name of the node we're matching, for comment emission.
+  std::string Name;
+
+  /// FirstResult - This is the first slot in the RecordedNodes list that the
+  /// result of the match populates.
+  unsigned FirstResult;
+public:
+  CheckComplexPatMatcher(const ComplexPattern &pattern, unsigned matchnumber,
+                         const std::string &name, unsigned firstresult)
+    : Matcher(CheckComplexPat), Pattern(pattern), MatchNumber(matchnumber),
+      Name(name), FirstResult(firstresult) {}
+
+  const ComplexPattern &getPattern() const { return Pattern; }
+  unsigned getMatchNumber() const { return MatchNumber; }
+
+  std::string getName() const { return Name; }
+  unsigned getFirstResult() const { return FirstResult; }
+
+  static bool classof(const Matcher *N) {
+    return N->getKind() == CheckComplexPat;
+  }
+
+private:
+  void printImpl(raw_ostream &OS, unsigned indent) const override;
+  // Equality is identity of the ComplexPattern plus the input slot. Name is
+  // comment-only; NOTE(review): FirstResult is also ignored — confirm that is
+  // safe for the matchers compared here.
+  bool isEqualImpl(const Matcher *M) const override {
+    return &cast<CheckComplexPatMatcher>(M)->Pattern == &Pattern &&
+           cast<CheckComplexPatMatcher>(M)->MatchNumber == MatchNumber;
+  }
+};
+
+/// CheckAndImmMatcher - This checks to see if the current node is an 'and'
+/// with something equivalent to the specified immediate.
+class CheckAndImmMatcher : public Matcher {
+ int64_t Value;
+public:
+ CheckAndImmMatcher(int64_t value)
+ : Matcher(CheckAndImm), Value(value) {}
+
+ int64_t getValue() const { return Value; }
+
+ static bool classof(const Matcher *N) {
+ return N->getKind() == CheckAndImm;
+ }
+
+private:
+ void printImpl(raw_ostream &OS, unsigned indent) const override;
+ bool isEqualImpl(const Matcher *M) const override {
+ return cast<CheckAndImmMatcher>(M)->Value == Value;
+ }
+};
+
+/// CheckOrImmMatcher - This checks to see if the current node is an 'and'
+/// with something equivalent to the specified immediate.
+class CheckOrImmMatcher : public Matcher {
+ int64_t Value;
+public:
+ CheckOrImmMatcher(int64_t value)
+ : Matcher(CheckOrImm), Value(value) {}
+
+ int64_t getValue() const { return Value; }
+
+ static bool classof(const Matcher *N) {
+ return N->getKind() == CheckOrImm;
+ }
+
+private:
+ void printImpl(raw_ostream &OS, unsigned indent) const override;
+ bool isEqualImpl(const Matcher *M) const override {
+ return cast<CheckOrImmMatcher>(M)->Value == Value;
+ }
+};
+
+/// CheckImmAllOnesVMatcher - This checks if the current node is a build_vector
+/// or splat_vector of all ones.
+class CheckImmAllOnesVMatcher : public Matcher {
+public:
+ CheckImmAllOnesVMatcher() : Matcher(CheckImmAllOnesV) {}
+
+ static bool classof(const Matcher *N) {
+ return N->getKind() == CheckImmAllOnesV;
+ }
+
+private:
+ void printImpl(raw_ostream &OS, unsigned indent) const override;
+ bool isEqualImpl(const Matcher *M) const override { return true; }
+ bool isContradictoryImpl(const Matcher *M) const override;
+};
+
+/// CheckImmAllZerosVMatcher - This checks if the current node is a
+/// build_vector or splat_vector of all zeros.
+class CheckImmAllZerosVMatcher : public Matcher {
+public:
+ CheckImmAllZerosVMatcher() : Matcher(CheckImmAllZerosV) {}
+
+ static bool classof(const Matcher *N) {
+ return N->getKind() == CheckImmAllZerosV;
+ }
+
+private:
+ void printImpl(raw_ostream &OS, unsigned indent) const override;
+ bool isEqualImpl(const Matcher *M) const override { return true; }
+ bool isContradictoryImpl(const Matcher *M) const override;
+};
+
+/// CheckFoldableChainNodeMatcher - This checks to see if the current node
+/// (which defines a chain operand) is safe to fold into a larger pattern.
+class CheckFoldableChainNodeMatcher : public Matcher {
+public:
+ CheckFoldableChainNodeMatcher()
+ : Matcher(CheckFoldableChainNode) {}
+
+ static bool classof(const Matcher *N) {
+ return N->getKind() == CheckFoldableChainNode;
+ }
+
+private:
+ void printImpl(raw_ostream &OS, unsigned indent) const override;
+ bool isEqualImpl(const Matcher *M) const override { return true; }
+};
+
+/// EmitIntegerMatcher - This creates a new TargetConstant.
+class EmitIntegerMatcher : public Matcher {
+ int64_t Val;
+ MVT::SimpleValueType VT;
+public:
+ EmitIntegerMatcher(int64_t val, MVT::SimpleValueType vt)
+ : Matcher(EmitInteger), Val(val), VT(vt) {}
+
+ int64_t getValue() const { return Val; }
+ MVT::SimpleValueType getVT() const { return VT; }
+
+ static bool classof(const Matcher *N) {
+ return N->getKind() == EmitInteger;
+ }
+
+private:
+ void printImpl(raw_ostream &OS, unsigned indent) const override;
+ bool isEqualImpl(const Matcher *M) const override {
+ return cast<EmitIntegerMatcher>(M)->Val == Val &&
+ cast<EmitIntegerMatcher>(M)->VT == VT;
+ }
+};
+
+/// EmitStringIntegerMatcher - A target constant whose value is represented
+/// by a string.
+class EmitStringIntegerMatcher : public Matcher {
+ std::string Val;
+ MVT::SimpleValueType VT;
+public:
+ EmitStringIntegerMatcher(const std::string &val, MVT::SimpleValueType vt)
+ : Matcher(EmitStringInteger), Val(val), VT(vt) {}
+
+ const std::string &getValue() const { return Val; }
+ MVT::SimpleValueType getVT() const { return VT; }
+
+ static bool classof(const Matcher *N) {
+ return N->getKind() == EmitStringInteger;
+ }
+
+private:
+ void printImpl(raw_ostream &OS, unsigned indent) const override;
+ bool isEqualImpl(const Matcher *M) const override {
+ return cast<EmitStringIntegerMatcher>(M)->Val == Val &&
+ cast<EmitStringIntegerMatcher>(M)->VT == VT;
+ }
+};
+
+/// EmitRegisterMatcher - This creates a new TargetConstant.
+class EmitRegisterMatcher : public Matcher {
+ /// Reg - The def for the register that we're emitting. If this is null, then
+ /// this is a reference to zero_reg.
+ const CodeGenRegister *Reg;
+ MVT::SimpleValueType VT;
+public:
+ EmitRegisterMatcher(const CodeGenRegister *reg, MVT::SimpleValueType vt)
+ : Matcher(EmitRegister), Reg(reg), VT(vt) {}
+
+ const CodeGenRegister *getReg() const { return Reg; }
+ MVT::SimpleValueType getVT() const { return VT; }
+
+ static bool classof(const Matcher *N) {
+ return N->getKind() == EmitRegister;
+ }
+
+private:
+ void printImpl(raw_ostream &OS, unsigned indent) const override;
+ bool isEqualImpl(const Matcher *M) const override {
+ return cast<EmitRegisterMatcher>(M)->Reg == Reg &&
+ cast<EmitRegisterMatcher>(M)->VT == VT;
+ }
+};
+
+/// EmitConvertToTargetMatcher - Emit an operation that reads a specified
+/// recorded node and converts it from being a ISD::Constant to
+/// ISD::TargetConstant, likewise for ConstantFP.
+class EmitConvertToTargetMatcher : public Matcher {
+ unsigned Slot;
+public:
+ EmitConvertToTargetMatcher(unsigned slot)
+ : Matcher(EmitConvertToTarget), Slot(slot) {}
+
+ unsigned getSlot() const { return Slot; }
+
+ static bool classof(const Matcher *N) {
+ return N->getKind() == EmitConvertToTarget;
+ }
+
+private:
+ void printImpl(raw_ostream &OS, unsigned indent) const override;
+ bool isEqualImpl(const Matcher *M) const override {
+ return cast<EmitConvertToTargetMatcher>(M)->Slot == Slot;
+ }
+};
+
+/// EmitMergeInputChainsMatcher - Emit a node that merges a list of input
+/// chains together with a token factor. The list of nodes are the nodes in the
+/// matched pattern that have chain input/outputs. This node adds all input
+/// chains of these nodes if they are not themselves a node in the pattern.
+class EmitMergeInputChainsMatcher : public Matcher {
+ SmallVector<unsigned, 3> ChainNodes;
+public:
+ EmitMergeInputChainsMatcher(ArrayRef<unsigned> nodes)
+ : Matcher(EmitMergeInputChains), ChainNodes(nodes.begin(), nodes.end()) {}
+
+ unsigned getNumNodes() const { return ChainNodes.size(); }
+
+ unsigned getNode(unsigned i) const {
+ assert(i < ChainNodes.size());
+ return ChainNodes[i];
+ }
+
+ static bool classof(const Matcher *N) {
+ return N->getKind() == EmitMergeInputChains;
+ }
+
+private:
+ void printImpl(raw_ostream &OS, unsigned indent) const override;
+ bool isEqualImpl(const Matcher *M) const override {
+ return cast<EmitMergeInputChainsMatcher>(M)->ChainNodes == ChainNodes;
+ }
+};
+
/// EmitCopyToRegMatcher - Emit a CopyToReg node from a value to a physreg,
/// pushing the chain and glue results.
///
class EmitCopyToRegMatcher : public Matcher {
  unsigned SrcSlot; // Value to copy into the physreg.
  const CodeGenRegister *DestPhysReg; // Destination physical register.

public:
  EmitCopyToRegMatcher(unsigned srcSlot,
                       const CodeGenRegister *destPhysReg)
    : Matcher(EmitCopyToReg), SrcSlot(srcSlot), DestPhysReg(destPhysReg) {}

  unsigned getSrcSlot() const { return SrcSlot; }
  const CodeGenRegister *getDestPhysReg() const { return DestPhysReg; }

  static bool classof(const Matcher *N) {
    return N->getKind() == EmitCopyToReg;
  }

private:
  void printImpl(raw_ostream &OS, unsigned indent) const override;
  // Equal iff both the source slot and the destination register match.
  bool isEqualImpl(const Matcher *M) const override {
    return cast<EmitCopyToRegMatcher>(M)->SrcSlot == SrcSlot &&
           cast<EmitCopyToRegMatcher>(M)->DestPhysReg == DestPhysReg;
  }
};
+
+
+
/// EmitNodeXFormMatcher - Emit an operation that runs an SDNodeXForm on a
/// recorded node and records the result.
class EmitNodeXFormMatcher : public Matcher {
  unsigned Slot;     // Recorded node the transform is applied to.
  Record *NodeXForm; // The SDNodeXForm record to run.
public:
  EmitNodeXFormMatcher(unsigned slot, Record *nodeXForm)
    : Matcher(EmitNodeXForm), Slot(slot), NodeXForm(nodeXForm) {}

  unsigned getSlot() const { return Slot; }
  Record *getNodeXForm() const { return NodeXForm; }

  static bool classof(const Matcher *N) {
    return N->getKind() == EmitNodeXForm;
  }

private:
  void printImpl(raw_ostream &OS, unsigned indent) const override;
  bool isEqualImpl(const Matcher *M) const override {
    return cast<EmitNodeXFormMatcher>(M)->Slot == Slot &&
           cast<EmitNodeXFormMatcher>(M)->NodeXForm == NodeXForm;
  }
};
+
/// EmitNodeMatcherCommon - Common class shared between EmitNode and
/// MorphNodeTo.
class EmitNodeMatcherCommon : public Matcher {
  std::string OpcodeName; // Target instruction enum name to emit.
  const SmallVector<MVT::SimpleValueType, 3> VTs; // Result value types.
  const SmallVector<unsigned, 6> Operands;        // Recorded-node operand slots.
  bool HasChain, HasInGlue, HasOutGlue, HasMemRefs;

  /// NumFixedArityOperands - If this is a fixed arity node, this is set to -1.
  /// If this is a variadic node, this is set to the number of fixed arity
  /// operands in the root of the pattern. The rest are appended to this node.
  int NumFixedArityOperands;
public:
  EmitNodeMatcherCommon(const std::string &opcodeName,
                        ArrayRef<MVT::SimpleValueType> vts,
                        ArrayRef<unsigned> operands,
                        bool hasChain, bool hasInGlue, bool hasOutGlue,
                        bool hasmemrefs,
                        int numfixedarityoperands, bool isMorphNodeTo)
    : Matcher(isMorphNodeTo ? MorphNodeTo : EmitNode), OpcodeName(opcodeName),
      VTs(vts.begin(), vts.end()), Operands(operands.begin(), operands.end()),
      HasChain(hasChain), HasInGlue(hasInGlue), HasOutGlue(hasOutGlue),
      HasMemRefs(hasmemrefs), NumFixedArityOperands(numfixedarityoperands) {}

  const std::string &getOpcodeName() const { return OpcodeName; }

  unsigned getNumVTs() const { return VTs.size(); }
  MVT::SimpleValueType getVT(unsigned i) const {
    assert(i < VTs.size());
    return VTs[i];
  }

  unsigned getNumOperands() const { return Operands.size(); }
  unsigned getOperand(unsigned i) const {
    assert(i < Operands.size());
    return Operands[i];
  }

  const SmallVectorImpl<MVT::SimpleValueType> &getVTList() const { return VTs; }
  const SmallVectorImpl<unsigned> &getOperandList() const { return Operands; }


  bool hasChain() const { return HasChain; }
  bool hasInFlag() const { return HasInGlue; }
  bool hasOutFlag() const { return HasOutGlue; }
  bool hasMemRefs() const { return HasMemRefs; }
  int getNumFixedArityOperands() const { return NumFixedArityOperands; }

  static bool classof(const Matcher *N) {
    return N->getKind() == EmitNode || N->getKind() == MorphNodeTo;
  }

private:
  void printImpl(raw_ostream &OS, unsigned indent) const override;
  bool isEqualImpl(const Matcher *M) const override;
};
+
/// EmitNodeMatcher - This signals a successful match and generates a node.
class EmitNodeMatcher : public EmitNodeMatcherCommon {
  void anchor() override;
  unsigned FirstResultSlot; // Slot number assigned to the node's first result.
public:
  EmitNodeMatcher(const std::string &opcodeName,
                  ArrayRef<MVT::SimpleValueType> vts,
                  ArrayRef<unsigned> operands,
                  bool hasChain, bool hasInFlag, bool hasOutFlag,
                  bool hasmemrefs,
                  int numfixedarityoperands, unsigned firstresultslot)
    : EmitNodeMatcherCommon(opcodeName, vts, operands, hasChain,
                            hasInFlag, hasOutFlag, hasmemrefs,
                            numfixedarityoperands, false),
      FirstResultSlot(firstresultslot) {}

  unsigned getFirstResultSlot() const { return FirstResultSlot; }

  static bool classof(const Matcher *N) {
    return N->getKind() == EmitNode;
  }

};
+
/// MorphNodeToMatcher - Signal a successful match by morphing the matched
/// node in place into the target node, keeping the originating pattern.
class MorphNodeToMatcher : public EmitNodeMatcherCommon {
  void anchor() override;
  const PatternToMatch &Pattern; // Pattern this morph was generated from.
public:
  MorphNodeToMatcher(const std::string &opcodeName,
                     ArrayRef<MVT::SimpleValueType> vts,
                     ArrayRef<unsigned> operands,
                     bool hasChain, bool hasInFlag, bool hasOutFlag,
                     bool hasmemrefs,
                     int numfixedarityoperands, const PatternToMatch &pattern)
    : EmitNodeMatcherCommon(opcodeName, vts, operands, hasChain,
                            hasInFlag, hasOutFlag, hasmemrefs,
                            numfixedarityoperands, true),
      Pattern(pattern) {
  }

  const PatternToMatch &getPattern() const { return Pattern; }

  static bool classof(const Matcher *N) {
    return N->getKind() == MorphNodeTo;
  }
};
+
/// CompleteMatchMatcher - Complete a match by replacing the results of the
/// pattern with the newly generated nodes. This also prints a comment
/// indicating the source and dest patterns.
class CompleteMatchMatcher : public Matcher {
  SmallVector<unsigned, 2> Results;  // Slots of the replacement results.
  const PatternToMatch &Pattern;     // Pattern being completed.
public:
  CompleteMatchMatcher(ArrayRef<unsigned> results,
                       const PatternToMatch &pattern)
    : Matcher(CompleteMatch), Results(results.begin(), results.end()),
      Pattern(pattern) {}

  unsigned getNumResults() const { return Results.size(); }
  unsigned getResult(unsigned R) const { return Results[R]; }
  const PatternToMatch &getPattern() const { return Pattern; }

  static bool classof(const Matcher *N) {
    return N->getKind() == CompleteMatch;
  }

private:
  void printImpl(raw_ostream &OS, unsigned indent) const override;
  // Patterns are compared by identity, not structure.
  bool isEqualImpl(const Matcher *M) const override {
    return cast<CompleteMatchMatcher>(M)->Results == Results &&
           &cast<CompleteMatchMatcher>(M)->Pattern == &Pattern;
  }
};
+
+} // end namespace llvm
+
+#endif
diff --git a/contrib/libs/llvm16/utils/TableGen/DAGISelMatcherEmitter.cpp b/contrib/libs/llvm16/utils/TableGen/DAGISelMatcherEmitter.cpp
new file mode 100644
index 0000000000..777e75dcd9
--- /dev/null
+++ b/contrib/libs/llvm16/utils/TableGen/DAGISelMatcherEmitter.cpp
@@ -0,0 +1,1171 @@
+//===- DAGISelMatcherEmitter.cpp - Matcher Emitter ------------------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// This file contains code to generate C++ code for a matcher.
+//
+//===----------------------------------------------------------------------===//
+
+#include "CodeGenDAGPatterns.h"
+#include "DAGISelMatcher.h"
+#include "llvm/ADT/DenseMap.h"
+#include "llvm/ADT/MapVector.h"
+#include "llvm/ADT/StringMap.h"
+#include "llvm/ADT/TinyPtrVector.h"
+#include "llvm/Support/CommandLine.h"
+#include "llvm/Support/Format.h"
+#include "llvm/Support/SourceMgr.h"
+#include "llvm/TableGen/Error.h"
+#include "llvm/TableGen/Record.h"
+
+using namespace llvm;
+
// Column widths used when pretty-printing comments into the generated
// matcher table.
enum {
  IndexWidth = 6,                  // Digits reserved for a table index.
  FullIndexWidth = IndexWidth + 4, // Index plus the 4 chars of its "/**/".
  HistOpcWidth = 40,               // Opcode column width (histogram output).
};
+
// Command-line category grouping the options of this TableGen backend.
cl::OptionCategory DAGISelCat("Options for -gen-dag-isel");

// To reduce generated source code size.
static cl::opt<bool> OmitComments("omit-comments",
                                  cl::desc("Do not generate comments"),
                                  cl::init(false), cl::cat(DAGISelCat));

// Emits OPC_Coverage opcodes plus pattern/include-path tables so matched
// patterns can be traced back to their .td definitions.
static cl::opt<bool> InstrumentCoverage(
    "instrument-coverage",
    cl::desc("Generates tables to help identify patterns matched"),
    cl::init(false), cl::cat(DAGISelCat));
+
+namespace {
/// MatcherTableEmitter - Drives the two-pass emission of the matcher table:
/// first SizeMatcherList() computes and caches byte sizes, then
/// EmitMatcherList() writes the bytes. It also interns predicates, complex
/// patterns, and node transforms so the table can refer to them by index.
class MatcherTableEmitter {
  const CodeGenDAGPatterns &CGP;

  // Per-opcode occurrence counts, gathered while sizing (see SizeMatcher).
  SmallVector<unsigned, Matcher::HighestKind+1> OpcodeCounts;

  // Interning tables for node predicates. The map stores index+1 so that a
  // default-constructed value of 0 means "not yet assigned".
  DenseMap<TreePattern *, unsigned> NodePredicateMap;
  std::vector<TreePredicateFn> NodePredicates;
  std::vector<TreePredicateFn> NodePredicatesWithOperands;

  // We de-duplicate the predicates by code string, and use this map to track
  // all the patterns with "identical" predicates.
  StringMap<TinyPtrVector<TreePattern *>> NodePredicatesByCodeToRun;

  // Interning tables for pattern predicates (same index+1 convention).
  StringMap<unsigned> PatternPredicateMap;
  std::vector<std::string> PatternPredicates;

  // Interning tables for complex patterns (same index+1 convention).
  DenseMap<const ComplexPattern*, unsigned> ComplexPatternMap;
  std::vector<const ComplexPattern*> ComplexPatterns;


  // Interning tables for SDNodeXForm records (same index+1 convention).
  DenseMap<Record*, unsigned> NodeXFormMap;
  std::vector<Record*> NodeXForms;

  // Parallel tables for -instrument-coverage: pattern strings and the
  // "file:line" include locations they came from.
  std::vector<std::string> VecIncludeStrings;
  MapVector<std::string, unsigned, StringMap<unsigned> > VecPatterns;

  // Return the index of pattern P, adding it (with its include location)
  // if it has not been seen before.
  unsigned getPatternIdxFromTable(std::string &&P, std::string &&include_loc) {
    const auto It = VecPatterns.find(P);
    if (It == VecPatterns.end()) {
      VecPatterns.insert(make_pair(std::move(P), VecPatterns.size()));
      VecIncludeStrings.push_back(std::move(include_loc));
      return VecIncludeStrings.size() - 1;
    }
    return It->second;
  }

public:
  MatcherTableEmitter(const CodeGenDAGPatterns &cgp) : CGP(cgp) {
    OpcodeCounts.assign(Matcher::HighestKind+1, 0);
  }

  unsigned EmitMatcherList(const Matcher *N, const unsigned Indent,
                           unsigned StartIdx, raw_ostream &OS);

  unsigned SizeMatcherList(Matcher *N, raw_ostream &OS);

  void EmitPredicateFunctions(raw_ostream &OS);

  void EmitHistogram(const Matcher *N, raw_ostream &OS);

  void EmitPatternMatchTable(raw_ostream &OS);

private:
  void EmitNodePredicatesFunction(const std::vector<TreePredicateFn> &Preds,
                                  StringRef Decl, raw_ostream &OS);

  unsigned SizeMatcher(Matcher *N, raw_ostream &OS);

  unsigned EmitMatcher(const Matcher *N, const unsigned Indent, unsigned CurrentIdx,
                       raw_ostream &OS);

  // Return the table index for Pred, interning it on first use. Predicates
  // with identical generated code share a single entry; operand-using and
  // operand-free predicates live in separate tables.
  unsigned getNodePredicate(TreePredicateFn Pred) {
    TreePattern *TP = Pred.getOrigPatFragRecord();
    unsigned &Entry = NodePredicateMap[TP];
    if (Entry == 0) {
      TinyPtrVector<TreePattern *> &SameCodePreds =
          NodePredicatesByCodeToRun[Pred.getCodeToRunOnSDNode()];
      if (SameCodePreds.empty()) {
        // We've never seen a predicate with the same code: allocate an entry.
        if (Pred.usesOperands()) {
          NodePredicatesWithOperands.push_back(Pred);
          Entry = NodePredicatesWithOperands.size();
        } else {
          NodePredicates.push_back(Pred);
          Entry = NodePredicates.size();
        }
      } else {
        // We did see an identical predicate: re-use it.
        Entry = NodePredicateMap[SameCodePreds.front()];
        assert(Entry != 0);
        assert(TreePredicateFn(SameCodePreds.front()).usesOperands() ==
               Pred.usesOperands() &&
               "PatFrags with some code must have same usesOperands setting");
      }
      // In both cases, we've never seen this particular predicate before, so
      // mark it in the list of predicates sharing the same code.
      SameCodePreds.push_back(TP);
    }
    return Entry-1;
  }

  // Return the table index for a pattern predicate, interning on first use.
  unsigned getPatternPredicate(StringRef PredName) {
    unsigned &Entry = PatternPredicateMap[PredName];
    if (Entry == 0) {
      PatternPredicates.push_back(PredName.str());
      Entry = PatternPredicates.size();
    }
    return Entry-1;
  }
  // Return the table index for a complex pattern, interning on first use.
  unsigned getComplexPat(const ComplexPattern &P) {
    unsigned &Entry = ComplexPatternMap[&P];
    if (Entry == 0) {
      ComplexPatterns.push_back(&P);
      Entry = ComplexPatterns.size();
    }
    return Entry-1;
  }

  // Return the table index for an SDNodeXForm, interning on first use.
  unsigned getNodeXFormID(Record *Rec) {
    unsigned &Entry = NodeXFormMap[Rec];
    if (Entry == 0) {
      NodeXForms.push_back(Rec);
      Entry = NodeXForms.size();
    }
    return Entry-1;
  }

};
+} // end anonymous namespace.
+
+static std::string GetPatFromTreePatternNode(const TreePatternNode *N) {
+ std::string str;
+ raw_string_ostream Stream(str);
+ Stream << *N;
+ return str;
+}
+
/// Compute how many bytes the VBR encoding of Val occupies: one byte per
/// 7-bit group, with at least one byte for small values.
static unsigned GetVBRSize(unsigned Val) {
  unsigned NumBytes = 1; // The terminating byte is always present.
  while (Val > 127) {
    Val >>= 7;
    ++NumBytes; // One continuation byte per extra 7-bit group.
  }
  return NumBytes;
}
+
+/// EmitVBRValue - Emit the specified value as a VBR, returning the number of
+/// bytes emitted.
+static unsigned EmitVBRValue(uint64_t Val, raw_ostream &OS) {
+ if (Val <= 127) {
+ OS << Val << ", ";
+ return 1;
+ }
+
+ uint64_t InVal = Val;
+ unsigned NumBytes = 0;
+ while (Val >= 128) {
+ OS << (Val&127) << "|128,";
+ Val >>= 7;
+ ++NumBytes;
+ }
+ OS << Val;
+ if (!OmitComments)
+ OS << "/*" << InVal << "*/";
+ OS << ", ";
+ return NumBytes+1;
+}
+
+/// Emit the specified signed value as a VBR. To improve compression we encode
+/// positive numbers shifted left by 1 and negative numbers negated and shifted
+/// left by 1 with bit 0 set.
+static unsigned EmitSignedVBRValue(uint64_t Val, raw_ostream &OS) {
+ if ((int64_t)Val >= 0)
+ Val = Val << 1;
+ else
+ Val = (-Val << 1) | 1;
+
+ return EmitVBRValue(Val, OS);
+}
+
+// This is expensive and slow.
+static std::string getIncludePath(const Record *R) {
+ std::string str;
+ raw_string_ostream Stream(str);
+ auto Locs = R->getLoc();
+ SMLoc L;
+ if (Locs.size() > 1) {
+ // Get where the pattern prototype was instantiated
+ L = Locs[1];
+ } else if (Locs.size() == 1) {
+ L = Locs[0];
+ }
+ unsigned CurBuf = SrcMgr.FindBufferContainingLoc(L);
+ assert(CurBuf && "Invalid or unspecified location!");
+
+ Stream << SrcMgr.getBufferInfo(CurBuf).Buffer->getBufferIdentifier() << ":"
+ << SrcMgr.FindLineNumber(L, CurBuf);
+ return str;
+}
+
+/// This function traverses the matcher tree and sizes all the nodes
+/// that are children of the three kinds of nodes that have them.
+unsigned MatcherTableEmitter::
+SizeMatcherList(Matcher *N, raw_ostream &OS) {
+ unsigned Size = 0;
+ while (N) {
+ Size += SizeMatcher(N, OS);
+ N = N->getNext();
+ }
+ return Size;
+}
+
/// This function sizes the children of the three kinds of nodes that
/// have them. It does so by using special cases for those three
/// nodes, but sharing the code in EmitMatcher() for the other kinds.
/// Child sizes are cached via setSize() so the later emission pass can
/// compute forward offsets without re-walking the tree.
unsigned MatcherTableEmitter::
SizeMatcher(Matcher *N, raw_ostream &OS) {
  unsigned Idx = 0;

  // Tally opcode usage for the histogram while we are walking anyway.
  ++OpcodeCounts[N->getKind()];
  switch (N->getKind()) {
  // The Scope matcher has its kind, a series of child size + child,
  // and a trailing zero.
  case Matcher::Scope: {
    ScopeMatcher *SM = cast<ScopeMatcher>(N);
    assert(SM->getNext() == nullptr && "Scope matcher should not have next");
    unsigned Size = 1; // Count the kind.
    for (unsigned i = 0, e = SM->getNumChildren(); i != e; ++i) {
      const unsigned ChildSize = SizeMatcherList(SM->getChild(i), OS);
      assert(ChildSize != 0 && "Matcher cannot have child of size 0");
      SM->getChild(i)->setSize(ChildSize);
      Size += GetVBRSize(ChildSize) + ChildSize; // Count VBR and child size.
    }
    ++Size; // Count the zero sentinel.
    return Size;
  }

  // SwitchOpcode and SwitchType have their kind, a series of child size +
  // opcode/type + child, and a trailing zero.
  case Matcher::SwitchOpcode:
  case Matcher::SwitchType: {
    unsigned Size = 1; // Count the kind.
    unsigned NumCases;
    if (const SwitchOpcodeMatcher *SOM = dyn_cast<SwitchOpcodeMatcher>(N))
      NumCases = SOM->getNumCases();
    else
      NumCases = cast<SwitchTypeMatcher>(N)->getNumCases();
    for (unsigned i = 0, e = NumCases; i != e; ++i) {
      Matcher *Child;
      if (SwitchOpcodeMatcher *SOM = dyn_cast<SwitchOpcodeMatcher>(N)) {
        Child = SOM->getCaseMatcher(i);
        Size += 2; // Count the child's opcode.
      } else {
        Child = cast<SwitchTypeMatcher>(N)->getCaseMatcher(i);
        ++Size; // Count the child's type.
      }
      const unsigned ChildSize = SizeMatcherList(Child, OS);
      assert(ChildSize != 0 && "Matcher cannot have child of size 0");
      Child->setSize(ChildSize);
      Size += GetVBRSize(ChildSize) + ChildSize; // Count VBR and child size.
    }
    ++Size; // Count the zero sentinel.
    return Size;
  }

  default:
    // Employ the matcher emitter to size other matchers.
    return EmitMatcher(N, 0, Idx, OS);
  }
  llvm_unreachable("Unreachable");
}
+
+static void BeginEmitFunction(raw_ostream &OS, StringRef RetType,
+ StringRef Decl, bool AddOverride) {
+ OS << "#ifdef GET_DAGISEL_DECL\n";
+ OS << RetType << ' ' << Decl;
+ if (AddOverride)
+ OS << " override";
+ OS << ";\n"
+ "#endif\n"
+ "#if defined(GET_DAGISEL_BODY) || DAGISEL_INLINE\n";
+ OS << RetType << " DAGISEL_CLASS_COLONCOLON " << Decl << "\n";
+ if (AddOverride) {
+ OS << "#if DAGISEL_INLINE\n"
+ " override\n"
+ "#endif\n";
+ }
+}
+
/// Close the preprocessor guard opened by BeginEmitFunction.
static void EndEmitFunction(raw_ostream &OS) {
  OS << "#endif // GET_DAGISEL_BODY\n\n";
}
+
+void MatcherTableEmitter::EmitPatternMatchTable(raw_ostream &OS) {
+
+ assert(isUInt<16>(VecPatterns.size()) &&
+ "Using only 16 bits to encode offset into Pattern Table");
+ assert(VecPatterns.size() == VecIncludeStrings.size() &&
+ "The sizes of Pattern and include vectors should be the same");
+
+ BeginEmitFunction(OS, "StringRef", "getPatternForIndex(unsigned Index)",
+ true/*AddOverride*/);
+ OS << "{\n";
+ OS << "static const char *PATTERN_MATCH_TABLE[] = {\n";
+
+ for (const auto &It : VecPatterns) {
+ OS << "\"" << It.first << "\",\n";
+ }
+
+ OS << "\n};";
+ OS << "\nreturn StringRef(PATTERN_MATCH_TABLE[Index]);";
+ OS << "\n}\n";
+ EndEmitFunction(OS);
+
+ BeginEmitFunction(OS, "StringRef", "getIncludePathForIndex(unsigned Index)",
+ true/*AddOverride*/);
+ OS << "{\n";
+ OS << "static const char *INCLUDE_PATH_TABLE[] = {\n";
+
+ for (const auto &It : VecIncludeStrings) {
+ OS << "\"" << It << "\",\n";
+ }
+
+ OS << "\n};";
+ OS << "\nreturn StringRef(INCLUDE_PATH_TABLE[Index]);";
+ OS << "\n}\n";
+ EndEmitFunction(OS);
+}
+
/// EmitMatcher - Emit bytes for the specified matcher and return
/// the number of bytes emitted. The returned count must match the size
/// computed by SizeMatcher() for the same node, since Scope/Switch offsets
/// were precomputed from those sizes.
unsigned MatcherTableEmitter::
EmitMatcher(const Matcher *N, const unsigned Indent, unsigned CurrentIdx,
            raw_ostream &OS) {
  OS.indent(Indent);

  switch (N->getKind()) {
  case Matcher::Scope: {
    const ScopeMatcher *SM = cast<ScopeMatcher>(N);
    unsigned StartIdx = CurrentIdx;

    // Emit all of the children.
    for (unsigned i = 0, e = SM->getNumChildren(); i != e; ++i) {
      if (i == 0) {
        OS << "OPC_Scope, ";
        ++CurrentIdx;
      } else {
        if (!OmitComments) {
          OS << "/*" << format_decimal(CurrentIdx, IndexWidth) << "*/";
          OS.indent(Indent) << "/*Scope*/ ";
        } else
          OS.indent(Indent);
      }

      // Each child is preceded by its VBR-encoded size, which doubles as
      // the "skip to next alternative on failure" offset.
      unsigned ChildSize = SM->getChild(i)->getSize();
      unsigned VBRSize = EmitVBRValue(ChildSize, OS);
      if (!OmitComments) {
        OS << "/*->" << CurrentIdx + VBRSize + ChildSize << "*/";
        if (i == 0)
          OS << " // " << SM->getNumChildren() << " children in Scope";
      }
      OS << '\n';

      ChildSize = EmitMatcherList(SM->getChild(i), Indent+1,
                                  CurrentIdx + VBRSize, OS);
      assert(ChildSize == SM->getChild(i)->getSize() &&
             "Emitted child size does not match calculated size");
      CurrentIdx += VBRSize + ChildSize;
    }

    // Emit a zero as a sentinel indicating end of 'Scope'.
    if (!OmitComments)
      OS << "/*" << format_decimal(CurrentIdx, IndexWidth) << "*/";
    OS.indent(Indent) << "0, ";
    if (!OmitComments)
      OS << "/*End of Scope*/";
    OS << '\n';
    return CurrentIdx - StartIdx + 1;
  }

  case Matcher::RecordNode:
    OS << "OPC_RecordNode,";
    if (!OmitComments)
      OS << " // #"
         << cast<RecordMatcher>(N)->getResultNo() << " = "
         << cast<RecordMatcher>(N)->getWhatFor();
    OS << '\n';
    return 1;

  case Matcher::RecordChild:
    OS << "OPC_RecordChild" << cast<RecordChildMatcher>(N)->getChildNo()
       << ',';
    if (!OmitComments)
      OS << " // #"
         << cast<RecordChildMatcher>(N)->getResultNo() << " = "
         << cast<RecordChildMatcher>(N)->getWhatFor();
    OS << '\n';
    return 1;

  case Matcher::RecordMemRef:
    OS << "OPC_RecordMemRef,\n";
    return 1;

  case Matcher::CaptureGlueInput:
    OS << "OPC_CaptureGlueInput,\n";
    return 1;

  case Matcher::MoveChild: {
    const auto *MCM = cast<MoveChildMatcher>(N);

    OS << "OPC_MoveChild";
    // Handle the specialized forms: child numbers 0-7 are folded into the
    // opcode itself; larger ones take an extra operand byte.
    if (MCM->getChildNo() >= 8)
      OS << ", ";
    OS << MCM->getChildNo() << ",\n";
    return (MCM->getChildNo() >= 8) ? 2 : 1;
  }

  case Matcher::MoveParent:
    OS << "OPC_MoveParent,\n";
    return 1;

  case Matcher::CheckSame:
    OS << "OPC_CheckSame, "
       << cast<CheckSameMatcher>(N)->getMatchNumber() << ",\n";
    return 2;

  case Matcher::CheckChildSame:
    OS << "OPC_CheckChild"
       << cast<CheckChildSameMatcher>(N)->getChildNo() << "Same, "
       << cast<CheckChildSameMatcher>(N)->getMatchNumber() << ",\n";
    return 2;

  case Matcher::CheckPatternPredicate: {
    StringRef Pred =cast<CheckPatternPredicateMatcher>(N)->getPredicate();
    OS << "OPC_CheckPatternPredicate, " << getPatternPredicate(Pred) << ',';
    if (!OmitComments)
      OS << " // " << Pred;
    OS << '\n';
    return 2;
  }
  case Matcher::CheckPredicate: {
    TreePredicateFn Pred = cast<CheckPredicateMatcher>(N)->getPredicate();
    unsigned OperandBytes = 0;

    if (Pred.usesOperands()) {
      unsigned NumOps = cast<CheckPredicateMatcher>(N)->getNumOperands();
      OS << "OPC_CheckPredicateWithOperands, " << NumOps << "/*#Ops*/, ";
      for (unsigned i = 0; i < NumOps; ++i)
        OS << cast<CheckPredicateMatcher>(N)->getOperandNo(i) << ", ";
      OperandBytes = 1 + NumOps;
    } else {
      OS << "OPC_CheckPredicate, ";
    }

    OS << getNodePredicate(Pred) << ',';
    if (!OmitComments)
      OS << " // " << Pred.getFnName();
    OS << '\n';
    return 2 + OperandBytes;
  }

  case Matcher::CheckOpcode:
    // Opcodes are 16-bit values emitted via the TARGET_VAL macro.
    OS << "OPC_CheckOpcode, TARGET_VAL("
       << cast<CheckOpcodeMatcher>(N)->getOpcode().getEnumName() << "),\n";
    return 3;

  case Matcher::SwitchOpcode:
  case Matcher::SwitchType: {
    unsigned StartIdx = CurrentIdx;

    unsigned NumCases;
    if (const SwitchOpcodeMatcher *SOM = dyn_cast<SwitchOpcodeMatcher>(N)) {
      OS << "OPC_SwitchOpcode ";
      NumCases = SOM->getNumCases();
    } else {
      OS << "OPC_SwitchType ";
      NumCases = cast<SwitchTypeMatcher>(N)->getNumCases();
    }

    if (!OmitComments)
      OS << "/*" << NumCases << " cases */";
    OS << ", ";
    ++CurrentIdx;

    // For each case we emit the size, then the opcode, then the matcher.
    for (unsigned i = 0, e = NumCases; i != e; ++i) {
      const Matcher *Child;
      unsigned IdxSize;
      if (const SwitchOpcodeMatcher *SOM = dyn_cast<SwitchOpcodeMatcher>(N)) {
        Child = SOM->getCaseMatcher(i);
        IdxSize = 2; // size of opcode in table is 2 bytes.
      } else {
        Child = cast<SwitchTypeMatcher>(N)->getCaseMatcher(i);
        IdxSize = 1; // size of type in table is 1 byte.
      }

      if (i != 0) {
        if (!OmitComments)
          OS << "/*" << format_decimal(CurrentIdx, IndexWidth) << "*/";
        OS.indent(Indent);
        if (!OmitComments)
          OS << (isa<SwitchOpcodeMatcher>(N) ?
                 "/*SwitchOpcode*/ " : "/*SwitchType*/ ");
      }

      unsigned ChildSize = Child->getSize();
      CurrentIdx += EmitVBRValue(ChildSize, OS) + IdxSize;
      if (const SwitchOpcodeMatcher *SOM = dyn_cast<SwitchOpcodeMatcher>(N))
        OS << "TARGET_VAL(" << SOM->getCaseOpcode(i).getEnumName() << "),";
      else
        OS << getEnumName(cast<SwitchTypeMatcher>(N)->getCaseType(i)) << ',';
      if (!OmitComments)
        OS << "// ->" << CurrentIdx + ChildSize;
      OS << '\n';

      ChildSize = EmitMatcherList(Child, Indent+1, CurrentIdx, OS);
      assert(ChildSize == Child->getSize() &&
             "Emitted child size does not match calculated size");
      CurrentIdx += ChildSize;
    }

    // Emit the final zero to terminate the switch.
    if (!OmitComments)
      OS << "/*" << format_decimal(CurrentIdx, IndexWidth) << "*/";
    OS.indent(Indent) << "0,";
    if (!OmitComments)
      OS << (isa<SwitchOpcodeMatcher>(N) ?
             " // EndSwitchOpcode" : " // EndSwitchType");

    OS << '\n';
    return CurrentIdx - StartIdx + 1;
  }

  case Matcher::CheckType:
    // Result 0 uses the compact 2-byte form; other results need the result
    // number as an extra byte.
    if (cast<CheckTypeMatcher>(N)->getResNo() == 0) {
      OS << "OPC_CheckType, "
         << getEnumName(cast<CheckTypeMatcher>(N)->getType()) << ",\n";
      return 2;
    }
    OS << "OPC_CheckTypeRes, " << cast<CheckTypeMatcher>(N)->getResNo()
       << ", " << getEnumName(cast<CheckTypeMatcher>(N)->getType()) << ",\n";
    return 3;

  case Matcher::CheckChildType:
    OS << "OPC_CheckChild"
       << cast<CheckChildTypeMatcher>(N)->getChildNo() << "Type, "
       << getEnumName(cast<CheckChildTypeMatcher>(N)->getType()) << ",\n";
    return 2;

  case Matcher::CheckInteger: {
    OS << "OPC_CheckInteger, ";
    unsigned Bytes =
        1 + EmitSignedVBRValue(cast<CheckIntegerMatcher>(N)->getValue(), OS);
    OS << '\n';
    return Bytes;
  }
  case Matcher::CheckChildInteger: {
    OS << "OPC_CheckChild" << cast<CheckChildIntegerMatcher>(N)->getChildNo()
       << "Integer, ";
    unsigned Bytes = 1 + EmitSignedVBRValue(
                             cast<CheckChildIntegerMatcher>(N)->getValue(), OS);
    OS << '\n';
    return Bytes;
  }
  case Matcher::CheckCondCode:
    OS << "OPC_CheckCondCode, ISD::"
       << cast<CheckCondCodeMatcher>(N)->getCondCodeName() << ",\n";
    return 2;

  case Matcher::CheckChild2CondCode:
    OS << "OPC_CheckChild2CondCode, ISD::"
       << cast<CheckChild2CondCodeMatcher>(N)->getCondCodeName() << ",\n";
    return 2;

  case Matcher::CheckValueType:
    OS << "OPC_CheckValueType, MVT::"
       << cast<CheckValueTypeMatcher>(N)->getTypeName() << ",\n";
    return 2;

  case Matcher::CheckComplexPat: {
    const CheckComplexPatMatcher *CCPM = cast<CheckComplexPatMatcher>(N);
    const ComplexPattern &Pattern = CCPM->getPattern();
    OS << "OPC_CheckComplexPat, /*CP*/" << getComplexPat(Pattern) << ", /*#*/"
       << CCPM->getMatchNumber() << ',';

    if (!OmitComments) {
      OS << " // " << Pattern.getSelectFunc();
      OS << ":$" << CCPM->getName();
      for (unsigned i = 0, e = Pattern.getNumOperands(); i != e; ++i)
        OS << " #" << CCPM->getFirstResult()+i;

      if (Pattern.hasProperty(SDNPHasChain))
        OS << " + chain result";
    }
    OS << '\n';
    return 3;
  }

  case Matcher::CheckAndImm: {
    OS << "OPC_CheckAndImm, ";
    unsigned Bytes=1+EmitVBRValue(cast<CheckAndImmMatcher>(N)->getValue(), OS);
    OS << '\n';
    return Bytes;
  }

  case Matcher::CheckOrImm: {
    OS << "OPC_CheckOrImm, ";
    unsigned Bytes = 1+EmitVBRValue(cast<CheckOrImmMatcher>(N)->getValue(), OS);
    OS << '\n';
    return Bytes;
  }

  case Matcher::CheckFoldableChainNode:
    OS << "OPC_CheckFoldableChainNode,\n";
    return 1;

  case Matcher::CheckImmAllOnesV:
    OS << "OPC_CheckImmAllOnesV,\n";
    return 1;

  case Matcher::CheckImmAllZerosV:
    OS << "OPC_CheckImmAllZerosV,\n";
    return 1;

  case Matcher::EmitInteger: {
    int64_t Val = cast<EmitIntegerMatcher>(N)->getValue();
    OS << "OPC_EmitInteger, "
       << getEnumName(cast<EmitIntegerMatcher>(N)->getVT()) << ", ";
    unsigned Bytes = 2 + EmitSignedVBRValue(Val, OS);
    OS << '\n';
    return Bytes;
  }
  case Matcher::EmitStringInteger: {
    const std::string &Val = cast<EmitStringIntegerMatcher>(N)->getValue();
    // These should always fit into 7 bits.
    OS << "OPC_EmitStringInteger, "
       << getEnumName(cast<EmitStringIntegerMatcher>(N)->getVT()) << ", " << Val
       << ",\n";
    return 3;
  }

  case Matcher::EmitRegister: {
    const EmitRegisterMatcher *Matcher = cast<EmitRegisterMatcher>(N);
    const CodeGenRegister *Reg = Matcher->getReg();
    // If the enum value of the register is larger than one byte can handle,
    // use EmitRegister2.
    if (Reg && Reg->EnumValue > 255) {
      OS << "OPC_EmitRegister2, " << getEnumName(Matcher->getVT()) << ", ";
      OS << "TARGET_VAL(" << getQualifiedName(Reg->TheDef) << "),\n";
      return 4;
    } else {
      OS << "OPC_EmitRegister, " << getEnumName(Matcher->getVT()) << ", ";
      if (Reg) {
        OS << getQualifiedName(Reg->TheDef) << ",\n";
      } else {
        // A null register means "zero_reg" (register number 0).
        OS << "0 ";
        if (!OmitComments)
          OS << "/*zero_reg*/";
        OS << ",\n";
      }
      return 3;
    }
  }

  case Matcher::EmitConvertToTarget:
    OS << "OPC_EmitConvertToTarget, "
       << cast<EmitConvertToTargetMatcher>(N)->getSlot() << ",\n";
    return 2;

  case Matcher::EmitMergeInputChains: {
    const EmitMergeInputChainsMatcher *MN =
        cast<EmitMergeInputChainsMatcher>(N);

    // Handle the specialized forms OPC_EmitMergeInputChains1_0, 1_1, and 1_2.
    if (MN->getNumNodes() == 1 && MN->getNode(0) < 3) {
      OS << "OPC_EmitMergeInputChains1_" << MN->getNode(0) << ",\n";
      return 1;
    }

    OS << "OPC_EmitMergeInputChains, " << MN->getNumNodes() << ", ";
    for (unsigned i = 0, e = MN->getNumNodes(); i != e; ++i)
      OS << MN->getNode(i) << ", ";
    OS << '\n';
    return 2+MN->getNumNodes();
  }
  case Matcher::EmitCopyToReg: {
    const auto *C2RMatcher = cast<EmitCopyToRegMatcher>(N);
    int Bytes = 3;
    const CodeGenRegister *Reg = C2RMatcher->getDestPhysReg();
    // Registers above 255 need the two-byte OPC_EmitCopyToReg2 form.
    if (Reg->EnumValue > 255) {
      assert(isUInt<16>(Reg->EnumValue) && "not handled");
      OS << "OPC_EmitCopyToReg2, " << C2RMatcher->getSrcSlot() << ", "
         << "TARGET_VAL(" << getQualifiedName(Reg->TheDef) << "),\n";
      ++Bytes;
    } else {
      OS << "OPC_EmitCopyToReg, " << C2RMatcher->getSrcSlot() << ", "
         << getQualifiedName(Reg->TheDef) << ",\n";
    }

    return Bytes;
  }
  case Matcher::EmitNodeXForm: {
    const EmitNodeXFormMatcher *XF = cast<EmitNodeXFormMatcher>(N);
    OS << "OPC_EmitNodeXForm, " << getNodeXFormID(XF->getNodeXForm()) << ", "
       << XF->getSlot() << ',';
    if (!OmitComments)
      OS << " // "<<XF->getNodeXForm()->getName();
    OS <<'\n';
    return 3;
  }

  case Matcher::EmitNode:
  case Matcher::MorphNodeTo: {
    auto NumCoveredBytes = 0;
    // Under -instrument-coverage, prefix the node with an OPC_Coverage
    // opcode carrying a 16-bit index into the pattern tables.
    if (InstrumentCoverage) {
      if (const MorphNodeToMatcher *SNT = dyn_cast<MorphNodeToMatcher>(N)) {
        NumCoveredBytes = 3;
        OS << "OPC_Coverage, ";
        std::string src =
            GetPatFromTreePatternNode(SNT->getPattern().getSrcPattern());
        std::string dst =
            GetPatFromTreePatternNode(SNT->getPattern().getDstPattern());
        Record *PatRecord = SNT->getPattern().getSrcRecord();
        std::string include_src = getIncludePath(PatRecord);
        unsigned Offset =
            getPatternIdxFromTable(src + " -> " + dst, std::move(include_src));
        OS << "TARGET_VAL(" << Offset << "),\n";
        OS.indent(FullIndexWidth + Indent);
      }
    }
    const EmitNodeMatcherCommon *EN = cast<EmitNodeMatcherCommon>(N);
    OS << (isa<EmitNodeMatcher>(EN) ? "OPC_EmitNode" : "OPC_MorphNodeTo");
    // Nodes with 0-2 VTs use the specialized opcode variants that embed the
    // VT count, saving the explicit #VTs byte.
    bool CompressVTs = EN->getNumVTs() < 3;
    if (CompressVTs)
      OS << EN->getNumVTs();

    OS << ", TARGET_VAL(" << EN->getOpcodeName() << "), 0";

    if (EN->hasChain())   OS << "|OPFL_Chain";
    if (EN->hasInFlag())  OS << "|OPFL_GlueInput";
    if (EN->hasOutFlag()) OS << "|OPFL_GlueOutput";
    if (EN->hasMemRefs()) OS << "|OPFL_MemRefs";
    if (EN->getNumFixedArityOperands() != -1)
      OS << "|OPFL_Variadic" << EN->getNumFixedArityOperands();
    OS << ",\n";

    OS.indent(FullIndexWidth + Indent+4);
    if (!CompressVTs) {
      OS << EN->getNumVTs();
      if (!OmitComments)
        OS << "/*#VTs*/";
      OS << ", ";
    }
    for (unsigned i = 0, e = EN->getNumVTs(); i != e; ++i)
      OS << getEnumName(EN->getVT(i)) << ", ";

    OS << EN->getNumOperands();
    if (!OmitComments)
      OS << "/*#Ops*/";
    OS << ", ";
    unsigned NumOperandBytes = 0;
    for (unsigned i = 0, e = EN->getNumOperands(); i != e; ++i)
      NumOperandBytes += EmitVBRValue(EN->getOperand(i), OS);

    if (!OmitComments) {
      // Print the result #'s for EmitNode.
      if (const EmitNodeMatcher *E = dyn_cast<EmitNodeMatcher>(EN)) {
        if (unsigned NumResults = EN->getNumVTs()) {
          OS << " // Results =";
          unsigned First = E->getFirstResultSlot();
          for (unsigned i = 0; i != NumResults; ++i)
            OS << " #" << First+i;
        }
      }
      OS << '\n';

      if (const MorphNodeToMatcher *SNT = dyn_cast<MorphNodeToMatcher>(N)) {
        OS.indent(FullIndexWidth + Indent) << "// Src: "
          << *SNT->getPattern().getSrcPattern() << " - Complexity = "
          << SNT->getPattern().getPatternComplexity(CGP) << '\n';
        OS.indent(FullIndexWidth + Indent) << "// Dst: "
          << *SNT->getPattern().getDstPattern() << '\n';
      }
    } else
      OS << '\n';

    return 5 + !CompressVTs + EN->getNumVTs() + NumOperandBytes +
           NumCoveredBytes;
  }
  case Matcher::CompleteMatch: {
    const CompleteMatchMatcher *CM = cast<CompleteMatchMatcher>(N);
    auto NumCoveredBytes = 0;
    if (InstrumentCoverage) {
      NumCoveredBytes = 3;
      OS << "OPC_Coverage, ";
      std::string src =
          GetPatFromTreePatternNode(CM->getPattern().getSrcPattern());
      std::string dst =
          GetPatFromTreePatternNode(CM->getPattern().getDstPattern());
      Record *PatRecord = CM->getPattern().getSrcRecord();
      std::string include_src = getIncludePath(PatRecord);
      unsigned Offset =
          getPatternIdxFromTable(src + " -> " + dst, std::move(include_src));
      OS << "TARGET_VAL(" << Offset << "),\n";
      OS.indent(FullIndexWidth + Indent);
    }
    OS << "OPC_CompleteMatch, " << CM->getNumResults() << ", ";
    unsigned NumResultBytes = 0;
    for (unsigned i = 0, e = CM->getNumResults(); i != e; ++i)
      NumResultBytes += EmitVBRValue(CM->getResult(i), OS);
    OS << '\n';
    if (!OmitComments) {
      OS.indent(FullIndexWidth + Indent) << " // Src: "
        << *CM->getPattern().getSrcPattern() << " - Complexity = "
        << CM->getPattern().getPatternComplexity(CGP) << '\n';
      OS.indent(FullIndexWidth + Indent) << " // Dst: "
        << *CM->getPattern().getDstPattern();
    }
    OS << '\n';
    return 2 + NumResultBytes + NumCoveredBytes;
  }
  }
  llvm_unreachable("Unreachable");
}
+
+/// This function traverses the matcher tree and emits all the nodes.
+/// The nodes have already been sized.
+unsigned MatcherTableEmitter::
+EmitMatcherList(const Matcher *N, const unsigned Indent, unsigned CurrentIdx,
+                raw_ostream &OS) {
+  unsigned TotalBytes = 0;
+  // Walk the singly-linked matcher list, emitting each node at its
+  // precomputed absolute index in the table.
+  for (; N; N = N->getNext()) {
+    // Prefix each node with its table index as a comment, unless suppressed.
+    if (!OmitComments)
+      OS << "/*" << format_decimal(CurrentIdx, IndexWidth) << "*/";
+    const unsigned NodeBytes = EmitMatcher(N, Indent, CurrentIdx, OS);
+    TotalBytes += NodeBytes;
+    CurrentIdx += NodeBytes;
+  }
+  return TotalBytes;
+}
+
+/// Emit the body of the node-predicate dispatch function declared by \p Decl:
+/// a switch over PredNo with one case per deduplicated predicate in \p Preds.
+void MatcherTableEmitter::EmitNodePredicatesFunction(
+    const std::vector<TreePredicateFn> &Preds, StringRef Decl,
+    raw_ostream &OS) {
+  // Nothing to emit if this flavor of predicate is unused.
+  if (Preds.empty())
+    return;
+
+  BeginEmitFunction(OS, "bool", Decl, true/*AddOverride*/);
+  OS << "{\n";
+  OS << "  switch (PredNo) {\n";
+  OS << "  default: llvm_unreachable(\"Invalid predicate in table?\");\n";
+  for (unsigned i = 0, e = Preds.size(); i != e; ++i) {
+    // Emit the predicate code corresponding to this pattern.
+    const TreePredicateFn PredFn = Preds[i];
+    assert(!PredFn.isAlwaysTrue() && "No code in this predicate");
+    std::string PredFnCodeStr = PredFn.getCodeToRunOnSDNode();
+
+    OS << "  case " << i << ": {\n";
+    // List, as comments, the names of all predicates that were collapsed
+    // into this case because they share the same code string.
+    for (auto *SimilarPred : NodePredicatesByCodeToRun[PredFnCodeStr])
+      OS << "    // " << TreePredicateFn(SimilarPred).getFnName() << '\n';
+    OS << PredFnCodeStr << "\n  }\n";
+  }
+  OS << "  }\n";
+  OS << "}\n";
+  EndEmitFunction(OS);
+}
+
+/// Emit the out-of-line helper functions that the matcher table refers to by
+/// index: pattern predicates, node predicates, complex-pattern selectors and
+/// SDNodeXForm bodies.
+void MatcherTableEmitter::EmitPredicateFunctions(raw_ostream &OS) {
+  // Emit pattern predicates.
+  if (!PatternPredicates.empty()) {
+    BeginEmitFunction(OS, "bool",
+          "CheckPatternPredicate(unsigned PredNo) const", true/*AddOverride*/);
+    OS << "{\n";
+    OS << "  switch (PredNo) {\n";
+    OS << "  default: llvm_unreachable(\"Invalid predicate in table?\");\n";
+    for (unsigned i = 0, e = PatternPredicates.size(); i != e; ++i)
+      OS << "  case " << i << ": return "  << PatternPredicates[i] << ";\n";
+    OS << "  }\n";
+    OS << "}\n";
+    EndEmitFunction(OS);
+  }
+
+  // Emit Node predicates.
+  EmitNodePredicatesFunction(
+      NodePredicates, "CheckNodePredicate(SDNode *Node, unsigned PredNo) const",
+      OS);
+  EmitNodePredicatesFunction(
+      NodePredicatesWithOperands,
+      "CheckNodePredicateWithOperands(SDNode *Node, unsigned PredNo, "
+      "const SmallVectorImpl<SDValue> &Operands) const",
+      OS);
+
+  // Emit CompletePattern matchers.
+  // FIXME: This should be const.
+  if (!ComplexPatterns.empty()) {
+    BeginEmitFunction(OS, "bool",
+          "CheckComplexPattern(SDNode *Root, SDNode *Parent,\n"
+          "      SDValue N, unsigned PatternNo,\n"
+          "      SmallVectorImpl<std::pair<SDValue, SDNode *>> &Result)",
+          true/*AddOverride*/);
+    OS << "{\n";
+    OS << "  unsigned NextRes = Result.size();\n";
+    OS << "  switch (PatternNo) {\n";
+    OS << "  default: llvm_unreachable(\"Invalid pattern # in table?\");\n";
+    for (unsigned i = 0, e = ComplexPatterns.size(); i != e; ++i) {
+      const ComplexPattern &P = *ComplexPatterns[i];
+      unsigned NumOps = P.getNumOperands();
+
+      // A chained complex pattern also returns the chained node itself.
+      if (P.hasProperty(SDNPHasChain))
+        ++NumOps;  // Get the chained node too.
+
+      OS << "  case " << i << ":\n";
+      if (InstrumentCoverage)
+        OS << "  {\n";
+      OS << "    Result.resize(NextRes+" << NumOps << ");\n";
+      // With coverage instrumentation we capture the result so we can log
+      // which complex patterns actually fired before returning.
+      if (InstrumentCoverage)
+        OS << "  bool Succeeded = " << P.getSelectFunc();
+      else
+        OS << "  return " << P.getSelectFunc();
+
+      OS << "(";
+      // If the complex pattern wants the root of the match, pass it in as the
+      // first argument.
+      if (P.hasProperty(SDNPWantRoot))
+        OS << "Root, ";
+
+      // If the complex pattern wants the parent of the operand being matched,
+      // pass it in as the next argument.
+      if (P.hasProperty(SDNPWantParent))
+        OS << "Parent, ";
+
+      OS << "N";
+      for (unsigned i = 0; i != NumOps; ++i)
+        OS << ", Result[NextRes+" << i << "].first";
+      OS << ");\n";
+      if (InstrumentCoverage) {
+        OS << "  if (Succeeded)\n";
+        OS << "   dbgs() << \"\\nCOMPLEX_PATTERN: " << P.getSelectFunc()
+           << "\\n\" ;\n";
+        OS << "  return Succeeded;\n";
+        OS << " }\n";
+      }
+    }
+    OS << "  }\n";
+    OS << "}\n";
+    EndEmitFunction(OS);
+  }
+
+
+  // Emit SDNodeXForm handlers.
+  // FIXME: This should be const.
+  if (!NodeXForms.empty()) {
+    BeginEmitFunction(OS, "SDValue",
+          "RunSDNodeXForm(SDValue V, unsigned XFormNo)", true/*AddOverride*/);
+    OS << "{\n";
+    OS << "  switch (XFormNo) {\n";
+    OS << "  default: llvm_unreachable(\"Invalid xform # in table?\");\n";
+
+    // FIXME: The node xform could take SDValue's instead of SDNode*'s.
+    for (unsigned i = 0, e = NodeXForms.size(); i != e; ++i) {
+      const CodeGenDAGPatterns::NodeXForm &Entry =
+        CGP.getSDNodeTransform(NodeXForms[i]);
+
+      Record *SDNode = Entry.first;
+      const std::string &Code = Entry.second;
+
+      OS << "  case " << i << ": {  ";
+      if (!OmitComments)
+        OS << "// " << NodeXForms[i]->getName();
+      OS << '\n';
+
+      // Bind N to the node being transformed, casting to the SDNode subclass
+      // the xform's TableGen record declares (no cast needed for plain SDNode).
+      std::string ClassName =
+          std::string(CGP.getSDNodeInfo(SDNode).getSDClassName());
+      if (ClassName == "SDNode")
+        OS << "    SDNode *N = V.getNode();\n";
+      else
+        OS << "    " << ClassName << " *N = cast<" << ClassName
+           << ">(V.getNode());\n";
+      OS << Code << "\n  }\n";
+    }
+    OS << "  }\n";
+    OS << "}\n";
+    EndEmitFunction(OS);
+  }
+}
+
+/// Map a matcher kind to the name of the OPC_* byte emitted for it in the
+/// matcher table.  Used only for the opcode-histogram comments.
+/// (The 'break' statements that previously followed each 'return' were
+/// unreachable dead code and have been removed.)
+static StringRef getOpcodeString(Matcher::KindTy Kind) {
+  switch (Kind) {
+  case Matcher::Scope: return "OPC_Scope";
+  case Matcher::RecordNode: return "OPC_RecordNode";
+  case Matcher::RecordChild: return "OPC_RecordChild";
+  case Matcher::RecordMemRef: return "OPC_RecordMemRef";
+  case Matcher::CaptureGlueInput: return "OPC_CaptureGlueInput";
+  case Matcher::MoveChild: return "OPC_MoveChild";
+  case Matcher::MoveParent: return "OPC_MoveParent";
+  case Matcher::CheckSame: return "OPC_CheckSame";
+  case Matcher::CheckChildSame: return "OPC_CheckChildSame";
+  case Matcher::CheckPatternPredicate: return "OPC_CheckPatternPredicate";
+  case Matcher::CheckPredicate: return "OPC_CheckPredicate";
+  case Matcher::CheckOpcode: return "OPC_CheckOpcode";
+  case Matcher::SwitchOpcode: return "OPC_SwitchOpcode";
+  case Matcher::CheckType: return "OPC_CheckType";
+  case Matcher::SwitchType: return "OPC_SwitchType";
+  case Matcher::CheckChildType: return "OPC_CheckChildType";
+  case Matcher::CheckInteger: return "OPC_CheckInteger";
+  case Matcher::CheckChildInteger: return "OPC_CheckChildInteger";
+  case Matcher::CheckCondCode: return "OPC_CheckCondCode";
+  case Matcher::CheckChild2CondCode: return "OPC_CheckChild2CondCode";
+  case Matcher::CheckValueType: return "OPC_CheckValueType";
+  case Matcher::CheckComplexPat: return "OPC_CheckComplexPat";
+  case Matcher::CheckAndImm: return "OPC_CheckAndImm";
+  case Matcher::CheckOrImm: return "OPC_CheckOrImm";
+  case Matcher::CheckFoldableChainNode: return "OPC_CheckFoldableChainNode";
+  case Matcher::CheckImmAllOnesV: return "OPC_CheckImmAllOnesV";
+  case Matcher::CheckImmAllZerosV: return "OPC_CheckImmAllZerosV";
+  case Matcher::EmitInteger: return "OPC_EmitInteger";
+  case Matcher::EmitStringInteger: return "OPC_EmitStringInteger";
+  case Matcher::EmitRegister: return "OPC_EmitRegister";
+  case Matcher::EmitConvertToTarget: return "OPC_EmitConvertToTarget";
+  case Matcher::EmitMergeInputChains: return "OPC_EmitMergeInputChains";
+  case Matcher::EmitCopyToReg: return "OPC_EmitCopyToReg";
+  case Matcher::EmitNode: return "OPC_EmitNode";
+  case Matcher::MorphNodeTo: return "OPC_MorphNodeTo";
+  case Matcher::EmitNodeXForm: return "OPC_EmitNodeXForm";
+  case Matcher::CompleteMatch: return "OPC_CompleteMatch";
+  }
+
+  llvm_unreachable("Unhandled opcode?");
+}
+
+/// Emit a comment-only histogram showing how many times each matcher opcode
+/// occurs in the generated table.  Does nothing when comments are omitted.
+void MatcherTableEmitter::EmitHistogram(const Matcher *M,
+                                        raw_ostream &OS) {
+  if (OmitComments)
+    return;
+
+  OS << "  // Opcode Histogram:\n";
+  unsigned Idx = 0;
+  for (unsigned Count : OpcodeCounts) {
+    OS << "  // #"
+       << left_justify(getOpcodeString((Matcher::KindTy)Idx), HistOpcWidth)
+       << " = " << Count << '\n';
+    ++Idx;
+  }
+  OS << '\n';
+}
+
+
+/// Top-level entry point: emit SelectCode(), the byte-encoded matcher table
+/// it interprets, the predicate/xform helper functions, and the surrounding
+/// GET_DAGISEL_* preprocessor scaffolding that lets targets emit the bodies
+/// either inline or out-of-line.
+void llvm::EmitMatcherTable(Matcher *TheMatcher,
+                            const CodeGenDAGPatterns &CGP,
+                            raw_ostream &OS) {
+  OS << "#if defined(GET_DAGISEL_DECL) && defined(GET_DAGISEL_BODY)\n";
+  OS << "#error GET_DAGISEL_DECL and GET_DAGISEL_BODY cannot be both defined, ";
+  OS << "undef both for inline definitions\n";
+  OS << "#endif\n\n";
+
+  // Emit a check for omitted class name.
+  OS << "#ifdef GET_DAGISEL_BODY\n";
+  OS << "#define LOCAL_DAGISEL_STRINGIZE(X) LOCAL_DAGISEL_STRINGIZE_(X)\n";
+  OS << "#define LOCAL_DAGISEL_STRINGIZE_(X) #X\n";
+  OS << "static_assert(sizeof(LOCAL_DAGISEL_STRINGIZE(GET_DAGISEL_BODY)) > 1,"
+        "\n";
+  OS << "   \"GET_DAGISEL_BODY is empty: it should be defined with the class "
+        "name\");\n";
+  OS << "#undef LOCAL_DAGISEL_STRINGIZE_\n";
+  OS << "#undef LOCAL_DAGISEL_STRINGIZE\n";
+  OS << "#endif\n\n";
+
+  OS << "#if !defined(GET_DAGISEL_DECL) && !defined(GET_DAGISEL_BODY)\n";
+  OS << "#define DAGISEL_INLINE 1\n";
+  OS << "#else\n";
+  OS << "#define DAGISEL_INLINE 0\n";
+  OS << "#endif\n\n";
+
+  OS << "#if !DAGISEL_INLINE\n";
+  OS << "#define DAGISEL_CLASS_COLONCOLON GET_DAGISEL_BODY ::\n";
+  OS << "#else\n";
+  OS << "#define DAGISEL_CLASS_COLONCOLON\n";
+  OS << "#endif\n\n";
+
+  BeginEmitFunction(OS, "void", "SelectCode(SDNode *N)", false/*AddOverride*/);
+  MatcherTableEmitter MatcherEmitter(CGP);
+
+  // First we size all the children of the three kinds of matchers that have
+  // them. This is done by sharing the code in EmitMatcher(). but we don't
+  // want to emit anything, so we turn off comments and use a null stream.
+  bool SaveOmitComments = OmitComments;
+  OmitComments = true;
+  raw_null_ostream NullOS;
+  unsigned TotalSize = MatcherEmitter.SizeMatcherList(TheMatcher, NullOS);
+  OmitComments = SaveOmitComments;
+
+  // Now that the matchers are sized, we can emit the code for them to the
+  // final stream.  (This second pass recomputes TotalSize; the sizing pass
+  // above exists only to fix each node's table index.)
+  OS << "{\n";
+  OS << "  // Some target values are emitted as 2 bytes, TARGET_VAL handles\n";
+  OS << "  // this.\n";
+  OS << "  #define TARGET_VAL(X) X & 255, unsigned(X) >> 8\n";
+  OS << "  static const unsigned char MatcherTable[] = {\n";
+  TotalSize = MatcherEmitter.EmitMatcherList(TheMatcher, 1, 0, OS);
+  OS << "    0\n  }; // Total Array size is " << (TotalSize+1) << " bytes\n\n";
+
+  MatcherEmitter.EmitHistogram(TheMatcher, OS);
+
+  OS << "  #undef TARGET_VAL\n";
+  OS << "  SelectCodeCommon(N, MatcherTable,sizeof(MatcherTable));\n";
+  OS << "}\n";
+  EndEmitFunction(OS);
+
+  // Next up, emit the function for node and pattern predicates:
+  MatcherEmitter.EmitPredicateFunctions(OS);
+
+  if (InstrumentCoverage)
+    MatcherEmitter.EmitPatternMatchTable(OS);
+
+  // Clean up the preprocessor macros.
+  OS << "\n";
+  OS << "#ifdef DAGISEL_INLINE\n";
+  OS << "#undef DAGISEL_INLINE\n";
+  OS << "#endif\n";
+  OS << "#ifdef DAGISEL_CLASS_COLONCOLON\n";
+  OS << "#undef DAGISEL_CLASS_COLONCOLON\n";
+  OS << "#endif\n";
+  OS << "#ifdef GET_DAGISEL_DECL\n";
+  OS << "#undef GET_DAGISEL_DECL\n";
+  OS << "#endif\n";
+  OS << "#ifdef GET_DAGISEL_BODY\n";
+  OS << "#undef GET_DAGISEL_BODY\n";
+  OS << "#endif\n";
+}
diff --git a/contrib/libs/llvm16/utils/TableGen/DAGISelMatcherGen.cpp b/contrib/libs/llvm16/utils/TableGen/DAGISelMatcherGen.cpp
new file mode 100644
index 0000000000..44bff4c67a
--- /dev/null
+++ b/contrib/libs/llvm16/utils/TableGen/DAGISelMatcherGen.cpp
@@ -0,0 +1,1111 @@
+//===- DAGISelMatcherGen.cpp - Matcher generator --------------------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+#include "CodeGenDAGPatterns.h"
+#include "CodeGenInstruction.h"
+#include "CodeGenRegisters.h"
+#include "DAGISelMatcher.h"
+#include "llvm/ADT/SmallVector.h"
+#include "llvm/ADT/StringMap.h"
+#include "llvm/TableGen/Error.h"
+#include "llvm/TableGen/Record.h"
+#include <utility>
+using namespace llvm;
+
+
+/// getRegisterValueType - Look up and return the ValueType of the specified
+/// register. If the register is a member of multiple register classes which
+/// have different associated types, return MVT::Other.
+static MVT::SimpleValueType getRegisterValueType(Record *R,
+                                                 const CodeGenTarget &T) {
+  bool FoundRC = false;
+  MVT::SimpleValueType VT = MVT::Other;
+  const CodeGenRegister *Reg = T.getRegBank().getReg(R);
+
+  for (const auto &RC : T.getRegBank().getRegClasses()) {
+    if (!RC.contains(Reg))
+      continue;
+
+    if (!FoundRC) {
+      // First register class containing Reg: its (simple) type is the answer.
+      FoundRC = true;
+      const ValueTypeByHwMode &VVT = RC.getValueTypeNum(0);
+      if (VVT.isSimple())
+        VT = VVT.getSimple().SimpleTy;
+      continue;
+    }
+
+#ifndef NDEBUG
+    // If this occurs in multiple register classes, they all have to agree.
+    // (Local renamed from 'T', which shadowed the CodeGenTarget parameter.)
+    const ValueTypeByHwMode &VVT = RC.getValueTypeNum(0);
+    assert((!VVT.isSimple() || VVT.getSimple().SimpleTy == VT) &&
+           "ValueType mismatch between register classes for this register");
+#endif
+  }
+  return VT;
+}
+
+
+namespace {
+  /// MatcherGen - Builds the Matcher tree for a single PatternToMatch: first
+  /// the structural/type checks that match the source pattern, then the nodes
+  /// that emit the destination pattern.
+  class MatcherGen {
+    const PatternToMatch &Pattern;
+    const CodeGenDAGPatterns &CGP;
+
+    /// PatWithNoTypes - This is a clone of Pattern.getSrcPattern() that starts
+    /// out with all of the types removed.  This allows us to insert type checks
+    /// as we scan the tree.
+    TreePatternNodePtr PatWithNoTypes;
+
+    /// VariableMap - A map from variable names ('$dst') to the recorded operand
+    /// number that they were captured as.  These are biased by 1 to make
+    /// insertion easier.
+    StringMap<unsigned> VariableMap;
+
+    /// This maintains the recorded operand number that OPC_CheckComplexPattern
+    /// drops each sub-operand into. We don't want to insert these into
+    /// VariableMap because that leads to identity checking if they are
+    /// encountered multiple times. Biased by 1 like VariableMap for
+    /// consistency.
+    StringMap<unsigned> NamedComplexPatternOperands;
+
+    /// NextRecordedOperandNo - As we emit opcodes to record matched values in
+    /// the RecordedNodes array, this keeps track of which slot will be next to
+    /// record into.
+    unsigned NextRecordedOperandNo;
+
+    /// MatchedChainNodes - This maintains the position in the recorded nodes
+    /// array of all of the recorded input nodes that have chains.
+    SmallVector<unsigned, 2> MatchedChainNodes;
+
+    /// MatchedComplexPatterns - This maintains a list of all of the
+    /// ComplexPatterns that we need to check. The second element of each pair
+    /// is the recorded operand number of the input node.
+    SmallVector<std::pair<const TreePatternNode*,
+                          unsigned>, 2> MatchedComplexPatterns;
+
+    /// PhysRegInputs - This list has an entry for each explicitly specified
+    /// physreg input to the pattern.  The first elt is the Register node, the
+    /// second is the recorded slot number the input pattern match saved it in.
+    SmallVector<std::pair<Record*, unsigned>, 2> PhysRegInputs;
+
+    /// Matcher - This is the top level of the generated matcher, the result.
+    Matcher *TheMatcher;
+
+    /// CurPredicate - As we emit matcher nodes, this points to the latest check
+    /// which should have future checks stuck into its Next position.
+    Matcher *CurPredicate;
+  public:
+    MatcherGen(const PatternToMatch &pattern, const CodeGenDAGPatterns &cgp);
+
+    bool EmitMatcherCode(unsigned Variant);
+    void EmitResultCode();
+
+    Matcher *GetMatcher() const { return TheMatcher; }
+  private:
+    void AddMatcher(Matcher *NewNode);
+    void InferPossibleTypes(unsigned ForceMode);
+
+    // Matcher Generation.
+    void EmitMatchCode(const TreePatternNode *N, TreePatternNode *NodeNoTypes,
+                       unsigned ForceMode);
+    void EmitLeafMatchCode(const TreePatternNode *N);
+    void EmitOperatorMatchCode(const TreePatternNode *N,
+                               TreePatternNode *NodeNoTypes,
+                               unsigned ForceMode);
+
+    /// If this is the first time a node with unique identifier Name has been
+    /// seen, record it. Otherwise, emit a check to make sure this is the same
+    /// node. Returns true if this is the first encounter.
+    bool recordUniqueNode(ArrayRef<std::string> Names);
+
+    // Result Code Generation.
+    unsigned getNamedArgumentSlot(StringRef Name) {
+      unsigned VarMapEntry = VariableMap[Name];
+      assert(VarMapEntry != 0 &&
+             "Variable referenced but not defined and not caught earlier!");
+      // Slots are stored biased by 1 so that 0 can mean "not yet seen".
+      return VarMapEntry-1;
+    }
+
+    void EmitResultOperand(const TreePatternNode *N,
+                           SmallVectorImpl<unsigned> &ResultOps);
+    void EmitResultOfNamedOperand(const TreePatternNode *N,
+                                  SmallVectorImpl<unsigned> &ResultOps);
+    void EmitResultLeafAsOperand(const TreePatternNode *N,
+                                 SmallVectorImpl<unsigned> &ResultOps);
+    void EmitResultInstructionAsOperand(const TreePatternNode *N,
+                                        SmallVectorImpl<unsigned> &ResultOps);
+    void EmitResultSDNodeXFormAsOperand(const TreePatternNode *N,
+                                        SmallVectorImpl<unsigned> &ResultOps);
+  };
+
+} // end anonymous namespace
+
+MatcherGen::MatcherGen(const PatternToMatch &pattern,
+                       const CodeGenDAGPatterns &cgp)
+: Pattern(pattern), CGP(cgp), NextRecordedOperandNo(0),
+  TheMatcher(nullptr), CurPredicate(nullptr) {
+  // We need to produce the matcher tree for the patterns source pattern. To do
+  // this we need to match the structure as well as the types. To do the type
+  // matching, we want to figure out the fewest number of type checks we need to
+  // emit. For example, if there is only one integer type supported by a
+  // target, there should be no type comparisons at all for integer patterns!
+  //
+  // To figure out the fewest number of type checks needed, clone the pattern,
+  // remove the types, then perform type inference on the pattern as a whole.
+  // If there are unresolved types, emit an explicit check for those types,
+  // apply the type to the tree, then rerun type inference. Iterate until all
+  // types are resolved.
+  //
+  // Clone the source pattern and strip its types; explicit checks will add
+  // them back one at a time as the matcher is emitted.
+  PatWithNoTypes = Pattern.getSrcPattern()->clone();
+  PatWithNoTypes->RemoveAllTypes();
+
+  // If there are types that are manifestly known, infer them.
+  InferPossibleTypes(Pattern.getForceMode());
+}
+
+/// InferPossibleTypes - As we emit the pattern, we end up generating type
+/// checks and applying them to the 'PatWithNoTypes' tree. As we do this, we
+/// want to propagate implied types as far throughout the tree as possible so
+/// that we avoid doing redundant type checks. This does the type propagation.
+void MatcherGen::InferPossibleTypes(unsigned ForceMode) {
+  // Any tree pattern will do here; it is only consulted for diagnostics,
+  // which cannot occur at this stage.
+  TreePattern &TP = *CGP.pf_begin()->second;
+  TP.getInfer().CodeGen = true;
+  TP.getInfer().ForceMode = ForceMode;
+
+  // Iterate type inference to a fixed point, ignoring register-class
+  // constraints.
+  bool Changed;
+  do {
+    Changed = PatWithNoTypes->ApplyTypeConstraints(TP,
+                                              true/*Ignore reg constraints*/);
+  } while (Changed);
+}
+
+
+/// AddMatcher - Add a matcher node to the current graph we're building.
+void MatcherGen::AddMatcher(Matcher *NewNode) {
+  // The very first node becomes the root; later nodes are appended to the
+  // tail of the chain.
+  if (!CurPredicate)
+    TheMatcher = NewNode;
+  else
+    CurPredicate->setNext(NewNode);
+  CurPredicate = NewNode;
+}
+
+
+//===----------------------------------------------------------------------===//
+// Pattern Match Generation
+//===----------------------------------------------------------------------===//
+
+/// EmitLeafMatchCode - Generate matching code for leaf nodes.
+void MatcherGen::EmitLeafMatchCode(const TreePatternNode *N) {
+  assert(N->isLeaf() && "Not a leaf?");
+
+  // Direct match against an integer constant.
+  if (IntInit *II = dyn_cast<IntInit>(N->getLeafValue())) {
+    // If this is the root of the dag we're matching, we emit a redundant opcode
+    // check to ensure that this gets folded into the normal top-level
+    // OpcodeSwitch.
+    if (N == Pattern.getSrcPattern()) {
+      const SDNodeInfo &NI = CGP.getSDNodeInfo(CGP.getSDNodeNamed("imm"));
+      AddMatcher(new CheckOpcodeMatcher(NI));
+    }
+
+    return AddMatcher(new CheckIntegerMatcher(II->getValue()));
+  }
+
+  // An UnsetInit represents a named node without any constraints.
+  if (isa<UnsetInit>(N->getLeafValue())) {
+    assert(N->hasName() && "Unnamed ? leaf");
+    return;
+  }
+
+  DefInit *DI = dyn_cast<DefInit>(N->getLeafValue());
+  if (!DI) {
+    errs() << "Unknown leaf kind: " << *N << "\n";
+    abort();
+  }
+
+  Record *LeafRec = DI->getDef();
+
+  // A ValueType leaf node can represent a register when named, or itself when
+  // unnamed.
+  if (LeafRec->isSubClassOf("ValueType")) {
+    // A named ValueType leaf always matches: (add i32:$a, i32:$b).
+    if (N->hasName())
+      return;
+    // An unnamed ValueType as in (sext_inreg GPR:$foo, i8).
+    return AddMatcher(new CheckValueTypeMatcher(LeafRec->getName()));
+  }
+
+  if (// Handle register references.  Nothing to do here, they always match.
+      LeafRec->isSubClassOf("RegisterClass") ||
+      LeafRec->isSubClassOf("RegisterOperand") ||
+      LeafRec->isSubClassOf("PointerLikeRegClass") ||
+      LeafRec->isSubClassOf("SubRegIndex") ||
+      // Place holder for SRCVALUE nodes. Nothing to do here.
+      LeafRec->getName() == "srcvalue")
+    return;
+
+  // If we have a physreg reference like (mul gpr:$src, EAX) then we need to
+  // record the register
+  if (LeafRec->isSubClassOf("Register")) {
+    AddMatcher(new RecordMatcher("physreg input "+LeafRec->getName().str(),
+                                 NextRecordedOperandNo));
+    PhysRegInputs.push_back(std::make_pair(LeafRec, NextRecordedOperandNo++));
+    return;
+  }
+
+  if (LeafRec->isSubClassOf("CondCode"))
+    return AddMatcher(new CheckCondCodeMatcher(LeafRec->getName()));
+
+  if (LeafRec->isSubClassOf("ComplexPattern")) {
+    // We can't model ComplexPattern uses that don't have their name taken yet.
+    // The OPC_CheckComplexPattern operation implicitly records the results.
+    if (N->getName().empty()) {
+      std::string S;
+      raw_string_ostream OS(S);
+      OS << "We expect complex pattern uses to have names: " << *N;
+      PrintFatalError(S);
+    }
+
+    // Remember this ComplexPattern so that we can emit it after all the other
+    // structural matches are done.
+    // VariableMap slots are biased by 1; the node must already have been
+    // recorded under this name, so subtract 1 to get the recorded slot.
+    unsigned InputOperand = VariableMap[N->getName()] - 1;
+    MatchedComplexPatterns.push_back(std::make_pair(N, InputOperand));
+    return;
+  }
+
+  if (LeafRec->getName() == "immAllOnesV") {
+    // If this is the root of the dag we're matching, we emit a redundant opcode
+    // check to ensure that this gets folded into the normal top-level
+    // OpcodeSwitch.
+    if (N == Pattern.getSrcPattern()) {
+      // Splats arrive as build_vector for fixed-width vectors and
+      // splat_vector for scalable vectors.
+      MVT VT = N->getSimpleType(0);
+      StringRef Name = VT.isScalableVector() ? "splat_vector" : "build_vector";
+      const SDNodeInfo &NI = CGP.getSDNodeInfo(CGP.getSDNodeNamed(Name));
+      AddMatcher(new CheckOpcodeMatcher(NI));
+    }
+    return AddMatcher(new CheckImmAllOnesVMatcher());
+  }
+  if (LeafRec->getName() == "immAllZerosV") {
+    // If this is the root of the dag we're matching, we emit a redundant opcode
+    // check to ensure that this gets folded into the normal top-level
+    // OpcodeSwitch.
+    if (N == Pattern.getSrcPattern()) {
+      MVT VT = N->getSimpleType(0);
+      StringRef Name = VT.isScalableVector() ? "splat_vector" : "build_vector";
+      const SDNodeInfo &NI = CGP.getSDNodeInfo(CGP.getSDNodeNamed(Name));
+      AddMatcher(new CheckOpcodeMatcher(NI));
+    }
+    return AddMatcher(new CheckImmAllZerosVMatcher());
+  }
+
+  errs() << "Unknown leaf kind: " << *N << "\n";
+  abort();
+}
+
+/// EmitOperatorMatchCode - Generate matching code for interior (non-leaf)
+/// pattern nodes: opcode checks, chain/glue bookkeeping, and recursive
+/// matching of all children.
+void MatcherGen::EmitOperatorMatchCode(const TreePatternNode *N,
+                                       TreePatternNode *NodeNoTypes,
+                                       unsigned ForceMode) {
+  assert(!N->isLeaf() && "Not an operator?");
+
+  if (N->getOperator()->isSubClassOf("ComplexPattern")) {
+    // The "name" of a non-leaf complex pattern (MY_PAT $op1, $op2) is
+    // "MY_PAT:op1:op2". We should already have validated that the uses are
+    // consistent.
+    std::string PatternName = std::string(N->getOperator()->getName());
+    for (unsigned i = 0; i < N->getNumChildren(); ++i) {
+      PatternName += ":";
+      PatternName += N->getChild(i)->getName();
+    }
+
+    if (recordUniqueNode(PatternName)) {
+      auto NodeAndOpNum = std::make_pair(N, NextRecordedOperandNo - 1);
+      MatchedComplexPatterns.push_back(NodeAndOpNum);
+    }
+
+    return;
+  }
+
+  const SDNodeInfo &CInfo = CGP.getSDNodeInfo(N->getOperator());
+
+  // If this is an 'and R, 1234' where the operation is AND/OR and the RHS is
+  // a constant without a predicate fn that has more than one bit set, handle
+  // this as a special case. This is usually for targets that have special
+  // handling of certain large constants (e.g. alpha with it's 8/16/32-bit
+  // handling stuff). Using these instructions is often far more efficient
+  // than materializing the constant. Unfortunately, both the instcombiner
+  // and the dag combiner can often infer that bits are dead, and thus drop
+  // them from the mask in the dag. For example, it might turn 'AND X, 255'
+  // into 'AND X, 254' if it knows the low bit is set. Emit code that checks
+  // to handle this.
+  if ((N->getOperator()->getName() == "and" ||
+       N->getOperator()->getName() == "or") &&
+      N->getChild(1)->isLeaf() && N->getChild(1)->getPredicateCalls().empty() &&
+      N->getPredicateCalls().empty()) {
+    if (IntInit *II = dyn_cast<IntInit>(N->getChild(1)->getLeafValue())) {
+      if (!isPowerOf2_32(II->getValue())) {  // Don't bother with single bits.
+        // If this is at the root of the pattern, we emit a redundant
+        // CheckOpcode so that the following checks get factored properly under
+        // a single opcode check.
+        if (N == Pattern.getSrcPattern())
+          AddMatcher(new CheckOpcodeMatcher(CInfo));
+
+        // Emit the CheckAndImm/CheckOrImm node.
+        if (N->getOperator()->getName() == "and")
+          AddMatcher(new CheckAndImmMatcher(II->getValue()));
+        else
+          AddMatcher(new CheckOrImmMatcher(II->getValue()));
+
+        // Match the LHS of the AND as appropriate.
+        AddMatcher(new MoveChildMatcher(0));
+        EmitMatchCode(N->getChild(0), NodeNoTypes->getChild(0), ForceMode);
+        AddMatcher(new MoveParentMatcher());
+        return;
+      }
+    }
+  }
+
+  // Check that the current opcode lines up.
+  AddMatcher(new CheckOpcodeMatcher(CInfo));
+
+  // If this node has memory references (i.e. is a load or store), tell the
+  // interpreter to capture them in the memref array.
+  if (N->NodeHasProperty(SDNPMemOperand, CGP))
+    AddMatcher(new RecordMemRefMatcher());
+
+  // If this node has a chain, then the chain is operand #0 of the SDNode, and
+  // the child numbers of the node are all offset by one.
+  unsigned OpNo = 0;
+  if (N->NodeHasProperty(SDNPHasChain, CGP)) {
+    // Record the node and remember it in our chained nodes list.
+    AddMatcher(new RecordMatcher("'" + N->getOperator()->getName().str() +
+                                 "' chained node",
+                                 NextRecordedOperandNo));
+    // Remember all of the input chains our pattern will match.
+    MatchedChainNodes.push_back(NextRecordedOperandNo++);
+
+    // Don't look at the input chain when matching the tree pattern to the
+    // SDNode.
+    OpNo = 1;
+
+    // If this node is not the root and the subtree underneath it produces a
+    // chain, then the result of matching the node also produces a chain.
+    // Beyond that, this means that we're also folding (at least) the root node
+    // into the node that produces the chain (for example, matching
+    // "(add reg, (load ptr))" as a add_with_memory on X86). This is
+    // problematic, if the 'reg' node also uses the load (say, its chain).
+    // Graphically:
+    //
+    //         [LD]
+    //         ^  ^
+    //         |  \                              DAG's like cheese.
+    //        /    |
+    //       /    [YY]
+    //       |     ^
+    //      [XX]--/
+    //
+    // It would be invalid to fold XX and LD. In this case, folding the two
+    // nodes together would induce a cycle in the DAG, making it a 'cyclic DAG'
+    // To prevent this, we emit a dynamic check for legality before allowing
+    // this to be folded.
+    //
+    const TreePatternNode *Root = Pattern.getSrcPattern();
+    if (N != Root) {                             // Not the root of the pattern.
+      // If there is a node between the root and this node, then we definitely
+      // need to emit the check.
+      bool NeedCheck = !Root->hasChild(N);
+
+      // If it *is* an immediate child of the root, we can still need a check if
+      // the root SDNode has multiple inputs. For us, this means that it is an
+      // intrinsic, has multiple operands, or has other inputs like chain or
+      // glue).
+      if (!NeedCheck) {
+        const SDNodeInfo &PInfo = CGP.getSDNodeInfo(Root->getOperator());
+        NeedCheck =
+          Root->getOperator() == CGP.get_intrinsic_void_sdnode() ||
+          Root->getOperator() == CGP.get_intrinsic_w_chain_sdnode() ||
+          Root->getOperator() == CGP.get_intrinsic_wo_chain_sdnode() ||
+          PInfo.getNumOperands() > 1 ||
+          PInfo.hasProperty(SDNPHasChain) ||
+          PInfo.hasProperty(SDNPInGlue) ||
+          PInfo.hasProperty(SDNPOptInGlue);
+      }
+
+      if (NeedCheck)
+        AddMatcher(new CheckFoldableChainNodeMatcher());
+    }
+  }
+
+  // If this node has an output glue and isn't the root, remember it.
+  if (N->NodeHasProperty(SDNPOutGlue, CGP) &&
+      N != Pattern.getSrcPattern()) {
+    // TODO: This redundantly records nodes with both glues and chains.
+
+    // Record the node and remember it in our chained nodes list.
+    AddMatcher(new RecordMatcher("'" + N->getOperator()->getName().str() +
+                                 "' glue output node",
+                                 NextRecordedOperandNo));
+  }
+
+  // If this node is known to have an input glue or if it *might* have an input
+  // glue, capture it as the glue input of the pattern.
+  if (N->NodeHasProperty(SDNPOptInGlue, CGP) ||
+      N->NodeHasProperty(SDNPInGlue, CGP))
+    AddMatcher(new CaptureGlueInputMatcher());
+
+  for (unsigned i = 0, e = N->getNumChildren(); i != e; ++i, ++OpNo) {
+    // Get the code suitable for matching this child. Move to the child, check
+    // it then move back to the parent.
+    AddMatcher(new MoveChildMatcher(OpNo));
+    EmitMatchCode(N->getChild(i), NodeNoTypes->getChild(i), ForceMode);
+    AddMatcher(new MoveParentMatcher());
+  }
+}
+
+/// If this is the first time a node with unique identifier Name has been
+/// seen, record it. Otherwise, emit a check to make sure this is the same
+/// node. Returns true if this is the first encounter.
+bool MatcherGen::recordUniqueNode(ArrayRef<std::string> Names) {
+  // All aliases in Names must agree on a single recorded slot (0 means
+  // "not yet seen"; slots are biased by 1).
+  unsigned Entry = 0;
+  for (const std::string &Name : Names) {
+    unsigned &VarMapEntry = VariableMap[Name];
+    if (!Entry)
+      Entry = VarMapEntry;
+    assert(Entry == VarMapEntry);
+  }
+
+  bool NewRecord = false;
+  if (Entry == 0) {
+    // If it is a named node, we must emit a 'Record' opcode.
+    std::string WhatFor;
+    for (const std::string &Name : Names) {
+      if (!WhatFor.empty())
+        WhatFor += ',';
+      WhatFor += "$" + Name;
+    }
+    AddMatcher(new RecordMatcher(WhatFor, NextRecordedOperandNo));
+    Entry = ++NextRecordedOperandNo;
+    NewRecord = true;
+  } else {
+    // If we get here, this is a second reference to a specific name.  Since
+    // we already have checked that the first reference is valid, we don't
+    // have to recursively match it, just check that it's the same as the
+    // previously named thing.
+    AddMatcher(new CheckSameMatcher(Entry-1));
+  }
+
+  // Propagate the (possibly new) slot to every alias.
+  for (const std::string &Name : Names)
+    VariableMap[Name] = Entry;
+
+  return NewRecord;
+}
+
+/// EmitMatchCode - Emit all checks needed to match node \p N: type checks,
+/// record/identity checks for named nodes, structural checks (leaf or
+/// operator), and finally any predicate checks attached to the node.
+void MatcherGen::EmitMatchCode(const TreePatternNode *N,
+                               TreePatternNode *NodeNoTypes,
+                               unsigned ForceMode) {
+  // If N and NodeNoTypes don't agree on a type, then this is a case where we
+  // need to do a type check.  Emit the check, apply the type to NodeNoTypes and
+  // reinfer any correlated types.
+  SmallVector<unsigned, 2> ResultsToTypeCheck;
+
+  for (unsigned i = 0, e = NodeNoTypes->getNumTypes(); i != e; ++i) {
+    if (NodeNoTypes->getExtType(i) == N->getExtType(i)) continue;
+    NodeNoTypes->setType(i, N->getExtType(i));
+    InferPossibleTypes(ForceMode);
+    ResultsToTypeCheck.push_back(i);
+  }
+
+  // If this node has a name associated with it, capture it in VariableMap. If
+  // we already saw this in the pattern, emit code to verify dagness.
+  SmallVector<std::string, 4> Names;
+  if (!N->getName().empty())
+    Names.push_back(N->getName());
+
+  // Names introduced by predicate arguments are scoped: "pred:<scope>:<id>".
+  for (const ScopedName &Name : N->getNamesAsPredicateArg()) {
+    Names.push_back(("pred:" + Twine(Name.getScope()) + ":" + Name.getIdentifier()).str());
+  }
+
+  if (!Names.empty()) {
+    // A repeated name only needs the CheckSame emitted by recordUniqueNode;
+    // the subtree itself was already matched at the first occurrence.
+    if (!recordUniqueNode(Names))
+      return;
+  }
+
+  if (N->isLeaf())
+    EmitLeafMatchCode(N);
+  else
+    EmitOperatorMatchCode(N, NodeNoTypes, ForceMode);
+
+  // If there are node predicates for this node, generate their checks.
+  for (unsigned i = 0, e = N->getPredicateCalls().size(); i != e; ++i) {
+    const TreePredicateCall &Pred = N->getPredicateCalls()[i];
+    SmallVector<unsigned, 4> Operands;
+    if (Pred.Fn.usesOperands()) {
+      // Gather the recorded slots of the predicate's named arguments.
+      TreePattern *TP = Pred.Fn.getOrigPatFragRecord();
+      for (unsigned i = 0; i < TP->getNumArgs(); ++i) {
+        std::string Name =
+            ("pred:" + Twine(Pred.Scope) + ":" + TP->getArgName(i)).str();
+        Operands.push_back(getNamedArgumentSlot(Name));
+      }
+    }
+    AddMatcher(new CheckPredicateMatcher(Pred.Fn, Operands));
+  }
+
+  for (unsigned i = 0, e = ResultsToTypeCheck.size(); i != e; ++i)
+    AddMatcher(new CheckTypeMatcher(N->getSimpleType(ResultsToTypeCheck[i]),
+                                    ResultsToTypeCheck[i]));
+}
+
+/// EmitMatcherCode - Generate the code that matches the predicate of this
+/// pattern for the specified Variant.  If the variant is invalid this returns
+/// true and does not generate code, if it is valid, it returns false.
+bool MatcherGen::EmitMatcherCode(unsigned Variant) {
+  // If the root of the pattern is a ComplexPattern and if it is specified to
+  // match some number of root opcodes, these are considered to be our variants.
+  // Depending on which variant we're generating code for, emit the root opcode
+  // check.
+  if (const ComplexPattern *CP =
+          Pattern.getSrcPattern()->getComplexPatternInfo(CGP)) {
+    const std::vector<Record*> &OpNodes = CP->getRootNodes();
+    assert(!OpNodes.empty() &&"Complex Pattern must specify what it can match");
+    if (Variant >= OpNodes.size()) return true;
+
+    AddMatcher(new CheckOpcodeMatcher(CGP.getSDNodeInfo(OpNodes[Variant])));
+  } else {
+    // Non-ComplexPattern roots have exactly one variant.
+    if (Variant != 0) return true;
+  }
+
+  // Emit the matcher for the pattern structure and types.
+  EmitMatchCode(Pattern.getSrcPattern(), PatWithNoTypes.get(),
+                Pattern.getForceMode());
+
+  // If the pattern has a predicate on it (e.g. only enabled when a subtarget
+  // feature is around, do the check).
+  if (!Pattern.getPredicateCheck().empty())
+    AddMatcher(new CheckPatternPredicateMatcher(Pattern.getPredicateCheck()));
+
+  // Now that we've completed the structural type match, emit any ComplexPattern
+  // checks (e.g. addrmode matches).  We emit this after the structural match
+  // because they are generally more expensive to evaluate and more difficult to
+  // factor.
+  for (unsigned i = 0, e = MatchedComplexPatterns.size(); i != e; ++i) {
+    auto N = MatchedComplexPatterns[i].first;
+
+    // Remember where the results of this match get stuck.  Slots are biased
+    // by one so a zero lookup in NamedComplexPatternOperands means "absent".
+    if (N->isLeaf()) {
+      NamedComplexPatternOperands[N->getName()] = NextRecordedOperandNo + 1;
+    } else {
+      unsigned CurOp = NextRecordedOperandNo;
+      for (unsigned i = 0; i < N->getNumChildren(); ++i) {
+        NamedComplexPatternOperands[N->getChild(i)->getName()] = CurOp + 1;
+        CurOp += N->getChild(i)->getNumMIResults(CGP);
+      }
+    }
+
+    // Get the slot we recorded the value in from the name on the node.
+    unsigned RecNodeEntry = MatchedComplexPatterns[i].second;
+
+    const ComplexPattern &CP = *N->getComplexPatternInfo(CGP);
+
+    // Emit a CheckComplexPat operation, which does the match (aborting if it
+    // fails) and pushes the matched operands onto the recorded nodes list.
+    AddMatcher(new CheckComplexPatMatcher(CP, RecNodeEntry,
+                                          N->getName(), NextRecordedOperandNo));
+
+    // Record the right number of operands.
+    NextRecordedOperandNo += CP.getNumOperands();
+    if (CP.hasProperty(SDNPHasChain)) {
+      // If the complex pattern has a chain, then we need to keep track of the
+      // fact that we just recorded a chain input.  The chain input will be
+      // matched as the last operand of the predicate if it was successful.
+      ++NextRecordedOperandNo; // Chained node operand.
+
+      // It is the last operand recorded.
+      assert(NextRecordedOperandNo > 1 &&
+             "Should have recorded input/result chains at least!");
+      MatchedChainNodes.push_back(NextRecordedOperandNo-1);
+    }
+
+    // TODO: Complex patterns can't have output glues, if they did, we'd want
+    // to record them.
+  }
+
+  return false;
+}
+
+
+//===----------------------------------------------------------------------===//
+// Node Result Generation
+//===----------------------------------------------------------------------===//
+
+/// Emit the result operand(s) for a node named in the source pattern by
+/// pushing the slot number(s) it was recorded in onto \p ResultOps.
+void MatcherGen::EmitResultOfNamedOperand(const TreePatternNode *N,
+                                          SmallVectorImpl<unsigned> &ResultOps){
+  assert(!N->getName().empty() && "Operand not named!");
+
+  // NamedComplexPatternOperands entries are biased by one; zero means this
+  // name is not a complex pattern operand.
+  if (unsigned SlotNo = NamedComplexPatternOperands[N->getName()]) {
+    // Complex operands have already been completely selected, just find the
+    // right slot and add the arguments directly.
+    for (unsigned i = 0; i < N->getNumMIResults(CGP); ++i)
+      ResultOps.push_back(SlotNo - 1 + i);
+
+    return;
+  }
+
+  unsigned SlotNo = getNamedArgumentSlot(N->getName());
+
+  // If this is an 'imm' or 'fpimm' node, make sure to convert it to the target
+  // version of the immediate so that it doesn't get selected due to some other
+  // node use.
+  if (!N->isLeaf()) {
+    StringRef OperatorName = N->getOperator()->getName();
+    if (OperatorName == "imm" || OperatorName == "fpimm") {
+      AddMatcher(new EmitConvertToTargetMatcher(SlotNo));
+      ResultOps.push_back(NextRecordedOperandNo++);
+      return;
+    }
+  }
+
+  // Ordinary named operand: forward the recorded slot(s) unchanged.
+  for (unsigned i = 0; i < N->getNumMIResults(CGP); ++i)
+    ResultOps.push_back(SlotNo + i);
+}
+
+/// Emit code to produce a result operand for an unnamed leaf in the output
+/// pattern: an immediate, a physical register, zero_reg, undef_tied_input,
+/// a register class ID, or a subregister index.
+void MatcherGen::EmitResultLeafAsOperand(const TreePatternNode *N,
+                                         SmallVectorImpl<unsigned> &ResultOps) {
+  assert(N->isLeaf() && "Must be a leaf");
+
+  // Integer literal: emit it as a target constant of the node's type.
+  if (IntInit *II = dyn_cast<IntInit>(N->getLeafValue())) {
+    AddMatcher(new EmitIntegerMatcher(II->getValue(), N->getSimpleType(0)));
+    ResultOps.push_back(NextRecordedOperandNo++);
+    return;
+  }
+
+  // If this is an explicit register reference, handle it.
+  if (DefInit *DI = dyn_cast<DefInit>(N->getLeafValue())) {
+    Record *Def = DI->getDef();
+    if (Def->isSubClassOf("Register")) {
+      const CodeGenRegister *Reg =
+        CGP.getTargetInfo().getRegBank().getReg(Def);
+      AddMatcher(new EmitRegisterMatcher(Reg, N->getSimpleType(0)));
+      ResultOps.push_back(NextRecordedOperandNo++);
+      return;
+    }
+
+    // 'zero_reg' is encoded as a null register pointer.
+    if (Def->getName() == "zero_reg") {
+      AddMatcher(new EmitRegisterMatcher(nullptr, N->getSimpleType(0)));
+      ResultOps.push_back(NextRecordedOperandNo++);
+      return;
+    }
+
+    // 'undef_tied_input' materializes an IMPLICIT_DEF of the right type.
+    if (Def->getName() == "undef_tied_input") {
+      std::array<MVT::SimpleValueType, 1> ResultVTs = {{ N->getSimpleType(0) }};
+      std::array<unsigned, 0> InstOps;
+      auto IDOperandNo = NextRecordedOperandNo++;
+      AddMatcher(new EmitNodeMatcher("TargetOpcode::IMPLICIT_DEF",
+                                     ResultVTs, InstOps, false, false, false,
+                                     false, -1, IDOperandNo));
+      ResultOps.push_back(IDOperandNo);
+      return;
+    }
+
+    // Handle a reference to a register class. This is used
+    // in COPY_TO_SUBREG instructions.
+    if (Def->isSubClassOf("RegisterOperand"))
+      Def = Def->getValueAsDef("RegClass");
+    if (Def->isSubClassOf("RegisterClass")) {
+      // If the register class has an enum integer value greater than 127, the
+      // encoding overflows the limit of 7 bits, which precludes the use of
+      // StringIntegerMatcher. In this case, fallback to using IntegerMatcher.
+      const CodeGenRegisterClass &RC =
+          CGP.getTargetInfo().getRegisterClass(Def);
+      if (RC.EnumValue <= 127) {
+        std::string Value = getQualifiedName(Def) + "RegClassID";
+        AddMatcher(new EmitStringIntegerMatcher(Value, MVT::i32));
+        ResultOps.push_back(NextRecordedOperandNo++);
+      } else {
+        AddMatcher(new EmitIntegerMatcher(RC.EnumValue, MVT::i32));
+        ResultOps.push_back(NextRecordedOperandNo++);
+      }
+      return;
+    }
+
+    // Handle a subregister index. This is used for INSERT_SUBREG etc.
+    if (Def->isSubClassOf("SubRegIndex")) {
+      const CodeGenRegBank &RB = CGP.getTargetInfo().getRegBank();
+      // If we have more than 127 subreg indices the encoding can overflow
+      // 7 bit and we cannot use StringInteger.
+      if (RB.getSubRegIndices().size() > 127) {
+        const CodeGenSubRegIndex *I = RB.findSubRegIdx(Def);
+        assert(I && "Cannot find subreg index by name!");
+        if (I->EnumValue > 127) {
+          AddMatcher(new EmitIntegerMatcher(I->EnumValue, MVT::i32));
+          ResultOps.push_back(NextRecordedOperandNo++);
+          return;
+        }
+      }
+      // Small enough to emit symbolically for readability of the table.
+      std::string Value = getQualifiedName(Def);
+      AddMatcher(new EmitStringIntegerMatcher(Value, MVT::i32));
+      ResultOps.push_back(NextRecordedOperandNo++);
+      return;
+    }
+  }
+
+  // Unknown leaf kind: report it but keep going (an operand is simply missing
+  // from the emitted node).
+  errs() << "unhandled leaf node:\n";
+  N->dump();
+}
+
+/// Return true if the instruction selected for result node \p N is flagged
+/// mayLoad or mayStore in its CodeGenInstruction description.
+static bool
+mayInstNodeLoadOrStore(const TreePatternNode *N,
+                       const CodeGenDAGPatterns &CGP) {
+  const CodeGenInstruction &InstDesc =
+      CGP.getTargetInfo().getInstruction(N->getOperator());
+  return InstDesc.mayLoad || InstDesc.mayStore;
+}
+
+/// Count, within the instruction subtree rooted at \p N, how many nodes are
+/// instructions that may load or store.  Leaves and non-instruction operators
+/// contribute zero and — deliberately — their children are not visited.
+static unsigned
+numNodesThatMayLoadOrStore(const TreePatternNode *N,
+                           const CodeGenDAGPatterns &CGP) {
+  if (N->isLeaf() || !N->getOperator()->isSubClassOf("Instruction"))
+    return 0;
+
+  unsigned Total = mayInstNodeLoadOrStore(N, CGP) ? 1 : 0;
+  for (unsigned Idx = 0, NumKids = N->getNumChildren(); Idx != NumKids; ++Idx)
+    Total += numNodesThatMayLoadOrStore(N->getChild(Idx), CGP);
+  return Total;
+}
+
+/// Emit the matcher opcodes that build one machine instruction of the result
+/// pattern, pushing the slots of its recorded results onto \p OutputOps.
+/// Handles default/predicate operands, sub-operand expansion, variadic ops,
+/// physreg copies, chain/glue wiring, and memoperand attachment.
+void MatcherGen::
+EmitResultInstructionAsOperand(const TreePatternNode *N,
+                               SmallVectorImpl<unsigned> &OutputOps) {
+  Record *Op = N->getOperator();
+  const CodeGenTarget &CGT = CGP.getTargetInfo();
+  CodeGenInstruction &II = CGT.getInstruction(Op);
+  const DAGInstruction &Inst = CGP.getInstruction(Op);
+
+  bool isRoot = N == Pattern.getDstPattern();
+
+  // TreeHasOutGlue - True if this tree has glue.
+  bool TreeHasInGlue = false, TreeHasOutGlue = false;
+  if (isRoot) {
+    const TreePatternNode *SrcPat = Pattern.getSrcPattern();
+    TreeHasInGlue = SrcPat->TreeHasProperty(SDNPOptInGlue, CGP) ||
+                    SrcPat->TreeHasProperty(SDNPInGlue, CGP);
+
+    // FIXME2: this is checking the entire pattern, not just the node in
+    // question, doing this just for the root seems like a total hack.
+    TreeHasOutGlue = SrcPat->TreeHasProperty(SDNPOutGlue, CGP);
+  }
+
+  // NumResults - This is the number of results produced by the instruction in
+  // the "outs" list.
+  unsigned NumResults = Inst.getNumResults();
+
+  // Number of operands we know the output instruction must have. If it is
+  // variadic, we could have more operands.
+  unsigned NumFixedOperands = II.Operands.size();
+
+  SmallVector<unsigned, 8> InstOps;
+
+  // Loop over all of the fixed operands of the instruction pattern, emitting
+  // code to fill them all in. The node 'N' usually has number children equal to
+  // the number of input operands of the instruction.  However, in cases where
+  // there are predicate operands for an instruction, we need to fill in the
+  // 'execute always' values. Match up the node operands to the instruction
+  // operands to do this.
+  unsigned ChildNo = 0;
+
+  // Similarly to the code in TreePatternNode::ApplyTypeConstraints, count the
+  // number of operands at the end of the list which have default values.
+  // Those can come from the pattern if it provides enough arguments, or be
+  // filled in with the default if the pattern hasn't provided them. But any
+  // operand with a default value _before_ the last mandatory one will be
+  // filled in with their defaults unconditionally.
+  unsigned NonOverridableOperands = NumFixedOperands;
+  while (NonOverridableOperands > NumResults &&
+         CGP.operandHasDefault(II.Operands[NonOverridableOperands-1].Rec))
+    --NonOverridableOperands;
+
+  for (unsigned InstOpNo = NumResults, e = NumFixedOperands;
+       InstOpNo != e; ++InstOpNo) {
+    // Determine what to emit for this operand.
+    Record *OperandNode = II.Operands[InstOpNo].Rec;
+    if (CGP.operandHasDefault(OperandNode) &&
+        (InstOpNo < NonOverridableOperands || ChildNo >= N->getNumChildren())) {
+      // This is a predicate or optional def operand which the pattern has not
+      // overridden, or which we aren't letting it override; emit the 'default
+      // ops' operands.
+      const DAGDefaultOperand &DefaultOp
+        = CGP.getDefaultOperand(OperandNode);
+      for (unsigned i = 0, e = DefaultOp.DefaultOps.size(); i != e; ++i)
+        EmitResultOperand(DefaultOp.DefaultOps[i].get(), InstOps);
+      continue;
+    }
+
+    // Otherwise this is a normal operand or a predicate operand without
+    // 'execute always'; emit it.
+
+    // For operands with multiple sub-operands we may need to emit
+    // multiple child patterns to cover them all.  However, ComplexPattern
+    // children may themselves emit multiple MI operands.
+    unsigned NumSubOps = 1;
+    if (OperandNode->isSubClassOf("Operand")) {
+      DagInit *MIOpInfo = OperandNode->getValueAsDag("MIOperandInfo");
+      if (unsigned NumArgs = MIOpInfo->getNumArgs())
+        NumSubOps = NumArgs;
+    }
+
+    unsigned FinalNumOps = InstOps.size() + NumSubOps;
+    while (InstOps.size() < FinalNumOps) {
+      const TreePatternNode *Child = N->getChild(ChildNo);
+      unsigned BeforeAddingNumOps = InstOps.size();
+      EmitResultOperand(Child, InstOps);
+      assert(InstOps.size() > BeforeAddingNumOps && "Didn't add any operands");
+
+      // If the operand is an instruction and it produced multiple results, just
+      // take the first one.
+      if (!Child->isLeaf() && Child->getOperator()->isSubClassOf("Instruction"))
+        InstOps.resize(BeforeAddingNumOps+1);
+
+      ++ChildNo;
+    }
+  }
+
+  // If this is a variadic output instruction (i.e. REG_SEQUENCE), we can't
+  // expand suboperands, use default operands, or other features determined from
+  // the CodeGenInstruction after the fixed operands, which were handled
+  // above. Emit the remaining instructions implicitly added by the use for
+  // variable_ops.
+  if (II.Operands.isVariadic) {
+    for (unsigned I = ChildNo, E = N->getNumChildren(); I < E; ++I)
+      EmitResultOperand(N->getChild(I), InstOps);
+  }
+
+  // If this node has input glue or explicitly specified input physregs, we
+  // need to add chained and glued copyfromreg nodes and materialize the glue
+  // input.
+  if (isRoot && !PhysRegInputs.empty()) {
+    // Emit all of the CopyToReg nodes for the input physical registers.  These
+    // occur in patterns like (mul:i8 AL:i8, GR8:i8:$src).
+    for (unsigned i = 0, e = PhysRegInputs.size(); i != e; ++i) {
+      const CodeGenRegister *Reg =
+        CGP.getTargetInfo().getRegBank().getReg(PhysRegInputs[i].first);
+      AddMatcher(new EmitCopyToRegMatcher(PhysRegInputs[i].second,
+                                          Reg));
+    }
+
+    // Even if the node has no other glue inputs, the resultant node must be
+    // glued to the CopyFromReg nodes we just generated.
+    TreeHasInGlue = true;
+  }
+
+  // Result order: node results, chain, glue
+
+  // Determine the result types.
+  SmallVector<MVT::SimpleValueType, 4> ResultVTs;
+  for (unsigned i = 0, e = N->getNumTypes(); i != e; ++i)
+    ResultVTs.push_back(N->getSimpleType(i));
+
+  // If this is the root instruction of a pattern that has physical registers in
+  // its result pattern, add output VTs for them.  For example, X86 has:
+  //   (set AL, (mul ...))
+  // This also handles implicit results like:
+  //   (implicit EFLAGS)
+  if (isRoot && !Pattern.getDstRegs().empty()) {
+    // If the root came from an implicit def in the instruction handling stuff,
+    // don't re-add it.
+    Record *HandledReg = nullptr;
+    if (II.HasOneImplicitDefWithKnownVT(CGT) != MVT::Other)
+      HandledReg = II.ImplicitDefs[0];
+
+    for (Record *Reg : Pattern.getDstRegs()) {
+      if (!Reg->isSubClassOf("Register") || Reg == HandledReg) continue;
+      ResultVTs.push_back(getRegisterValueType(Reg, CGT));
+    }
+  }
+
+  // If this is the root of the pattern and the pattern we're matching includes
+  // a node that is variadic, mark the generated node as variadic so that it
+  // gets the excess operands from the input DAG.
+  int NumFixedArityOperands = -1;
+  if (isRoot &&
+      Pattern.getSrcPattern()->NodeHasProperty(SDNPVariadic, CGP))
+    NumFixedArityOperands = Pattern.getSrcPattern()->getNumChildren();
+
+  // If this is the root node and multiple matched nodes in the input pattern
+  // have MemRefs in them, have the interpreter collect them and plop them onto
+  // this node. If there is just one node with MemRefs, leave them on that node
+  // even if it is not the root.
+  //
+  // FIXME3: This is actively incorrect for result patterns with multiple
+  // memory-referencing instructions.
+  bool PatternHasMemOperands =
+    Pattern.getSrcPattern()->TreeHasProperty(SDNPMemOperand, CGP);
+
+  bool NodeHasMemRefs = false;
+  if (PatternHasMemOperands) {
+    unsigned NumNodesThatLoadOrStore =
+      numNodesThatMayLoadOrStore(Pattern.getDstPattern(), CGP);
+    bool NodeIsUniqueLoadOrStore = mayInstNodeLoadOrStore(N, CGP) &&
+                                   NumNodesThatLoadOrStore == 1;
+    NodeHasMemRefs =
+      NodeIsUniqueLoadOrStore || (isRoot && (mayInstNodeLoadOrStore(N, CGP) ||
+                                             NumNodesThatLoadOrStore != 1));
+  }
+
+  // Determine whether we need to attach a chain to this node.
+  bool NodeHasChain = false;
+  if (Pattern.getSrcPattern()->TreeHasProperty(SDNPHasChain, CGP)) {
+    // For some instructions, we were able to infer from the pattern whether
+    // they should have a chain.  Otherwise, attach the chain to the root.
+    //
+    // FIXME2: This is extremely dubious for several reasons, not the least of
+    // which it gives special status to instructions with patterns that Pat<>
+    // nodes can't duplicate.
+    if (II.hasChain_Inferred)
+      NodeHasChain = II.hasChain;
+    else
+      NodeHasChain = isRoot;
+    // Instructions which load and store from memory should have a chain,
+    // regardless of whether they happen to have a pattern saying so.
+    if (II.hasCtrlDep || II.mayLoad || II.mayStore || II.canFoldAsLoad ||
+        II.hasSideEffects)
+      NodeHasChain = true;
+  }
+
+  assert((!ResultVTs.empty() || TreeHasOutGlue || NodeHasChain) &&
+         "Node has no result");
+
+  AddMatcher(new EmitNodeMatcher(II.Namespace.str()+"::"+II.TheDef->getName().str(),
+                                 ResultVTs, InstOps,
+                                 NodeHasChain, TreeHasInGlue, TreeHasOutGlue,
+                                 NodeHasMemRefs, NumFixedArityOperands,
+                                 NextRecordedOperandNo));
+
+  // The non-chain and non-glue results of the newly emitted node get recorded.
+  for (unsigned i = 0, e = ResultVTs.size(); i != e; ++i) {
+    if (ResultVTs[i] == MVT::Other || ResultVTs[i] == MVT::Glue) break;
+    OutputOps.push_back(NextRecordedOperandNo++);
+  }
+}
+
+/// Emit a result operand produced by applying an SDNodeXForm: evaluate the
+/// single input operand, then emit the transform and record its output slot.
+void MatcherGen::
+EmitResultSDNodeXFormAsOperand(const TreePatternNode *N,
+                               SmallVectorImpl<unsigned> &ResultOps) {
+  assert(N->getOperator()->isSubClassOf("SDNodeXForm") && "Not SDNodeXForm?");
+
+  // FIXME2: Could easily generalize this to support multiple inputs and
+  // outputs to the SDNodeXForm.  For now we just support one input and one
+  // output like the old instruction selector.
+  assert(N->getNumChildren() == 1);
+
+  // Evaluate the operand that feeds the transform.
+  SmallVector<unsigned, 8> XFormInputs;
+  EmitResultOperand(N->getChild(0), XFormInputs);
+  assert(XFormInputs.size() == 1 && "Unexpected input to SDNodeXForm");
+
+  // Apply the transform and record the transformed value as our result.
+  AddMatcher(new EmitNodeXFormMatcher(XFormInputs[0], N->getOperator()));
+  ResultOps.push_back(NextRecordedOperandNo++);
+}
+
+/// Dispatch result-operand emission based on the kind of output-pattern node.
+void MatcherGen::EmitResultOperand(const TreePatternNode *N,
+                                   SmallVectorImpl<unsigned> &ResultOps) {
+  // A named node refers back to a value recorded while matching the source
+  // pattern.
+  if (!N->getName().empty()) {
+    EmitResultOfNamedOperand(N, ResultOps);
+    return;
+  }
+
+  // Unnamed leaves: immediates, registers, regclass IDs, subreg indices, ...
+  if (N->isLeaf()) {
+    EmitResultLeafAsOperand(N, ResultOps);
+    return;
+  }
+
+  // Interior nodes are either instructions to emit or value transforms.
+  Record *OpRec = N->getOperator();
+  if (OpRec->isSubClassOf("Instruction")) {
+    EmitResultInstructionAsOperand(N, ResultOps);
+    return;
+  }
+  if (OpRec->isSubClassOf("SDNodeXForm")) {
+    EmitResultSDNodeXFormAsOperand(N, ResultOps);
+    return;
+  }
+
+  errs() << "Unknown result node to emit code for: " << *N << '\n';
+  PrintFatalError("Unknown node in result pattern!");
+}
+
+/// Emit the tail of the matcher: merge chains, build the result pattern,
+/// permute/trim its results to what the source pattern expects, and finish
+/// with a CompleteMatch.
+void MatcherGen::EmitResultCode() {
+  // Patterns that match nodes with (potentially multiple) chain inputs have to
+  // merge them together into a token factor.  This informs the generated code
+  // what all the chained nodes are.
+  if (!MatchedChainNodes.empty())
+    AddMatcher(new EmitMergeInputChainsMatcher(MatchedChainNodes));
+
+  // Codegen the root of the result pattern, capturing the resulting values.
+  SmallVector<unsigned, 8> Ops;
+  EmitResultOperand(Pattern.getDstPattern(), Ops);
+
+  // At this point, we have however many values the result pattern produces.
+  // However, the input pattern might not need all of these.  If there are
+  // excess values at the end (such as implicit defs of condition codes etc)
+  // just lop them off.  This doesn't need to worry about glue or chains, just
+  // explicit results.
+  //
+  unsigned NumSrcResults = Pattern.getSrcPattern()->getNumTypes();
+
+  // If the pattern also has (implicit) results, count them as well.
+  if (!Pattern.getDstRegs().empty()) {
+    // If the root came from an implicit def in the instruction handling stuff,
+    // don't re-add it.
+    Record *HandledReg = nullptr;
+    const TreePatternNode *DstPat = Pattern.getDstPattern();
+    if (!DstPat->isLeaf() &&DstPat->getOperator()->isSubClassOf("Instruction")){
+      const CodeGenTarget &CGT = CGP.getTargetInfo();
+      CodeGenInstruction &II = CGT.getInstruction(DstPat->getOperator());
+
+      if (II.HasOneImplicitDefWithKnownVT(CGT) != MVT::Other)
+        HandledReg = II.ImplicitDefs[0];
+    }
+
+    for (Record *Reg : Pattern.getDstRegs()) {
+      if (!Reg->isSubClassOf("Register") || Reg == HandledReg) continue;
+      ++NumSrcResults;
+    }
+  }
+
+  SmallVector<unsigned, 8> Results(Ops);
+
+  // Apply result permutation.  The destination pattern may produce its results
+  // in a different order than the source pattern consumes them.
+  for (unsigned ResNo = 0; ResNo < Pattern.getDstPattern()->getNumResults();
+       ++ResNo) {
+    Results[ResNo] = Ops[Pattern.getDstPattern()->getResultIndex(ResNo)];
+  }
+
+  // Drop any excess trailing results, then finalize the match.
+  Results.resize(NumSrcResults);
+  AddMatcher(new CompleteMatchMatcher(Results, Pattern));
+}
+
+
+/// ConvertPatternToMatcher - Create the matcher for the specified pattern
+/// with the specified variant.  If the variant number is invalid, this
+/// returns null.
+Matcher *llvm::ConvertPatternToMatcher(const PatternToMatch &Pattern,
+                                       unsigned Variant,
+                                       const CodeGenDAGPatterns &CGP) {
+  MatcherGen Generator(Pattern, CGP);
+
+  // Emit the match prologue; a true result means this variant does not exist
+  // for the pattern, so there is nothing to produce.
+  if (Generator.EmitMatcherCode(Variant))
+    return nullptr;
+
+  // FIXME2: Kill extra MoveParent commands at the end of the matcher sequence.
+  // FIXME2: Split result code out to another table, and make the matcher end
+  // with an "Emit <index>" command.  This allows result generation stuff to be
+  // shared and factored?
+
+  // The match succeeded; append the code that builds the result pattern.
+  Generator.EmitResultCode();
+
+  // Unconditional match.
+  return Generator.GetMatcher();
+}
diff --git a/contrib/libs/llvm16/utils/TableGen/DAGISelMatcherOpt.cpp b/contrib/libs/llvm16/utils/TableGen/DAGISelMatcherOpt.cpp
new file mode 100644
index 0000000000..4273bd69b8
--- /dev/null
+++ b/contrib/libs/llvm16/utils/TableGen/DAGISelMatcherOpt.cpp
@@ -0,0 +1,471 @@
+//===- DAGISelMatcherOpt.cpp - Optimize a DAG Matcher ---------------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// This file implements the DAG Matcher optimizer.
+//
+//===----------------------------------------------------------------------===//
+
+#include "DAGISelMatcher.h"
+#include "CodeGenDAGPatterns.h"
+#include "llvm/ADT/StringSet.h"
+#include "llvm/Support/Debug.h"
+#include "llvm/Support/raw_ostream.h"
+using namespace llvm;
+
+#define DEBUG_TYPE "isel-opt"
+
+/// ContractNodes - Turn multiple matcher node patterns like 'MoveChild+Record'
+/// into single compound nodes like RecordChild.  Also performs other local
+/// peepholes: MoveChild+MoveParent elimination, EmitNode+CompleteMatch ->
+/// MorphNodeTo, and hoisting opcode checks above type checks.
+static void ContractNodes(std::unique_ptr<Matcher> &MatcherPtr,
+                          const CodeGenDAGPatterns &CGP) {
+  // If we reached the end of the chain, we're done.
+  Matcher *N = MatcherPtr.get();
+  if (!N) return;
+
+  // If we have a scope node, walk down all of the children.
+  if (ScopeMatcher *Scope = dyn_cast<ScopeMatcher>(N)) {
+    for (unsigned i = 0, e = Scope->getNumChildren(); i != e; ++i) {
+      // Take ownership of the child, optimize it, then give it back.
+      std::unique_ptr<Matcher> Child(Scope->takeChild(i));
+      ContractNodes(Child, CGP);
+      Scope->resetChild(i, Child.release());
+    }
+    return;
+  }
+
+  // If we found a movechild node with a node that comes in a 'foochild' form,
+  // transform it.
+  if (MoveChildMatcher *MC = dyn_cast<MoveChildMatcher>(N)) {
+    Matcher *New = nullptr;
+    if (RecordMatcher *RM = dyn_cast<RecordMatcher>(MC->getNext()))
+      if (MC->getChildNo() < 8)  // Only have RecordChild0...7
+        New = new RecordChildMatcher(MC->getChildNo(), RM->getWhatFor(),
+                                     RM->getResultNo());
+
+    if (CheckTypeMatcher *CT = dyn_cast<CheckTypeMatcher>(MC->getNext()))
+      if (MC->getChildNo() < 8 &&  // Only have CheckChildType0...7
+          CT->getResNo() == 0)     // CheckChildType checks res #0
+        New = new CheckChildTypeMatcher(MC->getChildNo(), CT->getType());
+
+    if (CheckSameMatcher *CS = dyn_cast<CheckSameMatcher>(MC->getNext()))
+      if (MC->getChildNo() < 4)  // Only have CheckChildSame0...3
+        New = new CheckChildSameMatcher(MC->getChildNo(), CS->getMatchNumber());
+
+    if (CheckIntegerMatcher *CI = dyn_cast<CheckIntegerMatcher>(MC->getNext()))
+      if (MC->getChildNo() < 5)  // Only have CheckChildInteger0...4
+        New = new CheckChildIntegerMatcher(MC->getChildNo(), CI->getValue());
+
+    if (auto *CCC = dyn_cast<CheckCondCodeMatcher>(MC->getNext()))
+      if (MC->getChildNo() == 2)  // Only have CheckChild2CondCode
+        New = new CheckChild2CondCodeMatcher(CCC->getCondCodeName());
+
+    if (New) {
+      // Insert the new node.
+      New->setNext(MatcherPtr.release());
+      MatcherPtr.reset(New);
+      // Remove the old one (the MoveChild and the node it was fused with).
+      MC->setNext(MC->getNext()->takeNext());
+      // Re-run from the top: the new head may enable further contraction.
+      return ContractNodes(MatcherPtr, CGP);
+    }
+  }
+
+  // Zap movechild -> moveparent.
+  if (MoveChildMatcher *MC = dyn_cast<MoveChildMatcher>(N))
+    if (MoveParentMatcher *MP =
+          dyn_cast<MoveParentMatcher>(MC->getNext())) {
+      MatcherPtr.reset(MP->takeNext());
+      return ContractNodes(MatcherPtr, CGP);
+    }
+
+  // Turn EmitNode->CompleteMatch into MorphNodeTo if we can.
+  if (EmitNodeMatcher *EN = dyn_cast<EmitNodeMatcher>(N))
+    if (CompleteMatchMatcher *CM =
+          dyn_cast<CompleteMatchMatcher>(EN->getNext())) {
+      // We can only use MorphNodeTo if the result values match up.
+      unsigned RootResultFirst = EN->getFirstResultSlot();
+      bool ResultsMatch = true;
+      for (unsigned i = 0, e = CM->getNumResults(); i != e; ++i)
+        if (CM->getResult(i) != RootResultFirst+i)
+          ResultsMatch = false;
+
+      // If the selected node defines a subset of the glue/chain results, we
+      // can't use MorphNodeTo.  For example, we can't use MorphNodeTo if the
+      // matched pattern has a chain but the root node doesn't.
+      const PatternToMatch &Pattern = CM->getPattern();
+
+      if (!EN->hasChain() &&
+          Pattern.getSrcPattern()->NodeHasProperty(SDNPHasChain, CGP))
+        ResultsMatch = false;
+
+      // If the matched node has glue and the output root doesn't, we can't
+      // use MorphNodeTo.
+      //
+      // NOTE: Strictly speaking, we don't have to check for glue here
+      // because the code in the pattern generator doesn't handle it right.  We
+      // do it anyway for thoroughness.
+      if (!EN->hasOutFlag() &&
+          Pattern.getSrcPattern()->NodeHasProperty(SDNPOutGlue, CGP))
+        ResultsMatch = false;
+
+
+      // If the root result node defines more results than the source root node
+      // *and* has a chain or glue input, then we can't match it because it
+      // would end up replacing the extra result with the chain/glue.
+#if 0
+      if ((EN->hasGlue() || EN->hasChain()) &&
+          EN->getNumNonChainGlueVTs() > ... need to get no results reliably ...)
+        ResultMatch = false;
+#endif
+
+      if (ResultsMatch) {
+        const SmallVectorImpl<MVT::SimpleValueType> &VTs = EN->getVTList();
+        const SmallVectorImpl<unsigned> &Operands = EN->getOperandList();
+        MatcherPtr.reset(new MorphNodeToMatcher(EN->getOpcodeName(),
+                                                VTs, Operands,
+                                                EN->hasChain(), EN->hasInFlag(),
+                                                EN->hasOutFlag(),
+                                                EN->hasMemRefs(),
+                                                EN->getNumFixedArityOperands(),
+                                                Pattern));
+        return;
+      }
+
+      // FIXME2: Kill off all the SelectionDAG::SelectNodeTo and getMachineNode
+      // variants.
+    }
+
+  // Recurse down the rest of the chain before the reordering peephole below,
+  // so that N->getNext() is already in contracted form.
+  ContractNodes(N->getNextPtr(), CGP);
+
+
+  // If we have a CheckType/CheckChildType/Record node followed by a
+  // CheckOpcode, invert the two nodes.  We prefer to do structural checks
+  // before type checks, as this opens opportunities for factoring on targets
+  // like X86 where many operations are valid on multiple types.
+  if ((isa<CheckTypeMatcher>(N) || isa<CheckChildTypeMatcher>(N) ||
+       isa<RecordMatcher>(N)) &&
+      isa<CheckOpcodeMatcher>(N->getNext())) {
+    // Unlink the two nodes from the list.
+    Matcher *CheckType = MatcherPtr.release();
+    Matcher *CheckOpcode = CheckType->takeNext();
+    Matcher *Tail = CheckOpcode->takeNext();
+
+    // Relink them.
+    MatcherPtr.reset(CheckOpcode);
+    CheckOpcode->setNext(CheckType);
+    CheckType->setNext(Tail);
+    return ContractNodes(MatcherPtr, CGP);
+  }
+}
+
+/// FindNodeWithKind - Scan a series of matchers looking for a matcher with a
+/// specified kind.  Return null if we didn't find one otherwise return the
+/// matcher.
+static Matcher *FindNodeWithKind(Matcher *M, Matcher::KindTy Kind) {
+  while (M) {
+    if (M->getKind() == Kind)
+      return M;
+    M = M->getNext();
+  }
+  return nullptr;
+}
+
+
+/// FactorNodes - Turn matches like this:
+/// Scope
+/// OPC_CheckType i32
+/// ABC
+/// OPC_CheckType i32
+/// XYZ
+/// into:
+/// OPC_CheckType i32
+/// Scope
+/// ABC
+/// XYZ
+///
+static void FactorNodes(std::unique_ptr<Matcher> &InputMatcherPtr) {
+ // Look for a push node. Iterates instead of recurses to reduce stack usage.
+ ScopeMatcher *Scope = nullptr;
+ std::unique_ptr<Matcher> *RebindableMatcherPtr = &InputMatcherPtr;
+ while (!Scope) {
+ // If we reached the end of the chain, we're done.
+ Matcher *N = RebindableMatcherPtr->get();
+ if (!N) return;
+
+ // If this is not a push node, just scan for one.
+ Scope = dyn_cast<ScopeMatcher>(N);
+ if (!Scope)
+ RebindableMatcherPtr = &(N->getNextPtr());
+ }
+ std::unique_ptr<Matcher> &MatcherPtr = *RebindableMatcherPtr;
+
+ // Okay, pull together the children of the scope node into a vector so we can
+ // inspect it more easily.
+ SmallVector<Matcher*, 32> OptionsToMatch;
+
+ for (unsigned i = 0, e = Scope->getNumChildren(); i != e; ++i) {
+ // Factor the subexpression.
+ std::unique_ptr<Matcher> Child(Scope->takeChild(i));
+ FactorNodes(Child);
+
+ if (Child) {
+ // If the child is a ScopeMatcher we can just merge its contents.
+ if (auto *SM = dyn_cast<ScopeMatcher>(Child.get())) {
+ for (unsigned j = 0, e = SM->getNumChildren(); j != e; ++j)
+ OptionsToMatch.push_back(SM->takeChild(j));
+ } else {
+ OptionsToMatch.push_back(Child.release());
+ }
+ }
+ }
+
+ SmallVector<Matcher*, 32> NewOptionsToMatch;
+
+ // Loop over options to match, merging neighboring patterns with identical
+ // starting nodes into a shared matcher.
+ for (unsigned OptionIdx = 0, e = OptionsToMatch.size(); OptionIdx != e;) {
+ // Find the set of matchers that start with this node.
+ Matcher *Optn = OptionsToMatch[OptionIdx++];
+
+ if (OptionIdx == e) {
+ NewOptionsToMatch.push_back(Optn);
+ continue;
+ }
+
+ // See if the next option starts with the same matcher. If the two
+ // neighbors *do* start with the same matcher, we can factor the matcher out
+ // of at least these two patterns. See what the maximal set we can merge
+ // together is.
+ SmallVector<Matcher*, 8> EqualMatchers;
+ EqualMatchers.push_back(Optn);
+
+ // Factor all of the known-equal matchers after this one into the same
+ // group.
+ while (OptionIdx != e && OptionsToMatch[OptionIdx]->isEqual(Optn))
+ EqualMatchers.push_back(OptionsToMatch[OptionIdx++]);
+
+ // If we found a non-equal matcher, see if it is contradictory with the
+ // current node. If so, we know that the ordering relation between the
+ // current sets of nodes and this node don't matter. Look past it to see if
+ // we can merge anything else into this matching group.
+ unsigned Scan = OptionIdx;
+ while (true) {
+ // If we ran out of stuff to scan, we're done.
+ if (Scan == e) break;
+
+ Matcher *ScanMatcher = OptionsToMatch[Scan];
+
+ // If we found an entry that matches out matcher, merge it into the set to
+ // handle.
+ if (Optn->isEqual(ScanMatcher)) {
+ // If is equal after all, add the option to EqualMatchers and remove it
+ // from OptionsToMatch.
+ EqualMatchers.push_back(ScanMatcher);
+ OptionsToMatch.erase(OptionsToMatch.begin()+Scan);
+ --e;
+ continue;
+ }
+
+ // If the option we're checking for contradicts the start of the list,
+ // skip over it.
+ if (Optn->isContradictory(ScanMatcher)) {
+ ++Scan;
+ continue;
+ }
+
+ // If we're scanning for a simple node, see if it occurs later in the
+ // sequence. If so, and if we can move it up, it might be contradictory
+ // or the same as what we're looking for. If so, reorder it.
+ if (Optn->isSimplePredicateOrRecordNode()) {
+ Matcher *M2 = FindNodeWithKind(ScanMatcher, Optn->getKind());
+ if (M2 && M2 != ScanMatcher &&
+ M2->canMoveBefore(ScanMatcher) &&
+ (M2->isEqual(Optn) || M2->isContradictory(Optn))) {
+ Matcher *MatcherWithoutM2 = ScanMatcher->unlinkNode(M2);
+ M2->setNext(MatcherWithoutM2);
+ OptionsToMatch[Scan] = M2;
+ continue;
+ }
+ }
+
+ // Otherwise, we don't know how to handle this entry, we have to bail.
+ break;
+ }
+
+ if (Scan != e &&
+ // Don't print it's obvious nothing extra could be merged anyway.
+ Scan+1 != e) {
+ LLVM_DEBUG(errs() << "Couldn't merge this:\n"; Optn->print(errs(), 4);
+ errs() << "into this:\n";
+ OptionsToMatch[Scan]->print(errs(), 4);
+ if (Scan + 1 != e) OptionsToMatch[Scan + 1]->printOne(errs());
+ if (Scan + 2 < e) OptionsToMatch[Scan + 2]->printOne(errs());
+ errs() << "\n");
+ }
+
+ // If we only found one option starting with this matcher, no factoring is
+ // possible.
+ if (EqualMatchers.size() == 1) {
+ NewOptionsToMatch.push_back(EqualMatchers[0]);
+ continue;
+ }
+
+ // Factor these checks by pulling the first node off each entry and
+ // discarding it. Take the first one off the first entry to reuse.
+ Matcher *Shared = Optn;
+ Optn = Optn->takeNext();
+ EqualMatchers[0] = Optn;
+
+ // Remove and delete the first node from the other matchers we're factoring.
+ for (unsigned i = 1, e = EqualMatchers.size(); i != e; ++i) {
+ Matcher *Tmp = EqualMatchers[i]->takeNext();
+ delete EqualMatchers[i];
+ EqualMatchers[i] = Tmp;
+ }
+
+ Shared->setNext(new ScopeMatcher(EqualMatchers));
+
+ // Recursively factor the newly created node.
+ FactorNodes(Shared->getNextPtr());
+
+ NewOptionsToMatch.push_back(Shared);
+ }
+
+ // If we're down to a single pattern to match, then we don't need this scope
+ // anymore.
+ if (NewOptionsToMatch.size() == 1) {
+ MatcherPtr.reset(NewOptionsToMatch[0]);
+ return;
+ }
+
+ if (NewOptionsToMatch.empty()) {
+ MatcherPtr.reset();
+ return;
+ }
+
+ // If our factoring failed (didn't achieve anything) see if we can simplify in
+ // other ways.
+
+ // Check to see if all of the leading entries are now opcode checks. If so,
+ // we can convert this Scope to be a OpcodeSwitch instead.
+ bool AllOpcodeChecks = true, AllTypeChecks = true;
+ for (unsigned i = 0, e = NewOptionsToMatch.size(); i != e; ++i) {
+ // Check to see if this breaks a series of CheckOpcodeMatchers.
+ if (AllOpcodeChecks &&
+ !isa<CheckOpcodeMatcher>(NewOptionsToMatch[i])) {
+#if 0
+ if (i > 3) {
+ errs() << "FAILING OPC #" << i << "\n";
+ NewOptionsToMatch[i]->dump();
+ }
+#endif
+ AllOpcodeChecks = false;
+ }
+
+ // Check to see if this breaks a series of CheckTypeMatcher's.
+ if (AllTypeChecks) {
+ CheckTypeMatcher *CTM =
+ cast_or_null<CheckTypeMatcher>(FindNodeWithKind(NewOptionsToMatch[i],
+ Matcher::CheckType));
+ if (!CTM ||
+ // iPTR checks could alias any other case without us knowing, don't
+ // bother with them.
+ CTM->getType() == MVT::iPTR ||
+ // SwitchType only works for result #0.
+ CTM->getResNo() != 0 ||
+ // If the CheckType isn't at the start of the list, see if we can move
+ // it there.
+ !CTM->canMoveBefore(NewOptionsToMatch[i])) {
+#if 0
+ if (i > 3 && AllTypeChecks) {
+ errs() << "FAILING TYPE #" << i << "\n";
+ NewOptionsToMatch[i]->dump();
+ }
+#endif
+ AllTypeChecks = false;
+ }
+ }
+ }
+
+ // If all the options are CheckOpcode's, we can form the SwitchOpcode, woot.
+ if (AllOpcodeChecks) {
+ StringSet<> Opcodes;
+ SmallVector<std::pair<const SDNodeInfo*, Matcher*>, 8> Cases;
+ for (unsigned i = 0, e = NewOptionsToMatch.size(); i != e; ++i) {
+ CheckOpcodeMatcher *COM = cast<CheckOpcodeMatcher>(NewOptionsToMatch[i]);
+ assert(Opcodes.insert(COM->getOpcode().getEnumName()).second &&
+ "Duplicate opcodes not factored?");
+ Cases.push_back(std::make_pair(&COM->getOpcode(), COM->takeNext()));
+ delete COM;
+ }
+
+ MatcherPtr.reset(new SwitchOpcodeMatcher(Cases));
+ return;
+ }
+
+ // If all the options are CheckType's, we can form the SwitchType, woot.
+ if (AllTypeChecks) {
+ DenseMap<unsigned, unsigned> TypeEntry;
+ SmallVector<std::pair<MVT::SimpleValueType, Matcher*>, 8> Cases;
+ for (unsigned i = 0, e = NewOptionsToMatch.size(); i != e; ++i) {
+ Matcher* M = FindNodeWithKind(NewOptionsToMatch[i], Matcher::CheckType);
+ assert(M && isa<CheckTypeMatcher>(M) && "Unknown Matcher type");
+
+ auto *CTM = cast<CheckTypeMatcher>(M);
+ Matcher *MatcherWithoutCTM = NewOptionsToMatch[i]->unlinkNode(CTM);
+ MVT::SimpleValueType CTMTy = CTM->getType();
+ delete CTM;
+
+ unsigned &Entry = TypeEntry[CTMTy];
+ if (Entry != 0) {
+ // If we have unfactored duplicate types, then we should factor them.
+ Matcher *PrevMatcher = Cases[Entry-1].second;
+ if (ScopeMatcher *SM = dyn_cast<ScopeMatcher>(PrevMatcher)) {
+ SM->setNumChildren(SM->getNumChildren()+1);
+ SM->resetChild(SM->getNumChildren()-1, MatcherWithoutCTM);
+ continue;
+ }
+
+ Matcher *Entries[2] = { PrevMatcher, MatcherWithoutCTM };
+ Cases[Entry-1].second = new ScopeMatcher(Entries);
+ continue;
+ }
+
+ Entry = Cases.size()+1;
+ Cases.push_back(std::make_pair(CTMTy, MatcherWithoutCTM));
+ }
+
+ // Make sure we recursively factor any scopes we may have created.
+ for (auto &M : Cases) {
+ if (ScopeMatcher *SM = dyn_cast<ScopeMatcher>(M.second)) {
+ std::unique_ptr<Matcher> Scope(SM);
+ FactorNodes(Scope);
+ M.second = Scope.release();
+ assert(M.second && "null matcher");
+ }
+ }
+
+ if (Cases.size() != 1) {
+ MatcherPtr.reset(new SwitchTypeMatcher(Cases));
+ } else {
+ // If we factored and ended up with one case, create it now.
+ MatcherPtr.reset(new CheckTypeMatcher(Cases[0].first, 0));
+ MatcherPtr->setNext(Cases[0].second);
+ }
+ return;
+ }
+
+
+ // Reassemble the Scope node with the adjusted children.
+ Scope->setNumChildren(NewOptionsToMatch.size());
+ for (unsigned i = 0, e = NewOptionsToMatch.size(); i != e; ++i)
+ Scope->resetChild(i, NewOptionsToMatch[i]);
+}
+
+// Entry point for matcher-table optimization: ContractNodes merges adjacent
+// matcher nodes where possible, then FactorNodes pulls common prefixes of
+// scope alternatives into shared matchers (possibly forming opcode/type
+// switches, see FactorNodes above).
+void
+llvm::OptimizeMatcher(std::unique_ptr<Matcher> &MatcherPtr,
+                      const CodeGenDAGPatterns &CGP) {
+  ContractNodes(MatcherPtr, CGP);
+  FactorNodes(MatcherPtr);
+}
diff --git a/contrib/libs/llvm16/utils/TableGen/DFAEmitter.cpp b/contrib/libs/llvm16/utils/TableGen/DFAEmitter.cpp
new file mode 100644
index 0000000000..705908226f
--- /dev/null
+++ b/contrib/libs/llvm16/utils/TableGen/DFAEmitter.cpp
@@ -0,0 +1,379 @@
+//===- DFAEmitter.cpp - Finite state automaton emitter --------------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// This class can produce a generic deterministic finite state automaton (DFA),
+// given a set of possible states and transitions.
+//
+// The input transitions can be nondeterministic - this class will produce the
+// deterministic equivalent state machine.
+//
+// The generated code can run the DFA and produce an accepted / not accepted
+// state and also produce, given a sequence of transitions that results in an
+// accepted state, the sequence of intermediate states. This is useful if the
+// initial automaton was nondeterministic - it allows mapping back from the DFA
+// to the NFA.
+//
+//===----------------------------------------------------------------------===//
+
+#include "DFAEmitter.h"
+#include "SequenceToOffsetTable.h"
+#include "TableGenBackends.h"
+#include "llvm/ADT/SmallVector.h"
+#include "llvm/ADT/StringExtras.h"
+#include "llvm/ADT/UniqueVector.h"
+#include "llvm/Support/Debug.h"
+#include "llvm/Support/raw_ostream.h"
+#include "llvm/TableGen/Record.h"
+#include <cassert>
+#include <cstdint>
+#include <deque>
+#include <map>
+#include <set>
+#include <string>
+#include <variant>
+#include <vector>
+
+#define DEBUG_TYPE "dfa-emitter"
+
+using namespace llvm;
+
+//===----------------------------------------------------------------------===//
+// DfaEmitter implementation. This is independent of the GenAutomaton backend.
+//===----------------------------------------------------------------------===//
+
+// Record a single NFA transition From --A--> To. Transitions accumulate here
+// and are determinized later by constructDfa().
+void DfaEmitter::addTransition(state_type From, state_type To, action_type A) {
+  // Track the action alphabet and the set of NFA states seen so far.
+  Actions.insert(A);
+  NfaStates.insert(From);
+  NfaStates.insert(To);
+  // A (state, action) pair may map to several targets - this is the
+  // nondeterministic part.
+  NfaTransitions[{From, A}].push_back(To);
+  ++NumNfaTransitions;
+}
+
+// One determinization step: given DFA state DS (a set of NFA states), compute
+// the DFA successor for every action, and record the per-transition NFA pair
+// info needed to map DFA paths back onto NFA paths.
+void DfaEmitter::visitDfaState(const DfaState &DS) {
+  // For every possible action...
+  auto FromId = DfaStates.idFor(DS);
+  for (action_type A : Actions) {
+    DfaState NewStates;
+    DfaTransitionInfo TI;
+    // For every represented state, word pair in the original NFA...
+    for (state_type FromState : DS) {
+      // If this action is possible from this state add the transitioned-to
+      // states to NewStates.
+      auto I = NfaTransitions.find({FromState, A});
+      if (I == NfaTransitions.end())
+        continue;
+      for (state_type &ToState : I->second) {
+        NewStates.push_back(ToState);
+        TI.emplace_back(FromState, ToState);
+      }
+    }
+    // No NFA state accepts A from DS: no DFA transition either.
+    if (NewStates.empty())
+      continue;
+    // Sort and unique, so equal NFA state sets compare equal regardless of
+    // discovery order (DfaStates uniques on exact vector contents).
+    sort(NewStates);
+    NewStates.erase(std::unique(NewStates.begin(), NewStates.end()),
+                    NewStates.end());
+    sort(TI);
+    TI.erase(std::unique(TI.begin(), TI.end()), TI.end());
+    // insert() returns the existing ID if NewStates was already seen.
+    unsigned ToId = DfaStates.insert(NewStates);
+    DfaTransitions.emplace(std::make_pair(FromId, A), std::make_pair(ToId, TI));
+  }
+}
+
+// Build the DFA by breadth-first subset construction: seed with the set {0}
+// (the NFA initial state) and visit every DFA state discovered. States
+// appended by visitDfaState() grow DfaStates, so the bound is re-read each
+// iteration until a fixed point is reached.
+void DfaEmitter::constructDfa() {
+  DfaState Initial(1, /*NFA initial state=*/0);
+  DfaStates.insert(Initial);
+
+  // Note that UniqueVector starts indices at 1, not zero.
+  unsigned DfaStateId = 1;
+  while (DfaStateId <= DfaStates.size()) {
+    // Copy the state out: visitDfaState may grow DfaStates and invalidate
+    // references into it.
+    DfaState S = DfaStates[DfaStateId];
+    visitDfaState(S);
+    DfaStateId++;
+  }
+}
+
+// Determinize the accumulated NFA and print the generated tables:
+//   <Name>TransitionInfo  - shared, offset-compressed NfaStatePair sequences
+//   <Name>Transition      - the per-transition struct definition
+//   <Name>Transitions     - the DFA transition table, sorted by (From, Action)
+void DfaEmitter::emit(StringRef Name, raw_ostream &OS) {
+  constructDfa();
+
+  OS << "// Input NFA has " << NfaStates.size() << " states with "
+     << NumNfaTransitions << " transitions.\n";
+  OS << "// Generated DFA has " << DfaStates.size() << " states with "
+     << DfaTransitions.size() << " transitions.\n\n";
+
+  // Implementation note: We don't bake a simple std::pair<> here as it requires
+  // significantly more effort to parse. A simple test with a large array of
+  // struct-pairs (N=100000) took clang-10 6s to parse. The same array of
+  // std::pair<uint64_t, uint64_t> took 242s. Instead we allow the user to
+  // define the pair type.
+  //
+  // FIXME: It may make sense to emit these as ULEB sequences instead of
+  // pairs of uint64_t.
+  OS << "// A zero-terminated sequence of NFA state transitions. Every DFA\n";
+  OS << "// transition implies a set of NFA transitions. These are referred\n";
+  OS << "// to by index in " << Name << "Transitions[].\n";
+
+  // Suffix-share the DfaTransitionInfo sequences to shrink the emitted table.
+  SequenceToOffsetTable<DfaTransitionInfo> Table;
+  // NOTE(review): EmittedIndices appears unused in this function.
+  std::map<DfaTransitionInfo, unsigned> EmittedIndices;
+  for (auto &T : DfaTransitions)
+    Table.add(T.second.second);
+  Table.layout();
+  OS << "const std::array<NfaStatePair, " << Table.size() << "> " << Name
+     << "TransitionInfo = {{\n";
+  Table.emit(
+      OS,
+      [](raw_ostream &OS, std::pair<uint64_t, uint64_t> P) {
+        OS << "{" << P.first << ", " << P.second << "}";
+      },
+      "{0ULL, 0ULL}");
+
+  OS << "}};\n\n";
+
+  OS << "// A transition in the generated " << Name << " DFA.\n";
+  OS << "struct " << Name << "Transition {\n";
+  OS << "  unsigned FromDfaState; // The transitioned-from DFA state.\n";
+  OS << "  ";
+  // The action's C++ type is emitter-defined (see printActionType overrides).
+  printActionType(OS);
+  OS << " Action;       // The input symbol that causes this transition.\n";
+  OS << "  unsigned ToDfaState;   // The transitioned-to DFA state.\n";
+  OS << "  unsigned InfoIdx;      // Start index into " << Name
+     << "TransitionInfo.\n";
+  OS << "};\n\n";
+
+  // std::map iteration order provides the sorted {From, Action} ordering the
+  // consumer relies on for binary search.
+  OS << "// A table of DFA transitions, ordered by {FromDfaState, Action}.\n";
+  OS << "// The initial state is 1, not zero.\n";
+  OS << "const std::array<" << Name << "Transition, "
+     << DfaTransitions.size() << "> " << Name << "Transitions = {{\n";
+  for (auto &KV : DfaTransitions) {
+    dfa_state_type From = KV.first.first;
+    dfa_state_type To = KV.second.first;
+    action_type A = KV.first.second;
+    unsigned InfoIdx = Table.get(KV.second.second);
+    OS << "  {" << From << ", ";
+    printActionValue(A, OS);
+    OS << ", " << To << ", " << InfoIdx << "},\n";
+  }
+  OS << "\n}};\n\n";
+}
+
+// Default action type/value printers. Subclasses override both to emit a
+// richer action representation (see CustomDfaEmitter below).
+void DfaEmitter::printActionType(raw_ostream &OS) { OS << "uint64_t"; }
+
+void DfaEmitter::printActionValue(action_type A, raw_ostream &OS) { OS << A; }
+
+//===----------------------------------------------------------------------===//
+// AutomatonEmitter implementation
+//===----------------------------------------------------------------------===//
+
+namespace {
+
+// One input symbol of the automaton: a TableGen record, an integer, or a
+// string, depending on the declared type of the symbol field.
+using Action = std::variant<Record *, unsigned, std::string>;
+using ActionTuple = std::vector<Action>;
+class Automaton;
+
+// One row of the automaton's TransitionClass relation: the state bits this
+// transition sets, plus the action tuple that triggers it.
+class Transition {
+  uint64_t NewState;
+  // The tuple of actions that causes this transition.
+  ActionTuple Actions;
+  // The types of the actions; this is the same across all transitions.
+  SmallVector<std::string, 4> Types;
+
+public:
+  Transition(Record *R, Automaton *Parent);
+  const ActionTuple &getActions() { return Actions; }
+  SmallVector<std::string, 4> getTypes() { return Types; }
+
+  bool canTransitionFrom(uint64_t State);
+  uint64_t transitionFrom(uint64_t State);
+};
+
+// Parses a single GenericAutomaton record and drives NFA construction and
+// table emission for it.
+class Automaton {
+  RecordKeeper &Records;
+  Record *R;
+  std::vector<Transition> Transitions;
+  /// All possible action tuples, uniqued.
+  UniqueVector<ActionTuple> Actions;
+  /// The fields within each Transition object to find the action symbols.
+  std::vector<StringRef> ActionSymbolFields;
+
+public:
+  Automaton(RecordKeeper &Records, Record *R);
+  void emit(raw_ostream &OS);
+
+  ArrayRef<StringRef> getActionSymbolFields() { return ActionSymbolFields; }
+  /// If the type of action A has been overridden (there exists a field
+  /// "TypeOf_A") return that, otherwise return the empty string.
+  StringRef getActionSymbolType(StringRef A);
+};
+
+// Backend driver: emits one automaton per GenericAutomaton record.
+class AutomatonEmitter {
+  RecordKeeper &Records;
+
+public:
+  AutomatonEmitter(RecordKeeper &R) : Records(R) {}
+  void run(raw_ostream &OS);
+};
+
+/// A DfaEmitter implementation that can print our variant action type.
+class CustomDfaEmitter : public DfaEmitter {
+  const UniqueVector<ActionTuple> &Actions;
+  std::string TypeName;
+
+public:
+  CustomDfaEmitter(const UniqueVector<ActionTuple> &Actions, StringRef TypeName)
+      : Actions(Actions), TypeName(TypeName) {}
+
+  void printActionType(raw_ostream &OS) override;
+  void printActionValue(action_type A, raw_ostream &OS) override;
+};
+} // namespace
+
+// Emit every GenericAutomaton's tables, each guarded by GET_<Name>_DECL so a
+// consumer opts in with a #define before including the generated file.
+void AutomatonEmitter::run(raw_ostream &OS) {
+  for (Record *R : Records.getAllDerivedDefinitions("GenericAutomaton")) {
+    Automaton A(Records, R);
+    OS << "#ifdef GET_" << R->getName() << "_DECL\n";
+    A.emit(OS);
+    OS << "#endif // GET_" << R->getName() << "_DECL\n";
+  }
+}
+
+// Cache the SymbolFields list up front: it names the fields of each
+// transition record that carry action symbols (used by Transition's ctor).
+Automaton::Automaton(RecordKeeper &Records, Record *R)
+    : Records(Records), R(R) {
+  LLVM_DEBUG(dbgs() << "Emitting automaton for " << R->getName() << "\n");
+  ActionSymbolFields = R->getValueAsListOfStrings("SymbolFields");
+}
+
+// Build the NFA over resource states from all TransitionClass records, then
+// determinize and print it through CustomDfaEmitter, preceded by the
+// <Name>Action typedef.
+void Automaton::emit(raw_ostream &OS) {
+  StringRef TransitionClass = R->getValueAsString("TransitionClass");
+  for (Record *T : Records.getAllDerivedDefinitions(TransitionClass)) {
+    assert(T->isSubClassOf("Transition"));
+    Transitions.emplace_back(T, this);
+    // Unique the action tuple; its ID becomes the DFA action symbol.
+    Actions.insert(Transitions.back().getActions());
+  }
+
+  LLVM_DEBUG(dbgs() << "  Action alphabet cardinality: " << Actions.size()
+                    << "\n");
+  LLVM_DEBUG(dbgs() << "  Each state has " << Transitions.size()
+                    << " potential transitions.\n");
+
+  StringRef Name = R->getName();
+
+  CustomDfaEmitter Emitter(Actions, std::string(Name) + "Action");
+  // Starting from the initial state, build up a list of possible states and
+  // transitions.
+  std::deque<uint64_t> Worklist(1, 0);
+  std::set<uint64_t> SeenStates;
+  unsigned NumTransitions = 0;
+  SeenStates.insert(Worklist.front());
+  while (!Worklist.empty()) {
+    uint64_t State = Worklist.front();
+    Worklist.pop_front();
+    for (Transition &T : Transitions) {
+      if (!T.canTransitionFrom(State))
+        continue;
+      uint64_t NewState = T.transitionFrom(State);
+      // Only enqueue states we haven't already visited.
+      if (SeenStates.emplace(NewState).second)
+        Worklist.emplace_back(NewState);
+      ++NumTransitions;
+      Emitter.addTransition(State, NewState, Actions.idFor(T.getActions()));
+    }
+  }
+  LLVM_DEBUG(dbgs() << "  NFA automaton has " << SeenStates.size()
+                    << " states with " << NumTransitions << " transitions.\n");
+  (void) NumTransitions;
+
+  // Every Transition carries the same type list, so any element describes the
+  // action tuple. NOTE(review): assumes at least one TransitionClass record
+  // exists - Transitions.back() on an empty vector is undefined behavior;
+  // confirm inputs can never be empty.
+  const auto &ActionTypes = Transitions.back().getTypes();
+  OS << "// The type of an action in the " << Name << " automaton.\n";
+  if (ActionTypes.size() == 1) {
+    OS << "using " << Name << "Action = " << ActionTypes[0] << ";\n";
+  } else {
+    OS << "using " << Name << "Action = std::tuple<" << join(ActionTypes, ", ")
+       << ">;\n";
+  }
+  OS << "\n";
+
+  Emitter.emit(Name, OS);
+}
+
+// Look up an optional "TypeOf_<A>" field on the automaton record; when
+// present it overrides the C++ type emitted for symbol field A.
+StringRef Automaton::getActionSymbolType(StringRef A) {
+  Twine Ty = "TypeOf_" + A;
+  if (!R->getValue(Ty.str()))
+    return "";
+  return R->getValueAsString(Ty.str());
+}
+
+// Decode a single Transition record: fold its NewState bits field into a
+// uint64_t mask and, for each action symbol field declared by the parent
+// automaton, capture the action value and its C++ type name.
+Transition::Transition(Record *R, Automaton *Parent) {
+  BitsInit *NewStateInit = R->getValueAsBitsInit("NewState");
+  NewState = 0;
+  assert(NewStateInit->getNumBits() <= sizeof(uint64_t) * 8 &&
+         "State cannot be represented in 64 bits!");
+  // Bits that are unset (or not concrete BitInits) simply stay zero.
+  for (unsigned I = 0; I < NewStateInit->getNumBits(); ++I) {
+    if (auto *Bit = dyn_cast<BitInit>(NewStateInit->getBit(I))) {
+      if (Bit->getValue())
+        NewState |= 1ULL << I;
+    }
+  }
+
+  for (StringRef A : Parent->getActionSymbolFields()) {
+    RecordVal *SymbolV = R->getValue(A);
+    // Dispatch on the TableGen type of the symbol field: def, int or string.
+    if (auto *Ty = dyn_cast<RecordRecTy>(SymbolV->getType())) {
+      Actions.emplace_back(R->getValueAsDef(A));
+      Types.emplace_back(Ty->getAsString());
+    } else if (isa<IntRecTy>(SymbolV->getType())) {
+      Actions.emplace_back(static_cast<unsigned>(R->getValueAsInt(A)));
+      Types.emplace_back("unsigned");
+    } else if (isa<StringRecTy>(SymbolV->getType())) {
+      Actions.emplace_back(std::string(R->getValueAsString(A)));
+      Types.emplace_back("std::string");
+    } else {
+      report_fatal_error("Unhandled symbol type!");
+    }
+
+    // An automaton-level "TypeOf_<field>" overrides the emitted type name.
+    StringRef TypeOverride = Parent->getActionSymbolType(A);
+    if (!TypeOverride.empty())
+      Types.back() = std::string(TypeOverride);
+  }
+}
+
+// A transition is legal from State iff none of the bits it would set are
+// already set (i.e. the resources it consumes are still free).
+bool Transition::canTransitionFrom(uint64_t State) {
+  if ((State & NewState) == 0)
+    // The bits we want to set are not set.
+    return true;
+  return false;
+}
+
+// Apply the transition: consume the resources by OR-ing in NewState's bits.
+uint64_t Transition::transitionFrom(uint64_t State) {
+  return State | NewState;
+}
+
+// The action type is the user-visible <Name>Action typedef.
+void CustomDfaEmitter::printActionType(raw_ostream &OS) { OS << TypeName; }
+
+// Print the uniqued action tuple with ID A. Single-element tuples print bare;
+// larger tuples are wrapped in std::make_tuple(...). Records print as their
+// def name, strings are double-quoted, integers print verbatim.
+// NOTE(review): string values are emitted without escaping; embedded quotes
+// or backslashes would yield invalid C++ - confirm inputs exclude them.
+void CustomDfaEmitter::printActionValue(action_type A, raw_ostream &OS) {
+  const ActionTuple &AT = Actions[A];
+  if (AT.size() > 1)
+    OS << "std::make_tuple(";
+  ListSeparator LS;
+  for (const auto &SingleAction : AT) {
+    OS << LS;
+    if (const auto *R = std::get_if<Record *>(&SingleAction))
+      OS << (*R)->getName();
+    else if (const auto *S = std::get_if<std::string>(&SingleAction))
+      OS << '"' << *S << '"';
+    else
+      OS << std::get<unsigned>(SingleAction);
+  }
+  if (AT.size() > 1)
+    OS << ")";
+}
+
+namespace llvm {
+
+// TableGen backend entry point (declared in TableGenBackends.h).
+void EmitAutomata(RecordKeeper &RK, raw_ostream &OS) {
+  AutomatonEmitter(RK).run(OS);
+}
+
+} // namespace llvm
diff --git a/contrib/libs/llvm16/utils/TableGen/DFAEmitter.h b/contrib/libs/llvm16/utils/TableGen/DFAEmitter.h
new file mode 100644
index 0000000000..44e5d97d54
--- /dev/null
+++ b/contrib/libs/llvm16/utils/TableGen/DFAEmitter.h
@@ -0,0 +1,107 @@
+//===--------------------- DfaEmitter.h -----------------------------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+// Defines a generic automaton builder. This takes a set of transitions and
+// states that represent a nondeterministic finite state automaton (NFA) and
+// emits a determinized DFA in a form that include/llvm/Support/Automaton.h can
+// drive.
+//
+// See file llvm/TableGen/Automaton.td for the TableGen API definition.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_UTILS_TABLEGEN_DFAEMITTER_H
+#define LLVM_UTILS_TABLEGEN_DFAEMITTER_H
+
+#include "llvm/ADT/SmallVector.h"
+#include "llvm/ADT/UniqueVector.h"
+#include <map>
+#include <set>
+
+namespace llvm {
+
+class raw_ostream;
+class StringRef;
+
+/// Construct a deterministic finite state automaton from possible
+/// nondeterministic state and transition data.
+///
+/// The state type is a 64-bit unsigned integer. The generated automaton is
+/// invariant to the sparsity of the state representation - its size is only
+/// a function of the cardinality of the set of states.
+///
+/// The inputs to this emitter are considered to define a nondeterministic
+/// finite state automaton (NFA). This is then converted to a DFA during
+/// emission. The emitted tables can be used to by
+/// include/llvm/Support/Automaton.h.
+class DfaEmitter {
+public:
+  // The type of an NFA state. The initial state is always zero.
+  using state_type = uint64_t;
+  // The type of an action.
+  using action_type = uint64_t;
+
+  DfaEmitter() = default;
+  virtual ~DfaEmitter() = default;
+
+  /// Record a single NFA transition: action A moves state From to state To.
+  void addTransition(state_type From, state_type To, action_type A);
+  /// Determinize the accumulated transitions and print the DFA tables to OS,
+  /// using Name as the prefix for all emitted symbols.
+  void emit(StringRef Name, raw_ostream &OS);
+
+protected:
+  /// Emit the C++ type of an action to OS.
+  virtual void printActionType(raw_ostream &OS);
+  /// Emit the C++ value of an action A to OS.
+  virtual void printActionValue(action_type A, raw_ostream &OS);
+
+private:
+  /// The state type of deterministic states. These are only used internally to
+  /// this class. This is an ID into the DfaStates UniqueVector.
+  using dfa_state_type = unsigned;
+
+  /// The actual representation of a DFA state, which is a union of one or more
+  /// NFA states.
+  using DfaState = SmallVector<state_type, 4>;
+
+  /// A DFA transition consists of a set of NFA states transitioning to a
+  /// new set of NFA states. The DfaTransitionInfo tracks, for every
+  /// transitioned-from NFA state, a set of valid transitioned-to states.
+  ///
+  /// Emission of this transition relation allows algorithmic determination of
+  /// the possible candidate NFA paths taken under a given input sequence to
+  /// reach a given DFA state.
+  using DfaTransitionInfo = SmallVector<std::pair<state_type, state_type>, 4>;
+
+  /// The set of all possible actions.
+  std::set<action_type> Actions;
+
+  /// The set of nondeterministic transitions. A state-action pair can
+  /// transition to multiple target states.
+  std::map<std::pair<state_type, action_type>, std::vector<state_type>>
+      NfaTransitions;
+  std::set<state_type> NfaStates;
+  unsigned NumNfaTransitions = 0;
+
+  /// The set of deterministic states. DfaStates.getId(DfaState) returns an ID,
+  /// which is dfa_state_type. Note that because UniqueVector reserves state
+  /// zero, the initial DFA state is always 1.
+  UniqueVector<DfaState> DfaStates;
+  /// The set of deterministic transitions. A state-action pair has only a
+  /// single target state.
+  std::map<std::pair<dfa_state_type, action_type>,
+           std::pair<dfa_state_type, DfaTransitionInfo>>
+      DfaTransitions;
+
+  /// Visit all NFA states and construct the DFA.
+  void constructDfa();
+  /// Visit a single DFA state and construct all possible transitions to new DFA
+  /// states.
+  void visitDfaState(const DfaState &DS);
+};
+
+} // namespace llvm
+
+#endif
diff --git a/contrib/libs/llvm16/utils/TableGen/DFAPacketizerEmitter.cpp b/contrib/libs/llvm16/utils/TableGen/DFAPacketizerEmitter.cpp
new file mode 100644
index 0000000000..6704d747f7
--- /dev/null
+++ b/contrib/libs/llvm16/utils/TableGen/DFAPacketizerEmitter.cpp
@@ -0,0 +1,362 @@
+//===- DFAPacketizerEmitter.cpp - Packetization DFA for a VLIW machine ----===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// This class parses the Schedule.td file and produces an API that can be used
+// to reason about whether an instruction can be added to a packet on a VLIW
+// architecture. The class internally generates a deterministic finite
+// automaton (DFA) that models all possible mappings of machine instructions
+// to functional units as instructions are added to a packet.
+//
+//===----------------------------------------------------------------------===//
+
+#include "CodeGenSchedule.h"
+#include "CodeGenTarget.h"
+#include "DFAEmitter.h"
+#include "llvm/ADT/SmallVector.h"
+#include "llvm/Support/Debug.h"
+#include "llvm/Support/raw_ostream.h"
+#include "llvm/TableGen/Record.h"
+#include "llvm/TableGen/TableGenBackend.h"
+#include <cassert>
+#include <cstdint>
+#include <map>
+#include <set>
+#include <string>
+#include <unordered_map>
+#include <vector>
+
+#define DEBUG_TYPE "dfa-emitter"
+
+using namespace llvm;
+
+// We use a uint64_t to represent a resource bitmask.
+#define DFA_MAX_RESOURCES 64
+
+namespace {
+// A conjunctive resource requirement: one bitmask of alternative functional
+// units per itinerary stage.
+using ResourceVector = SmallVector<uint64_t, 4>;
+
+struct ScheduleClass {
+  /// The parent itinerary index (processor model ID).
+  unsigned ItineraryID;
+
+  /// Index within this itinerary of the schedule class.
+  unsigned Idx;
+
+  /// The index within the uniqued set of required resources of Resources.
+  unsigned ResourcesIdx;
+
+  /// Conjunctive list of resource requirements:
+  ///   {a|b, b|c} => (a OR b) AND (b or c).
+  /// Resources are unique across all itineraries.
+  ResourceVector Resources;
+};
+
+// Generates and prints out the DFA for resource tracking.
+class DFAPacketizerEmitter {
+private:
+  std::string TargetName;
+  RecordKeeper &Records;
+
+  /// Uniqued resource vectors, indexed by ScheduleClass::ResourcesIdx.
+  UniqueVector<ResourceVector> UniqueResources;
+  std::vector<ScheduleClass> ScheduleClasses;
+  /// Functional unit name -> its single resource bit.
+  std::map<std::string, uint64_t> FUNameToBitsMap;
+  /// Combo FU bit -> union of the bits of all FUs the combo includes.
+  std::map<unsigned, uint64_t> ComboBitToBitsMap;
+
+public:
+  DFAPacketizerEmitter(RecordKeeper &R);
+
+  // Construct a map of function unit names to bits.
+  int collectAllFuncUnits(
+      ArrayRef<const CodeGenProcModel *> ProcModels);
+
+  // Construct a map from a combo function unit bit to the bits of all included
+  // functional units.
+  int collectAllComboFuncs(ArrayRef<Record *> ComboFuncList);
+
+  ResourceVector getResourcesForItinerary(Record *Itinerary);
+  void createScheduleClasses(unsigned ItineraryIdx, const RecVec &Itineraries);
+
+  // Emit code for a subset of itineraries.
+  void emitForItineraries(raw_ostream &OS,
+                          std::vector<const CodeGenProcModel *> &ProcItinList,
+                          std::string DFAName);
+
+  void run(raw_ostream &OS);
+};
+} // end anonymous namespace
+
+// Capture the target name once; all emitted symbols are prefixed with it.
+DFAPacketizerEmitter::DFAPacketizerEmitter(RecordKeeper &R)
+    : TargetName(std::string(CodeGenTarget(R).getName())), Records(R) {}
+
+// Assign one resource bit per functional unit, filling FUNameToBitsMap.
+// Returns the total number of FUs seen across all itineraries.
+// NOTE(review): bit indices restart at 0 for each itinerary record, so FU
+// names appearing in multiple itineraries overwrite earlier entries -
+// presumably names are unique within a packetizer namespace; confirm.
+int DFAPacketizerEmitter::collectAllFuncUnits(
+    ArrayRef<const CodeGenProcModel *> ProcModels) {
+  LLVM_DEBUG(dbgs() << "-------------------------------------------------------"
+                       "----------------------\n");
+  LLVM_DEBUG(dbgs() << "collectAllFuncUnits");
+  LLVM_DEBUG(dbgs() << " (" << ProcModels.size() << " itineraries)\n");
+
+  // Multiple processor models may share an itinerary record; dedupe them.
+  std::set<Record *> ProcItinList;
+  for (const CodeGenProcModel *Model : ProcModels)
+    ProcItinList.insert(Model->ItinsDef);
+
+  int totalFUs = 0;
+  // Parse functional units for all the itineraries.
+  for (Record *Proc : ProcItinList) {
+    std::vector<Record *> FUs = Proc->getValueAsListOfDefs("FU");
+
+    LLVM_DEBUG(dbgs() << " FU:"
+                      << " (" << FUs.size() << " FUs) " << Proc->getName());
+
+    // Convert macros to bits for each stage.
+    unsigned numFUs = FUs.size();
+    for (unsigned j = 0; j < numFUs; ++j) {
+      // A resource bitmask is a uint64_t, so at most 64 distinct units.
+      assert((j < DFA_MAX_RESOURCES) &&
+             "Exceeded maximum number of representable resources");
+      uint64_t FuncResources = 1ULL << j;
+      FUNameToBitsMap[std::string(FUs[j]->getName())] = FuncResources;
+      LLVM_DEBUG(dbgs() << " " << FUs[j]->getName() << ":0x"
+                        << Twine::utohexstr(FuncResources));
+    }
+    totalFUs += numFUs;
+    LLVM_DEBUG(dbgs() << "\n");
+  }
+  return totalFUs;
+}
+
+// For every ComboFuncUnits record, map the combo unit's bit to the union of
+// the bits of all functional units it comprises (ComboBitToBitsMap). Returns
+// the number of combo entries processed.
+int DFAPacketizerEmitter::collectAllComboFuncs(ArrayRef<Record *> ComboFuncList) {
+  LLVM_DEBUG(dbgs() << "-------------------------------------------------------"
+                       "----------------------\n");
+  LLVM_DEBUG(dbgs() << "collectAllComboFuncs");
+  LLVM_DEBUG(dbgs() << " (" << ComboFuncList.size() << " sets)\n");
+
+  int numCombos = 0;
+  for (unsigned i = 0, N = ComboFuncList.size(); i < N; ++i) {
+    Record *Func = ComboFuncList[i];
+    std::vector<Record *> FUs = Func->getValueAsListOfDefs("CFD");
+
+    LLVM_DEBUG(dbgs() << " CFD:" << i << " (" << FUs.size() << " combo FUs) "
+                      << Func->getName() << "\n");
+
+    // Convert macros to bits for each stage.
+    // NOTE(review): the inner loop bound N shadows the outer N; harmless
+    // here, but worth renaming upstream.
+    for (unsigned j = 0, N = FUs.size(); j < N; ++j) {
+      assert((j < DFA_MAX_RESOURCES) &&
+             "Exceeded maximum number of DFA resources");
+      Record *FuncData = FUs[j];
+      Record *ComboFunc = FuncData->getValueAsDef("TheComboFunc");
+      const std::vector<Record *> &FuncList =
+          FuncData->getValueAsListOfDefs("FuncList");
+      const std::string &ComboFuncName = std::string(ComboFunc->getName());
+      // The combo's own bit must already be registered by collectAllFuncUnits.
+      uint64_t ComboBit = FUNameToBitsMap[ComboFuncName];
+      uint64_t ComboResources = ComboBit;
+      LLVM_DEBUG(dbgs() << "  combo: " << ComboFuncName << ":0x"
+                        << Twine::utohexstr(ComboResources) << "\n");
+      // OR in the bit of every member functional unit.
+      for (auto *K : FuncList) {
+        std::string FuncName = std::string(K->getName());
+        uint64_t FuncResources = FUNameToBitsMap[FuncName];
+        LLVM_DEBUG(dbgs() << "   " << FuncName << ":0x"
+                          << Twine::utohexstr(FuncResources) << "\n");
+        ComboResources |= FuncResources;
+      }
+      ComboBitToBitsMap[ComboBit] = ComboResources;
+      numCombos++;
+      LLVM_DEBUG(dbgs() << "    => combo bits: " << ComboFuncName << ":0x"
+                        << Twine::utohexstr(ComboBit) << " = 0x"
+                        << Twine::utohexstr(ComboResources) << "\n");
+    }
+  }
+  return numCombos;
+}
+
+// Translate an itinerary's Stages into a conjunctive resource vector: one
+// bitmask of alternative functional units per stage. Stages whose units are
+// all unknown to FUNameToBitsMap (mask 0) are dropped.
+ResourceVector
+DFAPacketizerEmitter::getResourcesForItinerary(Record *Itinerary) {
+  ResourceVector Resources;
+  assert(Itinerary);
+  for (Record *StageDef : Itinerary->getValueAsListOfDefs("Stages")) {
+    uint64_t StageResources = 0;
+    // Each listed unit is an alternative for this stage; OR their bits.
+    for (Record *Unit : StageDef->getValueAsListOfDefs("Units")) {
+      StageResources |= FUNameToBitsMap[std::string(Unit->getName())];
+    }
+    if (StageResources != 0)
+      Resources.push_back(StageResources);
+  }
+  return Resources;
+}
+
+// Append one ScheduleClass per itinerary entry of the given processor model.
+// A null itinerary (no scheduling info) gets ResourcesIdx 0 and an empty
+// resource vector; otherwise the resource vector is uniqued so identical
+// requirements share an index.
+void DFAPacketizerEmitter::createScheduleClasses(unsigned ItineraryIdx,
+                                                 const RecVec &Itineraries) {
+  unsigned Idx = 0;
+  for (Record *Itinerary : Itineraries) {
+    if (!Itinerary) {
+      ScheduleClasses.push_back({ItineraryIdx, Idx++, 0, ResourceVector{}});
+      continue;
+    }
+    ResourceVector Resources = getResourcesForItinerary(Itinerary);
+    ScheduleClasses.push_back(
+        {ItineraryIdx, Idx++, UniqueResources.insert(Resources), Resources});
+  }
+}
+
+//
+// Run the worklist algorithm to generate the DFA.
+//
+// Top-level entry: groups processor models that have itineraries by their
+// PacketizerNamespace and emits one DFA per namespace.
+void DFAPacketizerEmitter::run(raw_ostream &OS) {
+  OS << "\n"
+     << "#include \"llvm/CodeGen/DFAPacketizer.h\"\n";
+  OS << "namespace llvm {\n";
+
+  CodeGenTarget CGT(Records);
+  CodeGenSchedModels CGS(Records, CGT);
+
+  // Models without itineraries contribute nothing to the packetizer.
+  std::unordered_map<std::string, std::vector<const CodeGenProcModel *>>
+      ItinsByNamespace;
+  for (const CodeGenProcModel &ProcModel : CGS.procModels()) {
+    if (ProcModel.hasItineraries()) {
+      auto NS = ProcModel.ItinsDef->getValueAsString("PacketizerNamespace");
+      ItinsByNamespace[std::string(NS)].push_back(&ProcModel);
+    }
+  }
+
+  for (auto &KV : ItinsByNamespace)
+    emitForItineraries(OS, KV.second, KV.first);
+  OS << "} // end namespace llvm\n";
+}
+
+// Emit the packetizer tables and automaton for one packetizer namespace.
+// Builds an NFA over 64-bit resource-usage states via a worklist, then hands
+// the transitions to DfaEmitter for subset construction, and finally emits
+// the createXXXDFAPacketizer factory on the generated SubtargetInfo class.
+void DFAPacketizerEmitter::emitForItineraries(
+    raw_ostream &OS, std::vector<const CodeGenProcModel *> &ProcModels,
+    std::string DFAName) {
+  // run() opened `namespace llvm`; close it here because the tables below
+  // live in an anonymous namespace, and reopen llvm at the end.
+  OS << "} // end namespace llvm\n\n";
+  OS << "namespace {\n";
+  collectAllFuncUnits(ProcModels);
+  collectAllComboFuncs(Records.getAllDerivedDefinitions("ComboFuncUnits"));
+
+  // Collect the itineraries.
+  DenseMap<const CodeGenProcModel *, unsigned> ProcModelStartIdx;
+  for (const CodeGenProcModel *Model : ProcModels) {
+    assert(Model->hasItineraries());
+    ProcModelStartIdx[Model] = ScheduleClasses.size();
+    createScheduleClasses(Model->Index, Model->ItinDefList);
+  }
+
+  // Output the mapping from ScheduleClass to ResourcesIdx.
+  unsigned Idx = 0;
+  OS << "constexpr unsigned " << TargetName << DFAName
+     << "ResourceIndices[] = {";
+  for (const ScheduleClass &SC : ScheduleClasses) {
+    if (Idx++ % 32 == 0)
+      OS << "\n  ";
+    OS << SC.ResourcesIdx << ", ";
+  }
+  OS << "\n};\n\n";
+
+  // And the mapping from Itinerary index into the previous table.
+  // An extra trailing entry (total size) lets consumers compute the length
+  // of the last model's slice by subtraction.
+  OS << "constexpr unsigned " << TargetName << DFAName
+     << "ProcResourceIndexStart[] = {\n";
+  OS << "  0, // NoSchedModel\n";
+  for (const CodeGenProcModel *Model : ProcModels) {
+    OS << "  " << ProcModelStartIdx[Model] << ", // " << Model->ModelName
+       << "\n";
+  }
+  OS << "  " << ScheduleClasses.size() << "\n};\n\n";
+
+  // The type of a state in the nondeterministic automaton we're defining.
+  using NfaStateTy = uint64_t;
+
+  // Given a resource state, return all resource states by applying
+  // InsnClass.
+  auto applyInsnClass = [&](const ResourceVector &InsnClass,
+                            NfaStateTy State) -> std::deque<NfaStateTy> {
+    std::deque<NfaStateTy> V(1, State);
+    // Apply every stage in the class individually.
+    for (NfaStateTy Stage : InsnClass) {
+      // Apply this stage to every existing member of V in turn.
+      size_t Sz = V.size();
+      for (unsigned I = 0; I < Sz; ++I) {
+        NfaStateTy S = V.front();
+        V.pop_front();
+
+        // For this stage, state combination, try all possible resources.
+        for (unsigned J = 0; J < DFA_MAX_RESOURCES; ++J) {
+          NfaStateTy ResourceMask = 1ULL << J;
+          if ((ResourceMask & Stage) == 0)
+            // This resource isn't required by this stage.
+            continue;
+          NfaStateTy Combo = ComboBitToBitsMap[ResourceMask];
+          if (Combo && ((~S & Combo) != Combo))
+            // This combo units bits are not available.
+            continue;
+          NfaStateTy ResultingResourceState = S | ResourceMask | Combo;
+          if (ResultingResourceState == S)
+            continue;
+          V.push_back(ResultingResourceState);
+        }
+      }
+    }
+    return V;
+  };
+
+  // Given a resource state, return a quick (conservative) guess as to whether
+  // InsnClass can be applied. This is a filter for the more heavyweight
+  // applyInsnClass.
+  auto canApplyInsnClass = [](const ResourceVector &InsnClass,
+                              NfaStateTy State) -> bool {
+    for (NfaStateTy Resources : InsnClass) {
+      // If a stage's required resources are all already taken, the class
+      // definitely cannot be packed into this state.
+      if ((State | Resources) == State)
+        return false;
+    }
+    return true;
+  };
+
+  // Breadth-first exploration of reachable resource states from the empty
+  // packet (state 0); every discovered transition is fed to the emitter.
+  DfaEmitter Emitter;
+  std::deque<NfaStateTy> Worklist(1, 0);
+  std::set<NfaStateTy> SeenStates;
+  SeenStates.insert(Worklist.front());
+  while (!Worklist.empty()) {
+    NfaStateTy State = Worklist.front();
+    Worklist.pop_front();
+    for (const ResourceVector &Resources : UniqueResources) {
+      if (!canApplyInsnClass(Resources, State))
+        continue;
+      unsigned ResourcesID = UniqueResources.idFor(Resources);
+      for (uint64_t NewState : applyInsnClass(Resources, State)) {
+        if (SeenStates.emplace(NewState).second)
+          Worklist.emplace_back(NewState);
+        Emitter.addTransition(State, NewState, ResourcesID);
+      }
+    }
+  }
+
+  std::string TargetAndDFAName = TargetName + DFAName;
+  Emitter.emit(TargetAndDFAName, OS);
+  OS << "} // end anonymous namespace\n\n";
+
+  // Emit the factory method that wires the generated tables into a
+  // DFAPacketizer instance for the given itinerary data.
+  std::string SubTargetClassName = TargetName + "GenSubtargetInfo";
+  OS << "namespace llvm {\n";
+  OS << "DFAPacketizer *" << SubTargetClassName << "::"
+     << "create" << DFAName
+     << "DFAPacketizer(const InstrItineraryData *IID) const {\n"
+     << "  static Automaton<uint64_t> A(ArrayRef<" << TargetAndDFAName
+     << "Transition>(" << TargetAndDFAName << "Transitions), "
+     << TargetAndDFAName << "TransitionInfo);\n"
+     << "  unsigned ProcResIdxStart = " << TargetAndDFAName
+     << "ProcResourceIndexStart[IID->SchedModel.ProcID];\n"
+     << "  unsigned ProcResIdxNum = " << TargetAndDFAName
+     << "ProcResourceIndexStart[IID->SchedModel.ProcID + 1] - "
+        "ProcResIdxStart;\n"
+     << "  return new DFAPacketizer(IID, A, {&" << TargetAndDFAName
+     << "ResourceIndices[ProcResIdxStart], ProcResIdxNum});\n"
+     << "\n}\n\n";
+}
+
+namespace llvm {
+
+// TableGen backend entry point: emits the DFA packetizer tables for all
+// packetizer namespaces found in the record keeper.
+void EmitDFAPacketizer(RecordKeeper &RK, raw_ostream &OS) {
+  emitSourceFileHeader("Target DFA Packetizer Tables", OS);
+  DFAPacketizerEmitter(RK).run(OS);
+}
+
+} // end namespace llvm
diff --git a/contrib/libs/llvm16/utils/TableGen/DXILEmitter.cpp b/contrib/libs/llvm16/utils/TableGen/DXILEmitter.cpp
new file mode 100644
index 0000000000..44c1df3e9a
--- /dev/null
+++ b/contrib/libs/llvm16/utils/TableGen/DXILEmitter.cpp
@@ -0,0 +1,442 @@
+//===- DXILEmitter.cpp - DXIL operation Emitter ---------------------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// DXILEmitter uses the descriptions of DXIL operation to construct enum and
+// helper functions for DXIL operation.
+//
+//===----------------------------------------------------------------------===//
+
+#include "SequenceToOffsetTable.h"
+#include "llvm/ADT/STLExtras.h"
+#include "llvm/ADT/SmallVector.h"
+#include "llvm/ADT/StringSet.h"
+#include "llvm/ADT/StringSwitch.h"
+#include "llvm/Support/DXILOperationCommon.h"
+#include "llvm/TableGen/Error.h"
+#include "llvm/TableGen/Record.h"
+
+using namespace llvm;
+using namespace llvm::dxil;
+
+namespace {
+
+// Minimum shader model (major.minor) associated with a DXIL operation.
+struct DXILShaderModel {
+  int Major;
+  int Minor;
+};
+
+// One operand of a DXIL operation, parsed from an "ops" list element.
+struct DXILParam {
+  int Pos;            // position in parameter list
+  ParameterKind Kind;
+  StringRef Name;     // short, unique name
+  StringRef Doc;      // the documentation description of this parameter
+  bool IsConst;       // whether this argument requires a constant value in the IR
+  StringRef EnumName; // the name of the enum type if applicable
+  int MaxValue;       // the maximum value for this parameter if applicable
+  DXILParam(const Record *R);
+};
+
+// In-memory form of one "dxil_op" TableGen record; the constructor pulls
+// the fields out of the record so the emitters below can sort and print
+// them without touching the Record API again.
+struct DXILOperationData {
+  StringRef Name; // short, unique name
+
+  StringRef DXILOp;    // name of DXIL operation
+  int DXILOpID;        // ID of DXIL operation
+  StringRef DXILClass; // name of the opcode class
+  StringRef Category;  // classification for this instruction
+  StringRef Doc;       // the documentation description of this instruction
+
+  SmallVector<DXILParam> Params; // the operands that this instruction takes
+  StringRef OverloadTypes;       // overload types if applicable
+  StringRef FnAttr;              // attribute shorthands: rn=does not access
+                                 // memory,ro=only reads from memory
+  StringRef Intrinsic; // The llvm intrinsic map to DXILOp. Default is "" which
+                       // means no map exist
+  bool IsDeriv;        // whether this is some kind of derivative
+  bool IsGradient;     // whether this requires a gradient calculation
+  bool IsFeedback;     // whether this is a sampler feedback op
+  bool IsWave; // whether this requires in-wave, cross-lane functionality
+  bool RequiresUniformInputs; // whether this operation requires that all
+                              // of its inputs are uniform across the wave
+  SmallVector<StringRef, 4>
+      ShaderStages; // shader stages to which this applies, empty for all.
+  DXILShaderModel ShaderModel;           // minimum shader model required
+  DXILShaderModel ShaderModelTranslated; // minimum shader model required with
+                                         // translation by linker
+  int OverloadParamIndex;             // parameter index which control the overload.
+                                      // When < 0, should be only 1 overload type.
+  SmallVector<StringRef, 4> counters; // counters for this inst.
+  DXILOperationData(const Record *R) {
+    Name = R->getValueAsString("name");
+    DXILOp = R->getValueAsString("dxil_op");
+    DXILOpID = R->getValueAsInt("dxil_opid");
+    DXILClass = R->getValueAsDef("op_class")->getValueAsString("name");
+    Category = R->getValueAsDef("category")->getValueAsString("name");
+
+    // "llvm_intrinsic" is optional; when present its def name must carry
+    // the conventional "int_" prefix, which is stripped for LowerMap.
+    if (R->getValue("llvm_intrinsic")) {
+      auto *IntrinsicDef = R->getValueAsDef("llvm_intrinsic");
+      auto DefName = IntrinsicDef->getName();
+      assert(DefName.startswith("int_") && "invalid intrinsic name");
+      // Remove the int_ from intrinsic name.
+      Intrinsic = DefName.substr(4);
+    }
+
+    Doc = R->getValueAsString("doc");
+
+    ListInit *ParamList = R->getValueAsListInit("ops");
+    OverloadParamIndex = -1;
+    for (unsigned I = 0; I < ParamList->size(); ++I) {
+      Record *Param = ParamList->getElementAsRecord(I);
+      Params.emplace_back(DXILParam(Param));
+      auto &CurParam = Params.back();
+      // The last parameter whose kind is OVERLOAD or beyond determines the
+      // overloaded slot; -1 remains when no parameter is overloadable.
+      if (CurParam.Kind >= ParameterKind::OVERLOAD)
+        OverloadParamIndex = I;
+    }
+    OverloadTypes = R->getValueAsString("oload_types");
+    FnAttr = R->getValueAsString("fn_attr");
+  }
+};
+} // end anonymous namespace
+
+// Parse one operand record. "doc" is optional; all other fields are read
+// unconditionally from the record.
+DXILParam::DXILParam(const Record *R) {
+  Name = R->getValueAsString("name");
+  Pos = R->getValueAsInt("pos");
+  // Map the record's LLVM type string (e.g. "i32") to a ParameterKind.
+  Kind = parameterTypeNameToKind(R->getValueAsString("llvm_type"));
+  if (R->getValue("doc"))
+    Doc = R->getValueAsString("doc");
+  IsConst = R->getValueAsBit("is_const");
+  EnumName = R->getValueAsString("enum_name");
+  MaxValue = R->getValueAsInt("max_value");
+}
+
+// Return the enumerator spelling for a ParameterKind, used when emitting
+// the generated parameter-kind table. Exhaustive switch: a new enum value
+// without a case here reaches the llvm_unreachable below.
+static std::string parameterKindToString(ParameterKind Kind) {
+  switch (Kind) {
+  case ParameterKind::INVALID:
+    return "INVALID";
+  case ParameterKind::VOID:
+    return "VOID";
+  case ParameterKind::HALF:
+    return "HALF";
+  case ParameterKind::FLOAT:
+    return "FLOAT";
+  case ParameterKind::DOUBLE:
+    return "DOUBLE";
+  case ParameterKind::I1:
+    return "I1";
+  case ParameterKind::I8:
+    return "I8";
+  case ParameterKind::I16:
+    return "I16";
+  case ParameterKind::I32:
+    return "I32";
+  case ParameterKind::I64:
+    return "I64";
+  case ParameterKind::OVERLOAD:
+    return "OVERLOAD";
+  case ParameterKind::CBUFFER_RET:
+    return "CBUFFER_RET";
+  case ParameterKind::RESOURCE_RET:
+    return "RESOURCE_RET";
+  case ParameterKind::DXIL_HANDLE:
+    return "DXIL_HANDLE";
+  }
+  llvm_unreachable("Unknown llvm::dxil::ParameterKind enum");
+}
+
+// Emit one enumerator line of the OpCode enum, with the op's doc string as
+// a trailing comment.
+static void emitDXILOpEnum(DXILOperationData &DXILOp, raw_ostream &OS) {
+  // Name = ID, // Doc
+  OS << DXILOp.Name << " = " << DXILOp.DXILOpID << ", // " << DXILOp.Doc
+     << "\n";
+}
+
+// Concatenate a set of category names into one space-prefixed string, used
+// as a sort/group key for opcode classes.
+// NOTE(review): parameter name "Cetegorys" is a typo for "Categories";
+// kept as-is since this is a comment-only pass.
+static std::string buildCategoryStr(StringSet<> &Cetegorys) {
+  std::string Str;
+  raw_string_ostream OS(Str);
+  for (auto &It : Cetegorys) {
+    OS << " " << It.getKey();
+  }
+  return OS.str();
+}
+
+// Emit enum declaration for DXIL.
+// Produces two enums: `OpCode` (one enumerator per operation, grouped by
+// category) and `OpCodeClass` (one enumerator per distinct opcode class).
+// Note: sorts DXILOps in place as a side effect.
+static void emitDXILEnums(std::vector<DXILOperationData> &DXILOps,
+                          raw_ostream &OS) {
+  // Sort by Category + OpName.
+  llvm::sort(DXILOps, [](DXILOperationData &A, DXILOperationData &B) {
+    // Group by Category first.
+    if (A.Category == B.Category)
+      // Inside same Category, order by OpName.
+      return A.DXILOp < B.DXILOp;
+    else
+      return A.Category < B.Category;
+  });
+
+  OS << "// Enumeration for operations specified by DXIL\n";
+  OS << "enum class OpCode : unsigned {\n";
+
+  // Map each opcode class to the set of categories its ops belong to; used
+  // below to order and annotate the OpCodeClass enum.
+  StringMap<StringSet<>> ClassMap;
+  StringRef PrevCategory = "";
+  for (auto &DXILOp : DXILOps) {
+    StringRef Category = DXILOp.Category;
+    // Emit a banner comment whenever the category changes (input is sorted).
+    if (Category != PrevCategory) {
+      OS << "\n// " << Category << "\n";
+      PrevCategory = Category;
+    }
+    emitDXILOpEnum(DXILOp, OS);
+    auto It = ClassMap.find(DXILOp.DXILClass);
+    if (It != ClassMap.end()) {
+      It->second.insert(DXILOp.Category);
+    } else {
+      ClassMap[DXILOp.DXILClass].insert(DXILOp.Category);
+    }
+  }
+
+  OS << "\n};\n\n";
+
+  // Flatten to (class name, concatenated categories) pairs for sorting.
+  std::vector<std::pair<std::string, std::string>> ClassVec;
+  for (auto &It : ClassMap) {
+    ClassVec.emplace_back(
+        std::make_pair(It.getKey().str(), buildCategoryStr(It.second)));
+  }
+  // Sort by Category + ClassName.
+  llvm::sort(ClassVec, [](std::pair<std::string, std::string> &A,
+                          std::pair<std::string, std::string> &B) {
+    StringRef ClassA = A.first;
+    StringRef CategoryA = A.second;
+    StringRef ClassB = B.first;
+    StringRef CategoryB = B.second;
+    // Group by Category first.
+    if (CategoryA == CategoryB)
+      // Inside same Category, order by ClassName.
+      return ClassA < ClassB;
+    else
+      return CategoryA < CategoryB;
+  });
+
+  OS << "// Groups for DXIL operations with equivalent function templates\n";
+  OS << "enum class OpCodeClass : unsigned {\n";
+  PrevCategory = "";
+  for (auto &It : ClassVec) {
+
+    StringRef Category = It.second;
+    if (Category != PrevCategory) {
+      OS << "\n// " << Category << "\n";
+      PrevCategory = Category;
+    }
+    StringRef Name = It.first;
+    OS << Name << ",\n";
+  }
+  OS << "\n};\n\n";
+}
+
+// Emit map from llvm intrinsic to DXIL operation.
+// Only ops that declared an "llvm_intrinsic" in TableGen get an entry;
+// the result is a static SmallDenseMap initializer named LowerMap.
+static void emitDXILIntrinsicMap(std::vector<DXILOperationData> &DXILOps,
+                                 raw_ostream &OS) {
+  OS << "\n";
+  // FIXME: use array instead of SmallDenseMap.
+  OS << "static const SmallDenseMap<Intrinsic::ID, dxil::OpCode> LowerMap = "
+        "{\n";
+  for (auto &DXILOp : DXILOps) {
+    // Skip operations with no intrinsic mapping.
+    if (DXILOp.Intrinsic.empty())
+      continue;
+    // {Intrinsic::sin, dxil::OpCode::Sin},
+    OS << "  { Intrinsic::" << DXILOp.Intrinsic
+       << ", dxil::OpCode::" << DXILOp.DXILOp << "},\n";
+  }
+  OS << "};\n";
+  OS << "\n";
+}
+
+// Translate the TableGen "fn_attr" shorthand ("rn"/"ro") into the matching
+// LLVM Attribute spelling; anything else maps to Attribute::None.
+static std::string emitDXILOperationFnAttr(StringRef FnAttr) {
+  return StringSwitch<std::string>(FnAttr)
+      .Case("rn", "Attribute::ReadNone")
+      .Case("ro", "Attribute::ReadOnly")
+      .Default("Attribute::None");
+}
+
+// Map one overload type token (as written in "oload_types", e.g. "half")
+// to its OverloadKind enumerator spelling; unknown tokens become VOID.
+static std::string getOverloadKind(StringRef Overload) {
+  return StringSwitch<std::string>(Overload)
+      .Case("half", "OverloadKind::HALF")
+      .Case("float", "OverloadKind::FLOAT")
+      .Case("double", "OverloadKind::DOUBLE")
+      .Case("i1", "OverloadKind::I1")
+      .Case("i16", "OverloadKind::I16")
+      .Case("i32", "OverloadKind::I32")
+      .Case("i64", "OverloadKind::I64")
+      .Case("udt", "OverloadKind::UserDefineType")
+      .Case("obj", "OverloadKind::ObjectType")
+      .Default("OverloadKind::VOID");
+}
+
+// Build the OR-expression of OverloadKind values from a semicolon-separated
+// overload list (e.g. "half;float" -> "OverloadKind::HALF |
+// OverloadKind::FLOAT"). Empty tokens are dropped; the list must be
+// non-empty.
+static std::string getDXILOperationOverload(StringRef Overloads) {
+  SmallVector<StringRef> OverloadStrs;
+  Overloads.split(OverloadStrs, ';', /*MaxSplit*/ -1, /*KeepEmpty*/ false);
+  // Format is: OverloadKind::FLOAT | OverloadKind::HALF
+  assert(!OverloadStrs.empty() && "Invalid overloads");
+  auto It = OverloadStrs.begin();
+  std::string Result;
+  raw_string_ostream OS(Result);
+  OS << getOverloadKind(*It);
+  for (++It; It != OverloadStrs.end(); ++It) {
+    OS << " | " << getOverloadKind(*It);
+  }
+  return OS.str();
+}
+
+// Return Name with its first character lowercased; empty input yields an
+// empty string.
+static std::string lowerFirstLetter(StringRef Name) {
+  if (Name.empty())
+    return "";
+
+  std::string LowerName = Name.str();
+  LowerName[0] = llvm::toLower(Name[0]);
+  return LowerName;
+}
+
+// Convert an opcode class name to its lowerCamel spelling. Classes whose
+// names start with an initialism ("CBuffer...", "GS...") need explicit
+// cases because lowering only the first letter would be wrong.
+static std::string getDXILOpClassName(StringRef DXILOpClass) {
+  // Lower first letter expect for special case.
+  return StringSwitch<std::string>(DXILOpClass)
+      .Case("CBufferLoad", "cbufferLoad")
+      .Case("CBufferLoadLegacy", "cbufferLoadLegacy")
+      .Case("GSInstanceID", "gsInstanceID")
+      .Default(lowerFirstLetter(DXILOpClass));
+}
+
+// Emit the OpCodeProperty table plus its accessor functions:
+// getOpCodeProperty (binary search by opcode), getOpCodeName,
+// getOpCodeClassName and getOpCodeParameterKind. String and parameter data
+// are packed via SequenceToOffsetTable so rows only store offsets.
+// Note: sorts DXILOps in place by opcode ID.
+static void emitDXILOperationTable(std::vector<DXILOperationData> &DXILOps,
+                                   raw_ostream &OS) {
+  // Sort by DXILOpID.
+  llvm::sort(DXILOps, [](DXILOperationData &A, DXILOperationData &B) {
+    return A.DXILOpID < B.DXILOpID;
+  });
+
+  // Collect Names.
+  SequenceToOffsetTable<std::string> OpClassStrings;
+  SequenceToOffsetTable<std::string> OpStrings;
+  SequenceToOffsetTable<SmallVector<ParameterKind>> Parameters;
+
+  StringMap<SmallVector<ParameterKind>> ParameterMap;
+  StringSet<> ClassSet;
+  for (auto &DXILOp : DXILOps) {
+    OpStrings.add(DXILOp.DXILOp.str());
+
+    // Class name and parameter-kind list are shared per class; record them
+    // only for the first op of each class.
+    if (ClassSet.find(DXILOp.DXILClass) != ClassSet.end())
+      continue;
+    ClassSet.insert(DXILOp.DXILClass);
+    OpClassStrings.add(getDXILOpClassName(DXILOp.DXILClass));
+    SmallVector<ParameterKind> ParamKindVec;
+    for (auto &Param : DXILOp.Params) {
+      ParamKindVec.emplace_back(Param.Kind);
+    }
+    ParameterMap[DXILOp.DXILClass] = ParamKindVec;
+    Parameters.add(ParamKindVec);
+  }
+
+  // Layout names.
+  // Freeze the offset tables; get() is only valid after layout().
+  OpStrings.layout();
+  OpClassStrings.layout();
+  Parameters.layout();
+
+  // Emit the DXIL operation table.
+  //{dxil::OpCode::Sin, OpCodeNameIndex, OpCodeClass::Unary,
+  // OpCodeClassNameIndex,
+  // OverloadKind::FLOAT | OverloadKind::HALF, Attribute::AttrKind::ReadNone, 0,
+  // 3, ParameterTableOffset},
+  OS << "static const OpCodeProperty *getOpCodeProperty(dxil::OpCode DXILOp) "
+        "{\n";
+
+  OS << "  static const OpCodeProperty OpCodeProps[] = {\n";
+  for (auto &DXILOp : DXILOps) {
+    OS << "  { dxil::OpCode::" << DXILOp.DXILOp << ", "
+       << OpStrings.get(DXILOp.DXILOp.str())
+       << ", OpCodeClass::" << DXILOp.DXILClass << ", "
+       << OpClassStrings.get(getDXILOpClassName(DXILOp.DXILClass)) << ", "
+       << getDXILOperationOverload(DXILOp.OverloadTypes) << ", "
+       << emitDXILOperationFnAttr(DXILOp.FnAttr) << ", "
+       << DXILOp.OverloadParamIndex << ", " << DXILOp.Params.size() << ", "
+       << Parameters.get(ParameterMap[DXILOp.DXILClass]) << " },\n";
+  }
+  OS << "  };\n";
+
+  // Rows are sorted by opcode, so lookup is a lower_bound on OpCode.
+  OS << "  // FIXME: change search to indexing with\n";
+  OS << "  // DXILOp once all DXIL op is added.\n";
+  OS << "  OpCodeProperty TmpProp;\n";
+  OS << "  TmpProp.OpCode = DXILOp;\n";
+  OS << "  const OpCodeProperty *Prop =\n";
+  OS << "      llvm::lower_bound(OpCodeProps, TmpProp,\n";
+  OS << "                        [](const OpCodeProperty &A, const "
+        "OpCodeProperty &B) {\n";
+  OS << "                          return A.OpCode < B.OpCode;\n";
+  OS << "                        });\n";
+  OS << "  assert(Prop && \"fail to find OpCodeProperty\");\n";
+  OS << "  return Prop;\n";
+  OS << "}\n\n";
+
+  // Emit the string tables.
+  OS << "static const char *getOpCodeName(dxil::OpCode DXILOp) {\n\n";
+
+  OpStrings.emitStringLiteralDef(OS,
+                                 "  static const char DXILOpCodeNameTable[]");
+
+  OS << "  auto *Prop = getOpCodeProperty(DXILOp);\n";
+  OS << "  unsigned Index = Prop->OpCodeNameOffset;\n";
+  OS << "  return DXILOpCodeNameTable + Index;\n";
+  OS << "}\n\n";
+
+  OS << "static const char *getOpCodeClassName(const OpCodeProperty &Prop) "
+        "{\n\n";
+
+  OpClassStrings.emitStringLiteralDef(
+      OS, "  static const char DXILOpCodeClassNameTable[]");
+
+  OS << "  unsigned Index = Prop.OpCodeClassNameOffset;\n";
+  OS << "  return DXILOpCodeClassNameTable + Index;\n";
+  OS << "}\n ";
+
+  OS << "static const ParameterKind *getOpCodeParameterKind(const "
+        "OpCodeProperty &Prop) "
+        "{\n\n";
+  OS << "  static const ParameterKind DXILOpParameterKindTable[] = {\n";
+  Parameters.emit(
+      OS,
+      [](raw_ostream &ParamOS, ParameterKind Kind) {
+        ParamOS << "ParameterKind::" << parameterKindToString(Kind);
+      },
+      "ParameterKind::INVALID");
+  OS << "  };\n\n";
+  OS << "  unsigned Index = Prop.ParameterTableOffset;\n";
+  OS << "  return DXILOpParameterKindTable + Index;\n";
+  OS << "}\n ";
+}
+
+namespace llvm {
+
+// TableGen backend entry point for DXIL operations. Each generated section
+// is guarded by a preprocessor macro (DXIL_OP_ENUM / DXIL_OP_INTRINSIC_MAP
+// / DXIL_OP_OPERATION_TABLE) so consumers include only what they need.
+void EmitDXILOperation(RecordKeeper &Records, raw_ostream &OS) {
+  std::vector<Record *> Ops = Records.getAllDerivedDefinitions("dxil_op");
+  OS << "// Generated code, do not edit.\n";
+  OS << "\n";
+
+  std::vector<DXILOperationData> DXILOps;
+  DXILOps.reserve(Ops.size());
+  for (auto *Record : Ops) {
+    DXILOps.emplace_back(DXILOperationData(Record));
+  }
+
+  OS << "#ifdef DXIL_OP_ENUM\n";
+  emitDXILEnums(DXILOps, OS);
+  OS << "#endif\n\n";
+
+  OS << "#ifdef DXIL_OP_INTRINSIC_MAP\n";
+  emitDXILIntrinsicMap(DXILOps, OS);
+  OS << "#endif\n\n";
+
+  OS << "#ifdef DXIL_OP_OPERATION_TABLE\n";
+  emitDXILOperationTable(DXILOps, OS);
+  OS << "#endif\n\n";
+
+  OS << "\n";
+}
+
+} // namespace llvm
diff --git a/contrib/libs/llvm16/utils/TableGen/DecoderEmitter.cpp b/contrib/libs/llvm16/utils/TableGen/DecoderEmitter.cpp
new file mode 100644
index 0000000000..8f81674437
--- /dev/null
+++ b/contrib/libs/llvm16/utils/TableGen/DecoderEmitter.cpp
@@ -0,0 +1,2773 @@
+//===---------------- DecoderEmitter.cpp - Decoder Generator --------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// It contains the tablegen backend that emits the decoder functions for
+// targets with fixed/variable length instruction set.
+//
+//===----------------------------------------------------------------------===//
+
+#include "CodeGenInstruction.h"
+#include "CodeGenTarget.h"
+#include "InfoByHwMode.h"
+#include "VarLenCodeEmitterGen.h"
+#include "llvm/ADT/APInt.h"
+#include "llvm/ADT/ArrayRef.h"
+#include "llvm/ADT/CachedHashString.h"
+#include "llvm/ADT/STLExtras.h"
+#include "llvm/ADT/SetVector.h"
+#include "llvm/ADT/SmallString.h"
+#include "llvm/ADT/Statistic.h"
+#include "llvm/ADT/StringExtras.h"
+#include "llvm/ADT/StringRef.h"
+#include "llvm/MC/MCDecoderOps.h"
+#include "llvm/Support/Casting.h"
+#include "llvm/Support/Debug.h"
+#include "llvm/Support/ErrorHandling.h"
+#include "llvm/Support/FormattedStream.h"
+#include "llvm/Support/LEB128.h"
+#include "llvm/Support/raw_ostream.h"
+#include "llvm/TableGen/Error.h"
+#include "llvm/TableGen/Record.h"
+#include <algorithm>
+#include <cassert>
+#include <cstddef>
+#include <cstdint>
+#include <map>
+#include <memory>
+#include <set>
+#include <string>
+#include <utility>
+#include <vector>
+
+using namespace llvm;
+
+#define DEBUG_TYPE "decoder-emitter"
+
+namespace {
+
+STATISTIC(NumEncodings, "Number of encodings considered");
+STATISTIC(NumEncodingsLackingDisasm, "Number of encodings without disassembler info");
+STATISTIC(NumInstructions, "Number of instructions considered");
+STATISTIC(NumEncodingsSupported, "Number of encodings supported");
+STATISTIC(NumEncodingsOmitted, "Number of encodings omitted");
+
+// One contiguous bit field of an instruction encoding: Width bits starting
+// at Base in the instruction, placed at Offset in the decoded operand.
+struct EncodingField {
+  unsigned Base, Width, Offset;
+  EncodingField(unsigned B, unsigned W, unsigned O)
+    : Base(B), Width(W), Offset(O) { }
+};
+
+// How to decode one operand: the encoding fields that feed it, the name of
+// the custom decoder method (may be empty), and whether that decoder can
+// fail (HasCompleteDecoder == false means it may reject the encoding).
+struct OperandInfo {
+  std::vector<EncodingField> Fields;
+  std::string Decoder;
+  bool HasCompleteDecoder;
+  uint64_t InitValue; // initial operand value before any field is OR'ed in
+
+  OperandInfo(std::string D, bool HCD)
+      : Decoder(std::move(D)), HasCompleteDecoder(HCD), InitValue(0) {}
+
+  void addField(unsigned Base, unsigned Width, unsigned Offset) {
+    Fields.push_back(EncodingField(Base, Width, Offset));
+  }
+
+  unsigned numFields() const { return Fields.size(); }
+
+  typedef std::vector<EncodingField>::const_iterator const_iterator;
+
+  const_iterator begin() const { return Fields.begin(); }
+  const_iterator end() const { return Fields.end(); }
+};
+
+// The decoder state machine is serialized as a byte table; fixups patch
+// forward-referenced jump offsets once their targets are known.
+typedef std::vector<uint8_t> DecoderTable;
+typedef uint32_t DecoderFixup;
+typedef std::vector<DecoderFixup> FixupList;
+typedef std::vector<FixupList> FixupScopeList;
+typedef SmallSetVector<CachedHashString, 16> PredicateSet;
+typedef SmallSetVector<CachedHashString, 16> DecoderSet;
+// All state accumulated while emitting one decoder table: the table bytes,
+// a stack of pending fixup scopes, and the deduplicated predicate/decoder
+// snippets referenced by table entries.
+struct DecoderTableInfo {
+  DecoderTable Table;
+  FixupScopeList FixupStack;
+  PredicateSet Predicates;
+  DecoderSet Decoders;
+};
+
+// Pairs an encoding record with the instruction it encodes; the same
+// instruction may appear once per HwMode, distinguished by HwModeName.
+struct EncodingAndInst {
+  const Record *EncodingDef;
+  const CodeGenInstruction *Inst;
+  StringRef HwModeName;
+
+  EncodingAndInst(const Record *EncodingDef, const CodeGenInstruction *Inst,
+                  StringRef HwModeName = "")
+      : EncodingDef(EncodingDef), Inst(Inst), HwModeName(HwModeName) {}
+};
+
+// An encoding ID (index into NumberedEncodings) together with the opcode
+// the decoder should report for it.
+struct EncodingIDAndOpcode {
+  unsigned EncodingID;
+  unsigned Opcode;
+
+  EncodingIDAndOpcode() : EncodingID(0), Opcode(0) {}
+  EncodingIDAndOpcode(unsigned EncodingID, unsigned Opcode)
+      : EncodingID(EncodingID), Opcode(Opcode) {}
+};
+
+// Debug printing: "EncodingName:InstName", or just the instruction name
+// when the encoding def is the instruction's own record.
+raw_ostream &operator<<(raw_ostream &OS, const EncodingAndInst &Value) {
+  if (Value.EncodingDef != Value.Inst->TheDef)
+    OS << Value.EncodingDef->getName() << ":";
+  OS << Value.Inst->TheDef->getName();
+  return OS;
+}
+
+// Top-level driver for the disassembler-table backend: collects all
+// instruction encodings and emits the decoder tables plus the predicate
+// and decoder helper functions.
+class DecoderEmitter {
+  RecordKeeper &RK;
+  // All encodings being decoded, indexed by encoding ID.
+  std::vector<EncodingAndInst> NumberedEncodings;
+
+public:
+  DecoderEmitter(RecordKeeper &R, std::string PredicateNamespace)
+      : RK(R), Target(R), PredicateNamespace(std::move(PredicateNamespace)) {}
+
+  // Emit the decoder state machine table.
+  void emitTable(formatted_raw_ostream &o, DecoderTable &Table,
+                 unsigned Indentation, unsigned BitWidth,
+                 StringRef Namespace) const;
+  // Emit the per-opcode instruction length table (variable-length ISAs).
+  void emitInstrLenTable(formatted_raw_ostream &OS,
+                         std::vector<unsigned> &InstrLen) const;
+  // Emit checkDecoderPredicate() from the collected predicate snippets.
+  void emitPredicateFunction(formatted_raw_ostream &OS,
+                             PredicateSet &Predicates,
+                             unsigned Indentation) const;
+  // Emit decodeToMCInst() from the collected decoder snippets.
+  void emitDecoderFunction(formatted_raw_ostream &OS,
+                           DecoderSet &Decoders,
+                           unsigned Indentation) const;
+
+  // run - Output the code emitter
+  void run(raw_ostream &o);
+
+private:
+  CodeGenTarget Target;
+
+public:
+  std::string PredicateNamespace;
+};
+
+} // end anonymous namespace
+
+// The set (BIT_TRUE, BIT_FALSE, BIT_UNSET) represents a ternary logic system
+// for a bit value.
+//
+// BIT_UNFILTERED is used as the init value for a filter position. It is used
+// only for filter processings.
+typedef enum {
+  BIT_TRUE,      // '1'
+  BIT_FALSE,     // '0'
+  BIT_UNSET,     // '?'
+  BIT_UNFILTERED // unfiltered
+} bit_value_t;
+
+// True iff the bit has a concrete 0/1 value.
+static bool ValueSet(bit_value_t V) {
+  return (V == BIT_TRUE || V == BIT_FALSE);
+}
+
+// True iff the bit is the '?' (unset) value.
+static bool ValueNotSet(bit_value_t V) {
+  return (V == BIT_UNSET);
+}
+
+// Numeric view of a bit: -1 for unset, else 0/1.
+static int Value(bit_value_t V) {
+  return ValueNotSet(V) ? -1 : (V == BIT_FALSE ? 0 : 1);
+}
+
+// Read bit `index` of a BitsInit as a ternary value; a non-BitInit entry
+// means the bit was left uninitialized in the .td encoding.
+static bit_value_t bitFromBits(const BitsInit &bits, unsigned index) {
+  if (BitInit *bit = dyn_cast<BitInit>(bits.getBit(index)))
+    return bit->getValue() ? BIT_TRUE : BIT_FALSE;
+
+  // The bit is uninitialized.
+  return BIT_UNSET;
+}
+
+// Prints the bit value for each position.
+// Output is MSB-first: '1', '0', or '_' for an uninitialized bit.
+static void dumpBits(raw_ostream &o, const BitsInit &bits) {
+  for (unsigned index = bits.getNumBits(); index > 0; --index) {
+    switch (bitFromBits(bits, index - 1)) {
+    case BIT_TRUE:
+      o << "1";
+      break;
+    case BIT_FALSE:
+      o << "0";
+      break;
+    case BIT_UNSET:
+      o << "_";
+      break;
+    default:
+      llvm_unreachable("unexpected return value from bitFromBits");
+    }
+  }
+}
+
+// Fetch field `str` of a record as a BitsInit. Fixed-length encodings store
+// it directly; variable-length encodings store a DagInit, which is expanded
+// segment by segment into a flat BitsInit (unknown segments become unset
+// bits of the segment's width).
+static BitsInit &getBitsField(const Record &def, StringRef str) {
+  const RecordVal *RV = def.getValue(str);
+  if (BitsInit *Bits = dyn_cast<BitsInit>(RV->getValue()))
+    return *Bits;
+
+  // variable length instruction
+  VarLenInst VLI = VarLenInst(cast<DagInit>(RV->getValue()), RV);
+  SmallVector<Init *, 16> Bits;
+
+  for (auto &SI : VLI) {
+    if (const BitsInit *BI = dyn_cast<BitsInit>(SI.Value)) {
+      for (unsigned Idx = 0U; Idx < BI->getNumBits(); ++Idx) {
+        Bits.push_back(BI->getBit(Idx));
+      }
+    } else if (const BitInit *BI = dyn_cast<BitInit>(SI.Value)) {
+      Bits.push_back(const_cast<BitInit *>(BI));
+    } else {
+      // Segment value is not a literal bit pattern; pad with unset bits.
+      for (unsigned Idx = 0U; Idx < SI.BitWidth; ++Idx)
+        Bits.push_back(UnsetInit::get(def.getRecords()));
+    }
+  }
+
+  return *BitsInit::get(def.getRecords(), Bits);
+}
+
+// Representation of the instruction to work on.
+typedef std::vector<bit_value_t> insn_t;
+
+namespace {
+
+static const uint64_t NO_FIXED_SEGMENTS_SENTINEL = -1ULL;
+
+class FilterChooser;
+
+/// Filter - Filter works with FilterChooser to produce the decoding tree for
+/// the ISA.
+///
+/// It is useful to think of a Filter as governing the switch stmts of the
+/// decoding tree in a certain level. Each case stmt delegates to an inferior
+/// FilterChooser to decide what further decoding logic to employ, or in another
+/// words, what other remaining bits to look at. The FilterChooser eventually
+/// chooses a best Filter to do its job.
+///
+/// This recursive scheme ends when the number of Opcodes assigned to the
+/// FilterChooser becomes 1 or if there is a conflict. A conflict happens when
+/// the Filter/FilterChooser combo does not know how to distinguish among the
+/// Opcodes assigned.
+///
+/// An example of a conflict is
+///
+/// Conflict:
+/// 111101000.00........00010000....
+/// 111101000.00........0001........
+/// 1111010...00........0001........
+/// 1111010...00....................
+/// 1111010.........................
+/// 1111............................
+/// ................................
+/// VST4q8a 111101000_00________00010000____
+/// VST4q8b 111101000_00________00010000____
+///
+/// The Debug output shows the path that the decoding tree follows to reach the
+/// the conclusion that there is a conflict. VST4q8a is a vst4 to double-spaced
+/// even registers, while VST4q8b is a vst4 to double-spaced odd registers.
+///
+/// The encoding info in the .td files does not specify this meta information,
+/// which could have been used by the decoder to resolve the conflict. The
+/// decoder could try to decode the even/odd register numbering and assign to
+/// VST4q8a or VST4q8b, but for the time being, the decoder chooses the "a"
+/// version and return the Opcode since the two have the same Asm format string.
+class Filter {
+protected:
+  const FilterChooser *Owner;// points to the FilterChooser who owns this filter
+  unsigned StartBit; // the starting bit position
+  unsigned NumBits;  // number of bits to filter
+  bool Mixed;        // a mixed region contains both set and unset bits
+
+  // Map of well-known segment value to the set of uid's with that value.
+  std::map<uint64_t, std::vector<EncodingIDAndOpcode>>
+      FilteredInstructions;
+
+  // Set of uid's with non-constant segment values.
+  std::vector<EncodingIDAndOpcode> VariableInstructions;
+
+  // Map of well-known segment value to its delegate.
+  // The sentinel key NO_FIXED_SEGMENTS_SENTINEL holds the chooser for the
+  // variable-segment instructions.
+  std::map<uint64_t, std::unique_ptr<const FilterChooser>> FilterChooserMap;
+
+  // Number of instructions which fall under FilteredInstructions category.
+  unsigned NumFiltered;
+
+  // Keeps track of the last opcode in the filtered bucket.
+  EncodingIDAndOpcode LastOpcFiltered;
+
+public:
+  Filter(Filter &&f);
+  Filter(FilterChooser &owner, unsigned startBit, unsigned numBits, bool mixed);
+
+  ~Filter() = default;
+
+  unsigned getNumFiltered() const { return NumFiltered; }
+
+  // Only valid when exactly one instruction was filtered.
+  EncodingIDAndOpcode getSingletonOpc() const {
+    assert(NumFiltered == 1);
+    return LastOpcFiltered;
+  }
+
+  // Return the filter chooser for the group of instructions without constant
+  // segment values.
+  const FilterChooser &getVariableFC() const {
+    assert(NumFiltered == 1);
+    assert(FilterChooserMap.size() == 1);
+    return *(FilterChooserMap.find(NO_FIXED_SEGMENTS_SENTINEL)->second);
+  }
+
+  // Divides the decoding task into sub tasks and delegates them to the
+  // inferior FilterChooser's.
+  //
+  // A special case arises when there's only one entry in the filtered
+  // instructions. In order to unambiguously decode the singleton, we need to
+  // match the remaining undecoded encoding bits against the singleton.
+  void recurse();
+
+  // Emit table entries to decode instructions given a segment or segments of
+  // bits.
+  void emitTableEntry(DecoderTableInfo &TableInfo) const;
+
+  // Returns the number of fanout produced by the filter. More fanout implies
+  // the filter distinguishes more categories of instructions.
+  unsigned usefulness() const;
+}; // end class Filter
+
+} // end anonymous namespace
+
+// These are states of our finite state machines used in FilterChooser's
+// filterProcessor() which produces the filter candidates to use.
+typedef enum {
+ ATTR_NONE,
+ ATTR_FILTERED,
+ ATTR_ALL_SET,
+ ATTR_ALL_UNSET,
+ ATTR_MIXED
+} bitAttr_t;
+
/// FilterChooser - FilterChooser chooses the best filter among a set of Filters
/// in order to perform the decoding of instructions at the current level.
///
/// Decoding proceeds from the top down. Based on the well-known encoding bits
/// of instructions available, FilterChooser builds up the possible Filters that
/// can further the task of decoding by distinguishing among the remaining
/// candidate instructions.
///
/// Once a filter has been chosen, it is called upon to divide the decoding task
/// into sub-tasks and delegates them to its inferior FilterChoosers for further
/// processing.
///
/// It is useful to think of a Filter as governing the switch stmts of the
/// decoding tree. And each case is delegated to an inferior FilterChooser to
/// decide what further remaining bits to look at.
namespace {

class FilterChooser {
protected:
  friend class Filter;

  // Vector of codegen instructions to choose our filter.
  ArrayRef<EncodingAndInst> AllInstructions;

  // Vector of uid's for this filter chooser to work on.
  // The first member of the pair is the opcode id being decoded, the second is
  // the opcode id that should be emitted.
  const std::vector<EncodingIDAndOpcode> &Opcodes;

  // Lookup table for the operand decoding of instructions.
  const std::map<unsigned, std::vector<OperandInfo>> &Operands;

  // Vector of candidate filters.
  std::vector<Filter> Filters;

  // Array of bit values passed down from our parent.
  // Set to all BIT_UNFILTERED's for Parent == NULL.
  std::vector<bit_value_t> FilterBitValues;

  // Links to the FilterChooser above us in the decoding tree.
  const FilterChooser *Parent;

  // Index of the best filter from Filters.
  int BestIndex;

  // Width of instructions
  unsigned BitWidth;

  // Parent emitter
  const DecoderEmitter *Emitter;

public:
  // Root constructor: no bits are filtered yet, and the decoding tree is
  // built eagerly by doFilter().
  FilterChooser(ArrayRef<EncodingAndInst> Insts,
                const std::vector<EncodingIDAndOpcode> &IDs,
                const std::map<unsigned, std::vector<OperandInfo>> &Ops,
                unsigned BW, const DecoderEmitter *E)
      : AllInstructions(Insts), Opcodes(IDs), Operands(Ops),
        FilterBitValues(BW, BIT_UNFILTERED), Parent(nullptr), BestIndex(-1),
        BitWidth(BW), Emitter(E) {
    doFilter();
  }

  // Child constructor: inherits the parent's bit-value array (with the
  // parent's chosen segment already marked) and recursively filters the
  // remaining opcodes.
  FilterChooser(ArrayRef<EncodingAndInst> Insts,
                const std::vector<EncodingIDAndOpcode> &IDs,
                const std::map<unsigned, std::vector<OperandInfo>> &Ops,
                const std::vector<bit_value_t> &ParentFilterBitValues,
                const FilterChooser &parent)
      : AllInstructions(Insts), Opcodes(IDs), Operands(Ops),
        FilterBitValues(ParentFilterBitValues), Parent(&parent), BestIndex(-1),
        BitWidth(parent.BitWidth), Emitter(parent.Emitter) {
    doFilter();
  }

  FilterChooser(const FilterChooser &) = delete;
  void operator=(const FilterChooser &) = delete;

  unsigned getBitWidth() const { return BitWidth; }

protected:
  // Populates the insn given the uid.
  void insnWithID(insn_t &Insn, unsigned Opcode) const {
    BitsInit &Bits = getBitsField(*AllInstructions[Opcode].EncodingDef, "Inst");
    Insn.resize(BitWidth > Bits.getNumBits() ? BitWidth : Bits.getNumBits(),
                BIT_UNSET);
    // We may have a SoftFail bitmask, which specifies a mask where an encoding
    // may differ from the value in "Inst" and yet still be valid, but the
    // disassembler should return SoftFail instead of Success.
    //
    // This is used for marking UNPREDICTABLE instructions in the ARM world.
    const RecordVal *RV =
        AllInstructions[Opcode].EncodingDef->getValue("SoftFail");
    const BitsInit *SFBits = RV ? dyn_cast<BitsInit>(RV->getValue()) : nullptr;
    for (unsigned i = 0; i < Bits.getNumBits(); ++i) {
      // SoftFail bits are treated as unknown so they never participate in
      // filtering; they are checked separately via OPC_SoftFail entries.
      if (SFBits && bitFromBits(*SFBits, i) == BIT_TRUE)
        Insn[i] = BIT_UNSET;
      else
        Insn[i] = bitFromBits(Bits, i);
    }
  }

  // Emit the name of the encoding/instruction pair.
  void emitNameWithID(raw_ostream &OS, unsigned Opcode) const {
    const Record *EncodingDef = AllInstructions[Opcode].EncodingDef;
    const Record *InstDef = AllInstructions[Opcode].Inst->TheDef;
    // Prefix with the encoding record's name only when it differs from the
    // instruction record (i.e. a separate encoding def is in use).
    if (EncodingDef != InstDef)
      OS << EncodingDef->getName() << ":";
    OS << InstDef->getName();
  }

  // Populates the field of the insn given the start position and the number of
  // consecutive bits to scan for.
  //
  // Returns false if there exists any uninitialized bit value in the range.
  // Returns true, otherwise.
  bool fieldFromInsn(uint64_t &Field, insn_t &Insn, unsigned StartBit,
                     unsigned NumBits) const;

  /// dumpFilterArray - dumpFilterArray prints out debugging info for the given
  /// filter array as a series of chars.
  void dumpFilterArray(raw_ostream &o,
                       const std::vector<bit_value_t> & filter) const;

  /// dumpStack - dumpStack traverses the filter chooser chain and calls
  /// dumpFilterArray on each filter chooser up to the top level one.
  void dumpStack(raw_ostream &o, const char *prefix) const;

  Filter &bestFilter() {
    assert(BestIndex != -1 && "BestIndex not set");
    return Filters[BestIndex];
  }

  bool PositionFiltered(unsigned i) const {
    return ValueSet(FilterBitValues[i]);
  }

  // Calculates the island(s) needed to decode the instruction.
  // This returns a list of undecoded bits of an instructions, for example,
  // Inst{20} = 1 && Inst{3-0} == 0b1111 represents two islands of yet-to-be
  // decoded bits in order to verify that the instruction matches the Opcode.
  unsigned getIslands(std::vector<unsigned> &StartBits,
                      std::vector<unsigned> &EndBits,
                      std::vector<uint64_t> &FieldVals,
                      const insn_t &Insn) const;

  // Emits code to check the Predicates member of an instruction are true.
  // Returns true if predicate matches were emitted, false otherwise.
  bool emitPredicateMatch(raw_ostream &o, unsigned &Indentation,
                          unsigned Opc) const;
  bool emitPredicateMatchAux(const Init &Val, bool ParenIfBinOp,
                             raw_ostream &OS) const;

  bool doesOpcodeNeedPredicate(unsigned Opc) const;
  unsigned getPredicateIndex(DecoderTableInfo &TableInfo, StringRef P) const;
  void emitPredicateTableEntry(DecoderTableInfo &TableInfo,
                               unsigned Opc) const;

  void emitSoftFailTableEntry(DecoderTableInfo &TableInfo,
                              unsigned Opc) const;

  // Emits table entries to decode the singleton.
  void emitSingletonTableEntry(DecoderTableInfo &TableInfo,
                               EncodingIDAndOpcode Opc) const;

  // Emits code to decode the singleton, and then to decode the rest.
  void emitSingletonTableEntry(DecoderTableInfo &TableInfo,
                               const Filter &Best) const;

  void emitBinaryParser(raw_ostream &o, unsigned &Indentation,
                        const OperandInfo &OpInfo,
                        bool &OpHasCompleteDecoder) const;

  void emitDecoder(raw_ostream &OS, unsigned Indentation, unsigned Opc,
                   bool &HasCompleteDecoder) const;
  unsigned getDecoderIndex(DecoderSet &Decoders, unsigned Opc,
                           bool &HasCompleteDecoder) const;

  // Assign a single filter and run with it.
  void runSingleFilter(unsigned startBit, unsigned numBit, bool mixed);

  // reportRegion is a helper function for filterProcessor to mark a region as
  // eligible for use as a filter region.
  void reportRegion(bitAttr_t RA, unsigned StartBit, unsigned BitIndex,
                    bool AllowMixed);

  // FilterProcessor scans the well-known encoding bits of the instructions and
  // builds up a list of candidate filters. It chooses the best filter and
  // recursively descends down the decoding tree.
  bool filterProcessor(bool AllowMixed, bool Greedy = true);

  // Decides on the best configuration of filter(s) to use in order to decode
  // the instructions. A conflict of instructions may occur, in which case we
  // dump the conflict set to the standard error.
  void doFilter();

public:
  // emitTableEntries - Emit state machine entries to decode our share of
  // instructions.
  void emitTableEntries(DecoderTableInfo &TableInfo) const;
};

} // end anonymous namespace
+
+///////////////////////////
+// //
+// Filter Implementation //
+// //
+///////////////////////////
+
// Move constructor: the container members (instruction buckets and the
// sub-chooser map) transfer ownership; the scalar bookkeeping members are
// simply copied from the source.
Filter::Filter(Filter &&f)
  : Owner(f.Owner), StartBit(f.StartBit), NumBits(f.NumBits), Mixed(f.Mixed),
    FilteredInstructions(std::move(f.FilteredInstructions)),
    VariableInstructions(std::move(f.VariableInstructions)),
    FilterChooserMap(std::move(f.FilterChooserMap)), NumFiltered(f.NumFiltered),
    LastOpcFiltered(f.LastOpcFiltered) {
}
+
// Main constructor: partitions the owner's opcodes by the value of the bit
// segment [StartBit, StartBit + NumBits). Opcodes whose segment bits are all
// well-defined are bucketed in FilteredInstructions keyed by the segment
// value; opcodes with any unspecified bit in the segment are collected in
// VariableInstructions.
Filter::Filter(FilterChooser &owner, unsigned startBit, unsigned numBits,
               bool mixed)
    : Owner(&owner), StartBit(startBit), NumBits(numBits), Mixed(mixed) {
  assert(StartBit + NumBits - 1 < Owner->BitWidth);

  NumFiltered = 0;
  LastOpcFiltered = {0, 0};

  for (unsigned i = 0, e = Owner->Opcodes.size(); i != e; ++i) {
    insn_t Insn;

    // Populates the insn given the uid.
    Owner->insnWithID(Insn, Owner->Opcodes[i].EncodingID);

    uint64_t Field;
    // Scans the segment for possibly well-specified encoding bits.
    bool ok = Owner->fieldFromInsn(Field, Insn, StartBit, NumBits);

    if (ok) {
      // The encoding bits are well-known. Lets add the uid of the
      // instruction into the bucket keyed off the constant field value.
      // LastOpcFiltered is remembered so a singleton filter can be decoded
      // directly without a sub-chooser lookup.
      LastOpcFiltered = Owner->Opcodes[i];
      FilteredInstructions[Field].push_back(LastOpcFiltered);
      ++NumFiltered;
    } else {
      // Some of the encoding bit(s) are unspecified. This contributes to
      // one additional member of "Variable" instructions.
      VariableInstructions.push_back(Owner->Opcodes[i]);
    }
  }

  assert((FilteredInstructions.size() + VariableInstructions.size() > 0)
         && "Filter returns no instruction categories");
}
+
// Divides the decoding task into sub tasks and delegates them to the
// inferior FilterChooser's.
//
// A special case arises when there's only one entry in the filtered
// instructions. In order to unambiguously decode the singleton, we need to
// match the remaining undecoded encoding bits against the singleton.
void Filter::recurse() {
  // Starts by inheriting our parent filter chooser's filter bit values.
  std::vector<bit_value_t> BitValueArray(Owner->FilterBitValues);

  if (!VariableInstructions.empty()) {
    // Conservatively marks each segment position as BIT_UNSET.
    for (unsigned bitIndex = 0; bitIndex < NumBits; ++bitIndex)
      BitValueArray[StartBit + bitIndex] = BIT_UNSET;

    // Delegates to an inferior filter chooser for further processing on this
    // group of instructions whose segment values are variable.
    FilterChooserMap.insert(std::make_pair(NO_FIXED_SEGMENTS_SENTINEL,
        std::make_unique<FilterChooser>(Owner->AllInstructions,
            VariableInstructions, Owner->Operands, BitValueArray, *Owner)));
  }

  // No need to recurse for a singleton filtered instruction.
  // See also Filter::emit*().
  if (getNumFiltered() == 1) {
    assert(FilterChooserMap.size() == 1);
    return;
  }

  // Otherwise, create sub choosers.
  for (const auto &Inst : FilteredInstructions) {

    // Marks all the segment positions with either BIT_TRUE or BIT_FALSE.
    // Every bit of the segment is overwritten here, so reusing BitValueArray
    // across iterations (and after the variable-bits case above) is safe.
    for (unsigned bitIndex = 0; bitIndex < NumBits; ++bitIndex) {
      if (Inst.first & (1ULL << bitIndex))
        BitValueArray[StartBit + bitIndex] = BIT_TRUE;
      else
        BitValueArray[StartBit + bitIndex] = BIT_FALSE;
    }

    // Delegates to an inferior filter chooser for further processing on this
    // category of instructions.
    FilterChooserMap.insert(std::make_pair(
        Inst.first, std::make_unique<FilterChooser>(
                        Owner->AllInstructions, Inst.second,
                        Owner->Operands, BitValueArray, *Owner)));
  }
}
+
+static void resolveTableFixups(DecoderTable &Table, const FixupList &Fixups,
+ uint32_t DestIdx) {
+ // Any NumToSkip fixups in the current scope can resolve to the
+ // current location.
+ for (FixupList::const_reverse_iterator I = Fixups.rbegin(),
+ E = Fixups.rend();
+ I != E; ++I) {
+ // Calculate the distance from the byte following the fixup entry byte
+ // to the destination. The Target is calculated from after the 16-bit
+ // NumToSkip entry itself, so subtract two from the displacement here
+ // to account for that.
+ uint32_t FixupIdx = *I;
+ uint32_t Delta = DestIdx - FixupIdx - 3;
+ // Our NumToSkip entries are 24-bits. Make sure our table isn't too
+ // big.
+ assert(Delta < (1u << 24));
+ Table[FixupIdx] = (uint8_t)Delta;
+ Table[FixupIdx + 1] = (uint8_t)(Delta >> 8);
+ Table[FixupIdx + 2] = (uint8_t)(Delta >> 16);
+ }
+}
+
// Emit table entries to decode instructions given a segment or segments
// of bits.
void Filter::emitTableEntry(DecoderTableInfo &TableInfo) const {
  // Extract the segment being switched on: [StartBit, StartBit + NumBits).
  TableInfo.Table.push_back(MCD::OPC_ExtractField);
  TableInfo.Table.push_back(StartBit);
  TableInfo.Table.push_back(NumBits);

  // A new filter entry begins a new scope for fixup resolution.
  TableInfo.FixupStack.emplace_back();

  DecoderTable &Table = TableInfo.Table;

  // Table index of the most recent filter's 24-bit NumToSkip field, or 0 if
  // there is none outstanding. Index 0 can double as "none" because a valid
  // NumToSkip field can never start at the beginning of the table.
  size_t PrevFilter = 0;
  bool HasFallthrough = false;
  for (auto &Filter : FilterChooserMap) {
    // Field value -1 implies a non-empty set of variable instructions.
    // See also recurse().
    if (Filter.first == NO_FIXED_SEGMENTS_SENTINEL) {
      HasFallthrough = true;

      // Each scope should always have at least one filter value to check
      // for.
      assert(PrevFilter != 0 && "empty filter set!");
      FixupList &CurScope = TableInfo.FixupStack.back();
      // Resolve any NumToSkip fixups in the current scope: failed checks
      // above all fall through to this variable-bits handler.
      resolveTableFixups(Table, CurScope, Table.size());
      CurScope.clear();
      PrevFilter = 0; // Don't re-process the filter's fallthrough.
    } else {
      Table.push_back(MCD::OPC_FilterValue);
      // Encode and emit the value to filter against.
      uint8_t Buffer[16];
      unsigned Len = encodeULEB128(Filter.first, Buffer);
      Table.insert(Table.end(), Buffer, Buffer + Len);
      // Reserve space for the NumToSkip entry. We'll backpatch the value
      // later.
      PrevFilter = Table.size();
      Table.push_back(0);
      Table.push_back(0);
      Table.push_back(0);
    }

    // We arrive at a category of instructions with the same segment value.
    // Now delegate to the sub filter chooser for further decodings.
    // The case may fallthrough, which happens if the remaining well-known
    // encoding bits do not match exactly.
    Filter.second->emitTableEntries(TableInfo);

    // Now that we've emitted the body of the handler, update the NumToSkip
    // of the filter itself to be able to skip forward when false. Subtract
    // two as to account for the width of the NumToSkip field itself.
    if (PrevFilter) {
      uint32_t NumToSkip = Table.size() - PrevFilter - 3;
      assert(NumToSkip < (1u << 24) && "disassembler decoding table too large!");
      Table[PrevFilter] = (uint8_t)NumToSkip;
      Table[PrevFilter + 1] = (uint8_t)(NumToSkip >> 8);
      Table[PrevFilter + 2] = (uint8_t)(NumToSkip >> 16);
    }
  }

  // Any remaining unresolved fixups bubble up to the parent fixup scope.
  assert(TableInfo.FixupStack.size() > 1 && "fixup stack underflow!");
  FixupScopeList::iterator Source = TableInfo.FixupStack.end() - 1;
  FixupScopeList::iterator Dest = Source - 1;
  llvm::append_range(*Dest, *Source);
  TableInfo.FixupStack.pop_back();

  // If there is no fallthrough, then the final filter should get fixed
  // up according to the enclosing scope rather than the current position.
  if (!HasFallthrough)
    TableInfo.FixupStack.back().push_back(PrevFilter);
}
+
+// Returns the number of fanout produced by the filter. More fanout implies
+// the filter distinguishes more categories of instructions.
+unsigned Filter::usefulness() const {
+ if (!VariableInstructions.empty())
+ return FilteredInstructions.size();
+ else
+ return FilteredInstructions.size() + 1;
+}
+
+//////////////////////////////////
+// //
+// Filterchooser Implementation //
+// //
+//////////////////////////////////
+
+// Emit the decoder state machine table.
+void DecoderEmitter::emitTable(formatted_raw_ostream &OS, DecoderTable &Table,
+ unsigned Indentation, unsigned BitWidth,
+ StringRef Namespace) const {
+ OS.indent(Indentation) << "static const uint8_t DecoderTable" << Namespace
+ << BitWidth << "[] = {\n";
+
+ Indentation += 2;
+
+ // FIXME: We may be able to use the NumToSkip values to recover
+ // appropriate indentation levels.
+ DecoderTable::const_iterator I = Table.begin();
+ DecoderTable::const_iterator E = Table.end();
+ while (I != E) {
+ assert (I < E && "incomplete decode table entry!");
+
+ uint64_t Pos = I - Table.begin();
+ OS << "/* " << Pos << " */";
+ OS.PadToColumn(12);
+
+ switch (*I) {
+ default:
+ PrintFatalError("invalid decode table opcode");
+ case MCD::OPC_ExtractField: {
+ ++I;
+ unsigned Start = *I++;
+ unsigned Len = *I++;
+ OS.indent(Indentation) << "MCD::OPC_ExtractField, " << Start << ", "
+ << Len << ", // Inst{";
+ if (Len > 1)
+ OS << (Start + Len - 1) << "-";
+ OS << Start << "} ...\n";
+ break;
+ }
+ case MCD::OPC_FilterValue: {
+ ++I;
+ OS.indent(Indentation) << "MCD::OPC_FilterValue, ";
+ // The filter value is ULEB128 encoded.
+ while (*I >= 128)
+ OS << (unsigned)*I++ << ", ";
+ OS << (unsigned)*I++ << ", ";
+
+ // 24-bit numtoskip value.
+ uint8_t Byte = *I++;
+ uint32_t NumToSkip = Byte;
+ OS << (unsigned)Byte << ", ";
+ Byte = *I++;
+ OS << (unsigned)Byte << ", ";
+ NumToSkip |= Byte << 8;
+ Byte = *I++;
+ OS << utostr(Byte) << ", ";
+ NumToSkip |= Byte << 16;
+ OS << "// Skip to: " << ((I - Table.begin()) + NumToSkip) << "\n";
+ break;
+ }
+ case MCD::OPC_CheckField: {
+ ++I;
+ unsigned Start = *I++;
+ unsigned Len = *I++;
+ OS.indent(Indentation) << "MCD::OPC_CheckField, " << Start << ", "
+ << Len << ", ";// << Val << ", " << NumToSkip << ",\n";
+ // ULEB128 encoded field value.
+ for (; *I >= 128; ++I)
+ OS << (unsigned)*I << ", ";
+ OS << (unsigned)*I++ << ", ";
+ // 24-bit numtoskip value.
+ uint8_t Byte = *I++;
+ uint32_t NumToSkip = Byte;
+ OS << (unsigned)Byte << ", ";
+ Byte = *I++;
+ OS << (unsigned)Byte << ", ";
+ NumToSkip |= Byte << 8;
+ Byte = *I++;
+ OS << utostr(Byte) << ", ";
+ NumToSkip |= Byte << 16;
+ OS << "// Skip to: " << ((I - Table.begin()) + NumToSkip) << "\n";
+ break;
+ }
+ case MCD::OPC_CheckPredicate: {
+ ++I;
+ OS.indent(Indentation) << "MCD::OPC_CheckPredicate, ";
+ for (; *I >= 128; ++I)
+ OS << (unsigned)*I << ", ";
+ OS << (unsigned)*I++ << ", ";
+
+ // 24-bit numtoskip value.
+ uint8_t Byte = *I++;
+ uint32_t NumToSkip = Byte;
+ OS << (unsigned)Byte << ", ";
+ Byte = *I++;
+ OS << (unsigned)Byte << ", ";
+ NumToSkip |= Byte << 8;
+ Byte = *I++;
+ OS << utostr(Byte) << ", ";
+ NumToSkip |= Byte << 16;
+ OS << "// Skip to: " << ((I - Table.begin()) + NumToSkip) << "\n";
+ break;
+ }
+ case MCD::OPC_Decode:
+ case MCD::OPC_TryDecode: {
+ bool IsTry = *I == MCD::OPC_TryDecode;
+ ++I;
+ // Extract the ULEB128 encoded Opcode to a buffer.
+ uint8_t Buffer[16], *p = Buffer;
+ while ((*p++ = *I++) >= 128)
+ assert((p - Buffer) <= (ptrdiff_t)sizeof(Buffer)
+ && "ULEB128 value too large!");
+ // Decode the Opcode value.
+ unsigned Opc = decodeULEB128(Buffer);
+ OS.indent(Indentation) << "MCD::OPC_" << (IsTry ? "Try" : "")
+ << "Decode, ";
+ for (p = Buffer; *p >= 128; ++p)
+ OS << (unsigned)*p << ", ";
+ OS << (unsigned)*p << ", ";
+
+ // Decoder index.
+ for (; *I >= 128; ++I)
+ OS << (unsigned)*I << ", ";
+ OS << (unsigned)*I++ << ", ";
+
+ if (!IsTry) {
+ OS << "// Opcode: " << NumberedEncodings[Opc] << "\n";
+ break;
+ }
+
+ // Fallthrough for OPC_TryDecode.
+
+ // 24-bit numtoskip value.
+ uint8_t Byte = *I++;
+ uint32_t NumToSkip = Byte;
+ OS << (unsigned)Byte << ", ";
+ Byte = *I++;
+ OS << (unsigned)Byte << ", ";
+ NumToSkip |= Byte << 8;
+ Byte = *I++;
+ OS << utostr(Byte) << ", ";
+ NumToSkip |= Byte << 16;
+
+ OS << "// Opcode: " << NumberedEncodings[Opc]
+ << ", skip to: " << ((I - Table.begin()) + NumToSkip) << "\n";
+ break;
+ }
+ case MCD::OPC_SoftFail: {
+ ++I;
+ OS.indent(Indentation) << "MCD::OPC_SoftFail";
+ // Positive mask
+ uint64_t Value = 0;
+ unsigned Shift = 0;
+ do {
+ OS << ", " << (unsigned)*I;
+ Value += (*I & 0x7f) << Shift;
+ Shift += 7;
+ } while (*I++ >= 128);
+ if (Value > 127) {
+ OS << " /* 0x";
+ OS.write_hex(Value);
+ OS << " */";
+ }
+ // Negative mask
+ Value = 0;
+ Shift = 0;
+ do {
+ OS << ", " << (unsigned)*I;
+ Value += (*I & 0x7f) << Shift;
+ Shift += 7;
+ } while (*I++ >= 128);
+ if (Value > 127) {
+ OS << " /* 0x";
+ OS.write_hex(Value);
+ OS << " */";
+ }
+ OS << ",\n";
+ break;
+ }
+ case MCD::OPC_Fail: {
+ ++I;
+ OS.indent(Indentation) << "MCD::OPC_Fail,\n";
+ break;
+ }
+ }
+ }
+ OS.indent(Indentation) << "0\n";
+
+ Indentation -= 2;
+
+ OS.indent(Indentation) << "};\n\n";
+}
+
+void DecoderEmitter::emitInstrLenTable(formatted_raw_ostream &OS,
+ std::vector<unsigned> &InstrLen) const {
+ OS << "static const uint8_t InstrLenTable[] = {\n";
+ for (unsigned &Len : InstrLen) {
+ OS << Len << ",\n";
+ }
+ OS << "};\n\n";
+}
+
// Emit checkDecoderPredicate(): maps a predicate-table index to the
// evaluation of the corresponding subtarget-feature expression.
void DecoderEmitter::emitPredicateFunction(formatted_raw_ostream &OS,
                                           PredicateSet &Predicates,
                                           unsigned Indentation) const {
  // The predicate function is just a big switch statement based on the
  // input predicate index.
  OS.indent(Indentation) << "static bool checkDecoderPredicate(unsigned Idx, "
    << "const FeatureBitset &Bits) {\n";
  Indentation += 2;
  if (!Predicates.empty()) {
    OS.indent(Indentation) << "switch (Idx) {\n";
    OS.indent(Indentation) << "default: llvm_unreachable(\"Invalid index!\");\n";
    // Case labels follow insertion order, matching the indices handed out by
    // FilterChooser::getPredicateIndex().
    unsigned Index = 0;
    for (const auto &Predicate : Predicates) {
      OS.indent(Indentation) << "case " << Index++ << ":\n";
      OS.indent(Indentation+2) << "return (" << Predicate << ");\n";
    }
    OS.indent(Indentation) << "}\n";
  } else {
    // No case statement to emit
    OS.indent(Indentation) << "llvm_unreachable(\"Invalid index!\");\n";
  }
  Indentation -= 2;
  OS.indent(Indentation) << "}\n\n";
}
+
// Emit decodeToMCInst(): maps a decoder-table index to the operand-decoding
// statements previously rendered into the Decoders set.
void DecoderEmitter::emitDecoderFunction(formatted_raw_ostream &OS,
                                         DecoderSet &Decoders,
                                         unsigned Indentation) const {
  // The decoder function is just a big switch statement based on the
  // input decoder index.
  OS.indent(Indentation) << "template <typename InsnType>\n";
  OS.indent(Indentation) << "static DecodeStatus decodeToMCInst(DecodeStatus S,"
    << " unsigned Idx, InsnType insn, MCInst &MI,\n";
  OS.indent(Indentation)
      << "                                   uint64_t "
      << "Address, const MCDisassembler *Decoder, bool &DecodeComplete) {\n";
  Indentation += 2;
  OS.indent(Indentation) << "DecodeComplete = true;\n";
  // TODO: When InsnType is large, using uint64_t limits all fields to 64 bits
  // It would be better for emitBinaryParser to use a 64-bit tmp whenever
  // possible but fall back to an InsnType-sized tmp for truly large fields.
  OS.indent(Indentation) << "using TmpType = "
                            "std::conditional_t<std::is_integral<InsnType>::"
                            "value, InsnType, uint64_t>;\n";
  OS.indent(Indentation) << "TmpType tmp;\n";
  OS.indent(Indentation) << "switch (Idx) {\n";
  OS.indent(Indentation) << "default: llvm_unreachable(\"Invalid index!\");\n";
  // Case labels follow insertion order, matching the indices handed out by
  // FilterChooser::getDecoderIndex().
  unsigned Index = 0;
  for (const auto &Decoder : Decoders) {
    OS.indent(Indentation) << "case " << Index++ << ":\n";
    OS << Decoder;
    OS.indent(Indentation+2) << "return S;\n";
  }
  OS.indent(Indentation) << "}\n";
  Indentation -= 2;
  OS.indent(Indentation) << "}\n\n";
}
+
+// Populates the field of the insn given the start position and the number of
+// consecutive bits to scan for.
+//
+// Returns false if and on the first uninitialized bit value encountered.
+// Returns true, otherwise.
+bool FilterChooser::fieldFromInsn(uint64_t &Field, insn_t &Insn,
+ unsigned StartBit, unsigned NumBits) const {
+ Field = 0;
+
+ for (unsigned i = 0; i < NumBits; ++i) {
+ if (Insn[StartBit + i] == BIT_UNSET)
+ return false;
+
+ if (Insn[StartBit + i] == BIT_TRUE)
+ Field = Field | (1ULL << i);
+ }
+
+ return true;
+}
+
+/// dumpFilterArray - dumpFilterArray prints out debugging info for the given
+/// filter array as a series of chars.
+void FilterChooser::dumpFilterArray(raw_ostream &o,
+ const std::vector<bit_value_t> &filter) const {
+ for (unsigned bitIndex = BitWidth; bitIndex > 0; bitIndex--) {
+ switch (filter[bitIndex - 1]) {
+ case BIT_UNFILTERED:
+ o << ".";
+ break;
+ case BIT_UNSET:
+ o << "_";
+ break;
+ case BIT_TRUE:
+ o << "1";
+ break;
+ case BIT_FALSE:
+ o << "0";
+ break;
+ }
+ }
+}
+
+/// dumpStack - dumpStack traverses the filter chooser chain and calls
+/// dumpFilterArray on each filter chooser up to the top level one.
+void FilterChooser::dumpStack(raw_ostream &o, const char *prefix) const {
+ const FilterChooser *current = this;
+
+ while (current) {
+ o << prefix;
+ dumpFilterArray(o, current->FilterBitValues);
+ o << '\n';
+ current = current->Parent;
+ }
+}
+
// Calculates the island(s) needed to decode the instruction.
// This returns a list of undecoded bits of an instructions, for example,
// Inst{20} = 1 && Inst{3-0} == 0b1111 represents two islands of yet-to-be
// decoded bits in order to verify that the instruction matches the Opcode.
//
// Appends one entry per island to StartBits/EndBits/FieldVals (start bit,
// end bit inclusive, and the constant value of that bit run) and returns
// the number of islands found.
unsigned FilterChooser::getIslands(std::vector<unsigned> &StartBits,
                                   std::vector<unsigned> &EndBits,
                                   std::vector<uint64_t> &FieldVals,
                                   const insn_t &Insn) const {
  unsigned Num, BitNo;
  Num = BitNo = 0;

  uint64_t FieldVal = 0;

  // 0: Init
  // 1: Water (the bit value does not affect decoding)
  // 2: Island (well-known bit value needed for decoding)
  int State = 0;

  for (unsigned i = 0; i < BitWidth; ++i) {
    // Val is -1 for an unknown bit; a bit already claimed by a parent
    // filter is "water" regardless of its value.
    int64_t Val = Value(Insn[i]);
    bool Filtered = PositionFiltered(i);
    switch (State) {
    default: llvm_unreachable("Unreachable code!");
    case 0:
    case 1:
      if (Filtered || Val == -1)
        State = 1; // Still in Water
      else {
        State = 2; // Into the Island
        BitNo = 0;
        StartBits.push_back(i);
        FieldVal = Val;
      }
      break;
    case 2:
      if (Filtered || Val == -1) {
        State = 1; // Into the Water
        EndBits.push_back(i - 1);
        FieldVals.push_back(FieldVal);
        ++Num;
      } else {
        State = 2; // Still in Island
        // BitNo is the bit's position relative to the island's start.
        ++BitNo;
        FieldVal = FieldVal | Val << BitNo;
      }
      break;
    }
  }
  // If we are still in Island after the loop, do some housekeeping.
  if (State == 2) {
    EndBits.push_back(BitWidth - 1);
    FieldVals.push_back(FieldVal);
    ++Num;
  }

  assert(StartBits.size() == Num && EndBits.size() == Num &&
         FieldVals.size() == Num);
  return Num;
}
+
// Emit the statements that assemble one operand's value from the encoding
// fields of `insn` into `tmp`, then either pass it through the operand's
// custom decoder or add it to MI as an immediate.
void FilterChooser::emitBinaryParser(raw_ostream &o, unsigned &Indentation,
                                     const OperandInfo &OpInfo,
                                     bool &OpHasCompleteDecoder) const {
  const std::string &Decoder = OpInfo.Decoder;

  // A single zero-based field can be extracted directly; otherwise the
  // pieces must be OR'd into tmp with insertBits, starting from InitValue.
  bool UseInsertBits = OpInfo.numFields() != 1 || OpInfo.InitValue != 0;

  if (UseInsertBits) {
    o.indent(Indentation) << "tmp = 0x";
    o.write_hex(OpInfo.InitValue);
    o << ";\n";
  }

  for (const EncodingField &EF : OpInfo) {
    o.indent(Indentation);
    if (UseInsertBits)
      o << "insertBits(tmp, ";
    else
      o << "tmp = ";
    o << "fieldFromInstruction(insn, " << EF.Base << ", " << EF.Width << ')';
    if (UseInsertBits)
      o << ", " << EF.Offset << ", " << EF.Width << ')';
    else if (EF.Offset != 0)
      o << " << " << EF.Offset;
    o << ";\n";
  }

  if (Decoder != "") {
    // Custom decoder: propagate its completeness and bail out of decoding
    // on failure (marking DecodeComplete false for partial decoders).
    OpHasCompleteDecoder = OpInfo.HasCompleteDecoder;
    o.indent(Indentation) << "if (!Check(S, " << Decoder
                          << "(MI, tmp, Address, Decoder))) { "
                          << (OpHasCompleteDecoder ? ""
                                                   : "DecodeComplete = false; ")
                          << "return MCDisassembler::Fail; }\n";
  } else {
    OpHasCompleteDecoder = true;
    o.indent(Indentation) << "MI.addOperand(MCOperand::createImm(tmp));\n";
  }
}
+
// Emit the full decoder body for encoding `Opc`: one emitBinaryParser
// fragment per operand, or a single custom instruction decoder call.
// HasCompleteDecoder is cleared if any operand decoder is incomplete.
void FilterChooser::emitDecoder(raw_ostream &OS, unsigned Indentation,
                                unsigned Opc, bool &HasCompleteDecoder) const {
  HasCompleteDecoder = true;

  for (const auto &Op : Operands.find(Opc)->second) {
    // If a custom instruction decoder was specified, use that.
    // Note: it replaces all per-operand parsing, hence the break below.
    if (Op.numFields() == 0 && !Op.Decoder.empty()) {
      HasCompleteDecoder = Op.HasCompleteDecoder;
      OS.indent(Indentation)
          << "if (!Check(S, " << Op.Decoder
          << "(MI, insn, Address, Decoder))) { "
          << (HasCompleteDecoder ? "" : "DecodeComplete = false; ")
          << "return MCDisassembler::Fail; }\n";
      break;
    }

    bool OpHasCompleteDecoder;
    emitBinaryParser(OS, Indentation, Op, OpHasCompleteDecoder);
    if (!OpHasCompleteDecoder)
      HasCompleteDecoder = false;
  }
}
+
// Render the decoder body for `Opc`, intern it in Decoders, and return its
// index (the case label later emitted by emitDecoderFunction).
unsigned FilterChooser::getDecoderIndex(DecoderSet &Decoders,
                                        unsigned Opc,
                                        bool &HasCompleteDecoder) const {
  // Build up the decoder string.
  SmallString<256> Decoder;
  // FIXME: emitDecoder() function can take a buffer directly rather than
  // a stream.
  raw_svector_ostream S(Decoder);
  unsigned I = 4;
  emitDecoder(S, I, Opc, HasCompleteDecoder);

  // Using the full decoder string as the key value here is a bit
  // heavyweight, but is effective. If the string comparisons become a
  // performance concern, we can implement a mangling of the decoder
  // data easily enough with a map back to the actual string. That's
  // overkill for now, though.

  // Make sure the decoder is in the table.
  Decoders.insert(CachedHashString(Decoder));
  // Now figure out the index for when we write out the table.
  DecoderSet::const_iterator P = find(Decoders, Decoder.str());
  return (unsigned)(P - Decoders.begin());
}
+
// If ParenIfBinOp is true, print a surrounding () if Val uses && or ||.
// Recursively renders an AssemblerCondDag expression into a C++ boolean
// expression over `Bits[...]`. Returns true on failure (unsupported node),
// false on success.
bool FilterChooser::emitPredicateMatchAux(const Init &Val, bool ParenIfBinOp,
                                          raw_ostream &OS) const {
  if (auto *D = dyn_cast<DefInit>(&Val)) {
    // Leaf: a SubtargetFeature reference becomes a feature-bit test.
    if (!D->getDef()->isSubClassOf("SubtargetFeature"))
      return true;
    OS << "Bits[" << Emitter->PredicateNamespace << "::" << D->getAsString()
       << "]";
    return false;
  }
  if (auto *D = dyn_cast<DagInit>(&Val)) {
    std::string Op = D->getOperator()->getAsString();
    if (Op == "not" && D->getNumArgs() == 1) {
      OS << '!';
      return emitPredicateMatchAux(*D->getArg(0), true, OS);
    }
    if ((Op == "any_of" || Op == "all_of") && D->getNumArgs() > 0) {
      // Parenthesize multi-argument sub-expressions; std::exchange also
      // forces parentheses on any nested binary operators below this one.
      bool Paren = D->getNumArgs() > 1 && std::exchange(ParenIfBinOp, true);
      if (Paren)
        OS << '(';
      ListSeparator LS(Op == "any_of" ? " || " : " && ");
      for (auto *Arg : D->getArgs()) {
        OS << LS;
        if (emitPredicateMatchAux(*Arg, ParenIfBinOp, OS))
          return true;
      }
      if (Paren)
        OS << ')';
      return false;
    }
  }
  // Any other node kind is an invalid AssemblerCondDag.
  return true;
}
+
+bool FilterChooser::emitPredicateMatch(raw_ostream &o, unsigned &Indentation,
+ unsigned Opc) const {
+ ListInit *Predicates =
+ AllInstructions[Opc].EncodingDef->getValueAsListInit("Predicates");
+ bool IsFirstEmission = true;
+ for (unsigned i = 0; i < Predicates->size(); ++i) {
+ Record *Pred = Predicates->getElementAsRecord(i);
+ if (!Pred->getValue("AssemblerMatcherPredicate"))
+ continue;
+
+ if (!isa<DagInit>(Pred->getValue("AssemblerCondDag")->getValue()))
+ continue;
+
+ if (!IsFirstEmission)
+ o << " && ";
+ if (emitPredicateMatchAux(*Pred->getValueAsDag("AssemblerCondDag"),
+ Predicates->size() > 1, o))
+ PrintFatalError(Pred->getLoc(), "Invalid AssemblerCondDag!");
+ IsFirstEmission = false;
+ }
+ return !Predicates->empty();
+}
+
+bool FilterChooser::doesOpcodeNeedPredicate(unsigned Opc) const {
+ ListInit *Predicates =
+ AllInstructions[Opc].EncodingDef->getValueAsListInit("Predicates");
+ for (unsigned i = 0; i < Predicates->size(); ++i) {
+ Record *Pred = Predicates->getElementAsRecord(i);
+ if (!Pred->getValue("AssemblerMatcherPredicate"))
+ continue;
+
+ if (isa<DagInit>(Pred->getValue("AssemblerCondDag")->getValue()))
+ return true;
+ }
+ return false;
+}
+
+unsigned FilterChooser::getPredicateIndex(DecoderTableInfo &TableInfo,
+ StringRef Predicate) const {
+ // Using the full predicate string as the key value here is a bit
+ // heavyweight, but is effective. If the string comparisons become a
+ // performance concern, we can implement a mangling of the predicate
+ // data easily enough with a map back to the actual string. That's
+ // overkill for now, though.
+
+ // Make sure the predicate is in the table.
+ TableInfo.Predicates.insert(CachedHashString(Predicate));
+ // Now figure out the index for when we write out the table.
+ PredicateSet::const_iterator P = find(TableInfo.Predicates, Predicate);
+ return (unsigned)(P - TableInfo.Predicates.begin());
+}
+
+void FilterChooser::emitPredicateTableEntry(DecoderTableInfo &TableInfo,
+ unsigned Opc) const {
+ if (!doesOpcodeNeedPredicate(Opc))
+ return;
+
+ // Build up the predicate string.
+ SmallString<256> Predicate;
+ // FIXME: emitPredicateMatch() functions can take a buffer directly rather
+ // than a stream.
+ raw_svector_ostream PS(Predicate);
+ unsigned I = 0;
+ emitPredicateMatch(PS, I, Opc);
+
+ // Figure out the index into the predicate table for the predicate just
+ // computed.
+ unsigned PIdx = getPredicateIndex(TableInfo, PS.str());
+ SmallString<16> PBytes;
+ raw_svector_ostream S(PBytes);
+ encodeULEB128(PIdx, S);
+
+ TableInfo.Table.push_back(MCD::OPC_CheckPredicate);
+ // Predicate index
+ for (unsigned i = 0, e = PBytes.size(); i != e; ++i)
+ TableInfo.Table.push_back(PBytes[i]);
+ // Push location for NumToSkip backpatching.
+ TableInfo.FixupStack.back().push_back(TableInfo.Table.size());
+ TableInfo.Table.push_back(0);
+ TableInfo.Table.push_back(0);
+ TableInfo.Table.push_back(0);
+}
+
// Emit an OPC_SoftFail entry for `Opc` if its encoding has a SoftFail mask:
// a positive mask (bits that must not be set) and a negative mask (bits
// that must not be clear), each ULEB128-encoded, with 0 standing in for an
// absent mask.
void FilterChooser::emitSoftFailTableEntry(DecoderTableInfo &TableInfo,
                                           unsigned Opc) const {
  const RecordVal *RV = AllInstructions[Opc].EncodingDef->getValue("SoftFail");
  BitsInit *SFBits = RV ? dyn_cast<BitsInit>(RV->getValue()) : nullptr;

  if (!SFBits) return;
  BitsInit *InstBits =
      AllInstructions[Opc].EncodingDef->getValueAsBitsInit("Inst");

  APInt PositiveMask(BitWidth, 0ULL);
  APInt NegativeMask(BitWidth, 0ULL);
  for (unsigned i = 0; i < BitWidth; ++i) {
    bit_value_t B = bitFromBits(*SFBits, i);
    bit_value_t IB = bitFromBits(*InstBits, i);

    // Only bits explicitly marked SoftFail participate in the masks.
    if (B != BIT_TRUE) continue;

    switch (IB) {
    case BIT_FALSE:
      // The bit is meant to be false, so emit a check to see if it is true.
      PositiveMask.setBit(i);
      break;
    case BIT_TRUE:
      // The bit is meant to be true, so emit a check to see if it is false.
      NegativeMask.setBit(i);
      break;
    default:
      // The bit is not set; this must be an error!
      errs() << "SoftFail Conflict: bit SoftFail{" << i << "} in "
             << AllInstructions[Opc] << " is set but Inst{" << i
             << "} is unset!\n"
             << "  - You can only mark a bit as SoftFail if it is fully defined"
             << " (1/0 - not '?') in Inst\n";
      return;
    }
  }

  bool NeedPositiveMask = PositiveMask.getBoolValue();
  bool NeedNegativeMask = NegativeMask.getBoolValue();

  // Nothing to check: skip emitting the entry entirely.
  if (!NeedPositiveMask && !NeedNegativeMask)
    return;

  TableInfo.Table.push_back(MCD::OPC_SoftFail);

  SmallString<16> MaskBytes;
  raw_svector_ostream S(MaskBytes);
  if (NeedPositiveMask) {
    encodeULEB128(PositiveMask.getZExtValue(), S);
    for (unsigned i = 0, e = MaskBytes.size(); i != e; ++i)
      TableInfo.Table.push_back(MaskBytes[i]);
  } else
    TableInfo.Table.push_back(0);
  if (NeedNegativeMask) {
    // Reuse the buffer; S appends after clear() since it writes to MaskBytes.
    MaskBytes.clear();
    encodeULEB128(NegativeMask.getZExtValue(), S);
    for (unsigned i = 0, e = MaskBytes.size(); i != e; ++i)
      TableInfo.Table.push_back(MaskBytes[i]);
  } else
    TableInfo.Table.push_back(0);
}
+
+// Emits table entries to decode the singleton: predicate check, field
+// checks for any islands of still-undecoded bits, a soft-fail entry if
+// needed, and finally an OPC_Decode / OPC_TryDecode entry.
+void FilterChooser::emitSingletonTableEntry(DecoderTableInfo &TableInfo,
+                                            EncodingIDAndOpcode Opc) const {
+  std::vector<unsigned> StartBits;
+  std::vector<unsigned> EndBits;
+  std::vector<uint64_t> FieldVals;
+  insn_t Insn;
+  insnWithID(Insn, Opc.EncodingID);
+
+  // Look for islands of undecoded bits of the singleton.
+  getIslands(StartBits, EndBits, FieldVals, Insn);
+
+  unsigned Size = StartBits.size();
+
+  // Emit the predicate table entry if one is needed.
+  emitPredicateTableEntry(TableInfo, Opc.EncodingID);
+
+  // Check any additional encoding fields needed.
+  for (unsigned I = Size; I != 0; --I) {
+    unsigned NumBits = EndBits[I-1] - StartBits[I-1] + 1;
+    TableInfo.Table.push_back(MCD::OPC_CheckField);
+    TableInfo.Table.push_back(StartBits[I-1]);
+    TableInfo.Table.push_back(NumBits);
+    uint8_t Buffer[16], *p;
+    encodeULEB128(FieldVals[I-1], Buffer);
+    // Copy the ULEB128 bytes: bytes with the high bit set are continuation
+    // bytes; the final byte has the high bit clear.
+    for (p = Buffer; *p >= 128 ; ++p)
+      TableInfo.Table.push_back(*p);
+    TableInfo.Table.push_back(*p);
+    // Push location for NumToSkip backpatching.
+    TableInfo.FixupStack.back().push_back(TableInfo.Table.size());
+    // The fixup is always 24-bits, so go ahead and allocate the space
+    // in the table so all our relative position calculations work OK even
+    // before we fully resolve the real value here.
+    TableInfo.Table.push_back(0);
+    TableInfo.Table.push_back(0);
+    TableInfo.Table.push_back(0);
+  }
+
+  // Check for soft failure of the match.
+  emitSoftFailTableEntry(TableInfo, Opc.EncodingID);
+
+  bool HasCompleteDecoder;
+  unsigned DIdx =
+      getDecoderIndex(TableInfo.Decoders, Opc.EncodingID, HasCompleteDecoder);
+
+  // Produce OPC_Decode or OPC_TryDecode opcode based on the information
+  // whether the instruction decoder is complete or not. If it is complete
+  // then it handles all possible values of remaining variable/unfiltered bits
+  // and for any value can determine if the bitpattern is a valid instruction
+  // or not. This means OPC_Decode will be the final step in the decoding
+  // process. If it is not complete, then the Fail return code from the
+  // decoder method indicates that additional processing should be done to see
+  // if there is any other instruction that also matches the bitpattern and
+  // can decode it.
+  TableInfo.Table.push_back(HasCompleteDecoder ? MCD::OPC_Decode :
+      MCD::OPC_TryDecode);
+  NumEncodingsSupported++;
+  uint8_t Buffer[16], *p;
+  encodeULEB128(Opc.Opcode, Buffer);
+  for (p = Buffer; *p >= 128 ; ++p)
+    TableInfo.Table.push_back(*p);
+  TableInfo.Table.push_back(*p);
+
+  SmallString<16> Bytes;
+  raw_svector_ostream S(Bytes);
+  encodeULEB128(DIdx, S);
+
+  // Decoder index
+  for (unsigned i = 0, e = Bytes.size(); i != e; ++i)
+    TableInfo.Table.push_back(Bytes[i]);
+
+  // Only OPC_TryDecode carries a NumToSkip; OPC_Decode is terminal.
+  if (!HasCompleteDecoder) {
+    // Push location for NumToSkip backpatching.
+    TableInfo.FixupStack.back().push_back(TableInfo.Table.size());
+    // Allocate the space for the fixup.
+    TableInfo.Table.push_back(0);
+    TableInfo.Table.push_back(0);
+    TableInfo.Table.push_back(0);
+  }
+}
+
+// Emits table entries to decode the singleton, and then to decode the rest.
+void FilterChooser::emitSingletonTableEntry(DecoderTableInfo &TableInfo,
+                                            const Filter &Best) const {
+  EncodingIDAndOpcode Opc = Best.getSingletonOpc();
+
+  // complex singletons need predicate checks from the first singleton
+  // to refer forward to the variable filterchooser that follows.
+  TableInfo.FixupStack.emplace_back();
+
+  emitSingletonTableEntry(TableInfo, Opc);
+
+  // Resolve the fixups recorded for the singleton so its failure paths skip
+  // to the entries emitted for the variable FilterChooser just below.
+  resolveTableFixups(TableInfo.Table, TableInfo.FixupStack.back(),
+                     TableInfo.Table.size());
+  TableInfo.FixupStack.pop_back();
+
+  Best.getVariableFC().emitTableEntries(TableInfo);
+}
+
+// Assign a single filter and run with it. Top level API client can initialize
+// with a single filter to start the filtering process.
+// NOTE(review): the 'mixed' parameter is not used — the filter is always
+// constructed as mixed (true). Confirm whether that is intentional.
+void FilterChooser::runSingleFilter(unsigned startBit, unsigned numBit,
+                                    bool mixed) {
+  Filters.clear();
+  Filters.emplace_back(*this, startBit, numBit, true);
+  BestIndex = 0; // Sole Filter instance to choose from.
+  bestFilter().recurse();
+}
+
+// reportRegion is a helper function for filterProcessor to mark a region as
+// eligible for use as a filter region. Only two combinations produce a
+// candidate: a MIXED region during a mixed-mode scan, or an ALL_SET region
+// during a known-bits-only scan; everything else is ignored.
+void FilterChooser::reportRegion(bitAttr_t RA, unsigned StartBit,
+                                 unsigned BitIndex, bool AllowMixed) {
+  const unsigned NumBits = BitIndex - StartBit;
+  if (AllowMixed) {
+    if (RA == ATTR_MIXED)
+      Filters.emplace_back(*this, StartBit, NumBits, true);
+  } else {
+    if (RA == ATTR_ALL_SET)
+      Filters.emplace_back(*this, StartBit, NumBits, false);
+  }
+}
+
+// FilterProcessor scans the well-known encoding bits of the instructions and
+// builds up a list of candidate filters. It chooses the best filter and
+// recursively descends down the decoding tree. Returns true if a useful
+// filter was found and recursed into, false if all candidates were useless.
+bool FilterChooser::filterProcessor(bool AllowMixed, bool Greedy) {
+  Filters.clear();
+  BestIndex = -1;
+  unsigned numInstructions = Opcodes.size();
+
+  assert(numInstructions && "Filter created with no instructions");
+
+  // No further filtering is necessary.
+  if (numInstructions == 1)
+    return true;
+
+  // Heuristics. See also doFilter()'s "Heuristics" comment when num of
+  // instructions is 3.
+  if (AllowMixed && !Greedy) {
+    assert(numInstructions == 3);
+
+    for (auto Opcode : Opcodes) {
+      std::vector<unsigned> StartBits;
+      std::vector<unsigned> EndBits;
+      std::vector<uint64_t> FieldVals;
+      insn_t Insn;
+
+      insnWithID(Insn, Opcode.EncodingID);
+
+      // Look for islands of undecoded bits of any instruction.
+      if (getIslands(StartBits, EndBits, FieldVals, Insn) > 0) {
+        // Found an instruction with island(s).  Now just assign a filter.
+        runSingleFilter(StartBits[0], EndBits[0] - StartBits[0] + 1, true);
+        return true;
+      }
+    }
+  }
+
+  unsigned BitIndex;
+
+  // We maintain BIT_WIDTH copies of the bitAttrs automaton.
+  // The automaton consumes the corresponding bit from each
+  // instruction.
+  //
+  //   Input symbols: 0, 1, and _ (unset).
+  //   States:        NONE, FILTERED, ALL_SET, ALL_UNSET, and MIXED.
+  //   Initial state: NONE.
+  //
+  // (NONE) ------- [01] -> (ALL_SET)
+  // (NONE) ------- _ ----> (ALL_UNSET)
+  // (ALL_SET) ---- [01] -> (ALL_SET)
+  // (ALL_SET) ---- _ ----> (MIXED)
+  // (ALL_UNSET) -- [01] -> (MIXED)
+  // (ALL_UNSET) -- _ ----> (ALL_UNSET)
+  // (MIXED) ------ . ----> (MIXED)
+  // (FILTERED)---- . ----> (FILTERED)
+
+  std::vector<bitAttr_t> bitAttrs;
+
+  // FILTERED bit positions provide no entropy and are not worthy of pursuing.
+  // Filter::recurse() set either BIT_TRUE or BIT_FALSE for each position.
+  for (BitIndex = 0; BitIndex < BitWidth; ++BitIndex)
+    if (FilterBitValues[BitIndex] == BIT_TRUE ||
+        FilterBitValues[BitIndex] == BIT_FALSE)
+      bitAttrs.push_back(ATTR_FILTERED);
+    else
+      bitAttrs.push_back(ATTR_NONE);
+
+  // Run every instruction's bits through the per-position automata.
+  for (unsigned InsnIndex = 0; InsnIndex < numInstructions; ++InsnIndex) {
+    insn_t insn;
+
+    insnWithID(insn, Opcodes[InsnIndex].EncodingID);
+
+    for (BitIndex = 0; BitIndex < BitWidth; ++BitIndex) {
+      switch (bitAttrs[BitIndex]) {
+      case ATTR_NONE:
+        if (insn[BitIndex] == BIT_UNSET)
+          bitAttrs[BitIndex] = ATTR_ALL_UNSET;
+        else
+          bitAttrs[BitIndex] = ATTR_ALL_SET;
+        break;
+      case ATTR_ALL_SET:
+        if (insn[BitIndex] == BIT_UNSET)
+          bitAttrs[BitIndex] = ATTR_MIXED;
+        break;
+      case ATTR_ALL_UNSET:
+        if (insn[BitIndex] != BIT_UNSET)
+          bitAttrs[BitIndex] = ATTR_MIXED;
+        break;
+      case ATTR_MIXED:
+      case ATTR_FILTERED:
+        break;
+      }
+    }
+  }
+
+  // The regionAttr automaton consumes the bitAttrs automatons' state,
+  // lowest-to-highest.
+  //
+  //   Input symbols: F(iltered), (all_)S(et), (all_)U(nset), M(ixed)
+  //   States:        NONE, ALL_SET, MIXED
+  //   Initial state: NONE
+  //
+  // (NONE) ----- F --> (NONE)
+  // (NONE) ----- S --> (ALL_SET)  ; and set region start
+  // (NONE) ----- U --> (NONE)
+  // (NONE) ----- M --> (MIXED)    ; and set region start
+  // (ALL_SET) -- F --> (NONE)     ; and report an ALL_SET region
+  // (ALL_SET) -- S --> (ALL_SET)
+  // (ALL_SET) -- U --> (NONE)     ; and report an ALL_SET region
+  // (ALL_SET) -- M --> (MIXED)    ; and report an ALL_SET region
+  // (MIXED) ---- F --> (NONE)     ; and report a MIXED region
+  // (MIXED) ---- S --> (ALL_SET)  ; and report a MIXED region
+  // (MIXED) ---- U --> (NONE)     ; and report a MIXED region
+  // (MIXED) ---- M --> (MIXED)
+
+  bitAttr_t RA = ATTR_NONE;
+  unsigned StartBit = 0;
+
+  for (BitIndex = 0; BitIndex < BitWidth; ++BitIndex) {
+    bitAttr_t bitAttr = bitAttrs[BitIndex];
+
+    assert(bitAttr != ATTR_NONE && "Bit without attributes");
+
+    switch (RA) {
+    case ATTR_NONE:
+      switch (bitAttr) {
+      case ATTR_FILTERED:
+        break;
+      case ATTR_ALL_SET:
+        StartBit = BitIndex;
+        RA = ATTR_ALL_SET;
+        break;
+      case ATTR_ALL_UNSET:
+        break;
+      case ATTR_MIXED:
+        StartBit = BitIndex;
+        RA = ATTR_MIXED;
+        break;
+      default:
+        llvm_unreachable("Unexpected bitAttr!");
+      }
+      break;
+    case ATTR_ALL_SET:
+      switch (bitAttr) {
+      case ATTR_FILTERED:
+        reportRegion(RA, StartBit, BitIndex, AllowMixed);
+        RA = ATTR_NONE;
+        break;
+      case ATTR_ALL_SET:
+        break;
+      case ATTR_ALL_UNSET:
+        reportRegion(RA, StartBit, BitIndex, AllowMixed);
+        RA = ATTR_NONE;
+        break;
+      case ATTR_MIXED:
+        reportRegion(RA, StartBit, BitIndex, AllowMixed);
+        StartBit = BitIndex;
+        RA = ATTR_MIXED;
+        break;
+      default:
+        llvm_unreachable("Unexpected bitAttr!");
+      }
+      break;
+    case ATTR_MIXED:
+      switch (bitAttr) {
+      case ATTR_FILTERED:
+        reportRegion(RA, StartBit, BitIndex, AllowMixed);
+        StartBit = BitIndex;
+        RA = ATTR_NONE;
+        break;
+      case ATTR_ALL_SET:
+        reportRegion(RA, StartBit, BitIndex, AllowMixed);
+        StartBit = BitIndex;
+        RA = ATTR_ALL_SET;
+        break;
+      case ATTR_ALL_UNSET:
+        reportRegion(RA, StartBit, BitIndex, AllowMixed);
+        RA = ATTR_NONE;
+        break;
+      case ATTR_MIXED:
+        break;
+      default:
+        llvm_unreachable("Unexpected bitAttr!");
+      }
+      break;
+    case ATTR_ALL_UNSET:
+      llvm_unreachable("regionAttr state machine has no ATTR_UNSET state");
+    case ATTR_FILTERED:
+      llvm_unreachable("regionAttr state machine has no ATTR_FILTERED state");
+    }
+  }
+
+  // At the end, if we're still in ALL_SET or MIXED states, report a region
+  switch (RA) {
+  case ATTR_NONE:
+    break;
+  case ATTR_FILTERED:
+    break;
+  case ATTR_ALL_SET:
+    reportRegion(RA, StartBit, BitIndex, AllowMixed);
+    break;
+  case ATTR_ALL_UNSET:
+    break;
+  case ATTR_MIXED:
+    reportRegion(RA, StartBit, BitIndex, AllowMixed);
+    break;
+  }
+
+  // We have finished with the filter processings.  Now it's time to choose
+  // the best performing filter.
+  BestIndex = 0;
+  bool AllUseless = true;
+  unsigned BestScore = 0;
+
+  for (unsigned i = 0, e = Filters.size(); i != e; ++i) {
+    unsigned Usefulness = Filters[i].usefulness();
+
+    if (Usefulness)
+      AllUseless = false;
+
+    if (Usefulness > BestScore) {
+      BestIndex = i;
+      BestScore = Usefulness;
+    }
+  }
+
+  if (!AllUseless)
+    bestFilter().recurse();
+
+  return !AllUseless;
+} // end of FilterChooser::filterProcessor(bool)
+
+// Decides on the best configuration of filter(s) to use in order to decode
+// the instructions. A conflict of instructions may occur, in which case we
+// dump the conflict set to the standard error.
+void FilterChooser::doFilter() {
+  unsigned Num = Opcodes.size();
+  assert(Num && "FilterChooser created with no instructions");
+
+  // Try regions of consecutive known bit values first.
+  if (filterProcessor(false))
+    return;
+
+  // Then regions of mixed bits (both known and uninitialized bit values
+  // allowed).
+  if (filterProcessor(true))
+    return;
+
+  // Heuristics to cope with conflict set {t2CMPrs, t2SUBSrr, t2SUBSrs} where
+  // no single instruction for the maximum ATTR_MIXED region Inst{14-4} has a
+  // well-known encoding pattern. In such case, we backtrack and scan for
+  // the very first consecutive ATTR_ALL_SET region and assign a filter to it.
+  if (Num == 3 && filterProcessor(true, false))
+    return;
+
+  // If we come to here, the instruction decoding has failed.
+  // Set the BestIndex to -1 to indicate so.
+  BestIndex = -1;
+}
+
+// emitTableEntries - Emit state machine entries to decode our share of
+// instructions. Falls back to dumping the conflict set to stderr when no
+// usable filter was found (BestIndex == -1).
+void FilterChooser::emitTableEntries(DecoderTableInfo &TableInfo) const {
+  if (Opcodes.size() == 1) {
+    // There is only one instruction in the set, which is great!
+    // Call emitSingletonDecoder() to see whether there are any remaining
+    // encodings bits.
+    emitSingletonTableEntry(TableInfo, Opcodes[0]);
+    return;
+  }
+
+  // Choose the best filter to do the decodings!
+  if (BestIndex != -1) {
+    const Filter &Best = Filters[BestIndex];
+    if (Best.getNumFiltered() == 1)
+      emitSingletonTableEntry(TableInfo, Best);
+    else
+      Best.emitTableEntry(TableInfo);
+    return;
+  }
+
+  // We don't know how to decode these instructions!  Dump the
+  // conflict set and bail.
+
+  // Print out useful conflict information for postmortem analysis.
+  errs() << "Decoding Conflict:\n";
+
+  dumpStack(errs(), "\t\t");
+
+  for (auto Opcode : Opcodes) {
+    errs() << '\t';
+    emitNameWithID(errs(), Opcode.EncodingID);
+    errs() << " ";
+    dumpBits(
+        errs(),
+        getBitsField(*AllInstructions[Opcode.EncodingID].EncodingDef, "Inst"));
+    errs() << '\n';
+  }
+}
+
+// Determine the decoder method name for an operand type record. An explicit
+// non-empty DecoderMethod string on the record wins; otherwise register-class
+// based records get a conventional "Decode<Name>RegisterClass" /
+// "DecodePointerLikeRegClass<Kind>" name. Returns "" when nothing applies.
+static std::string findOperandDecoderMethod(Record *Record) {
+  // An explicit DecoderMethod string takes precedence over everything else.
+  if (RecordVal *DecoderString = Record->getValue("DecoderMethod"))
+    if (StringInit *String = dyn_cast<StringInit>(DecoderString->getValue())) {
+      std::string Explicit(String->getValue());
+      if (!Explicit.empty())
+        return Explicit;
+    }
+
+  // A RegisterOperand wraps a RegisterClass; look through the wrapper.
+  if (Record->isSubClassOf("RegisterOperand"))
+    Record = Record->getValueAsDef("RegClass");
+
+  if (Record->isSubClassOf("RegisterClass"))
+    return "Decode" + Record->getName().str() + "RegisterClass";
+  if (Record->isSubClassOf("PointerLikeRegClass"))
+    return "DecodePointerLikeRegClass" +
+           utostr(Record->getValueAsInt("RegClassKind"));
+
+  return std::string();
+}
+
+// Build an OperandInfo for an operand type record: resolve its decoder
+// method name and determine whether that decoder is complete. When the
+// record carries no concrete hasCompleteDecoder bit, completeness defaults
+// to true.
+OperandInfo getOpInfo(Record *TypeRecord) {
+  bool HasCompleteDecoder = true;
+  if (RecordVal *CompleteVal = TypeRecord->getValue("hasCompleteDecoder"))
+    if (BitInit *CompleteBit = dyn_cast<BitInit>(CompleteVal->getValue()))
+      HasCompleteDecoder = CompleteBit->getValue();
+
+  return OperandInfo(findOperandDecoderMethod(TypeRecord), HasCompleteDecoder);
+}
+
+// Populate Operands for a variable-length instruction encoding: first build
+// one OperandInfo per MI operand (expanding MIOperandInfo sub-operands),
+// then walk the VarLenInst encoding segments assigning each named segment's
+// bit range (and any custom decoder) to its operand, mirroring the fields
+// onto tied operands so they decode identically.
+void parseVarLenInstOperand(const Record &Def,
+                            std::vector<OperandInfo> &Operands,
+                            const CodeGenInstruction &CGI) {
+
+  const RecordVal *RV = Def.getValue("Inst");
+  VarLenInst VLI(cast<DagInit>(RV->getValue()), RV);
+  SmallVector<int> TiedTo;
+
+  // Record tied-operand pairs symmetrically so either side can be looked up.
+  for (unsigned Idx = 0; Idx < CGI.Operands.size(); ++Idx) {
+    auto &Op = CGI.Operands[Idx];
+    if (Op.MIOperandInfo && Op.MIOperandInfo->getNumArgs() > 0)
+      for (auto *Arg : Op.MIOperandInfo->getArgs())
+        Operands.push_back(getOpInfo(cast<DefInit>(Arg)->getDef()));
+    else
+      Operands.push_back(getOpInfo(Op.Rec));
+
+    int TiedReg = Op.getTiedRegister();
+    TiedTo.push_back(-1);
+    if (TiedReg != -1) {
+      TiedTo[Idx] = TiedReg;
+      TiedTo[TiedReg] = Idx;
+    }
+  }
+
+  unsigned CurrBitPos = 0;
+  for (auto &EncodingSegment : VLI) {
+    unsigned Offset = 0;
+    StringRef OpName;
+
+    // A segment names its operand either directly (string) or via an
+    // (operand "name", ..., offset) dag carrying an extra bit offset.
+    if (const StringInit *SI = dyn_cast<StringInit>(EncodingSegment.Value)) {
+      OpName = SI->getValue();
+    } else if (const DagInit *DI = dyn_cast<DagInit>(EncodingSegment.Value)) {
+      OpName = cast<StringInit>(DI->getArg(0))->getValue();
+      Offset = cast<IntInit>(DI->getArg(2))->getValue();
+    }
+
+    if (!OpName.empty()) {
+      auto OpSubOpPair =
+          const_cast<CodeGenInstruction &>(CGI).Operands.ParseOperandName(
+              OpName);
+      unsigned OpIdx = CGI.Operands.getFlattenedOperandNumber(OpSubOpPair);
+      Operands[OpIdx].addField(CurrBitPos, EncodingSegment.BitWidth, Offset);
+      if (!EncodingSegment.CustomDecoder.empty())
+        Operands[OpIdx].Decoder = EncodingSegment.CustomDecoder.str();
+
+      // Mirror the field onto the tied counterpart (same sub-operand index).
+      int TiedReg = TiedTo[OpSubOpPair.first];
+      if (TiedReg != -1) {
+        unsigned OpIdx = CGI.Operands.getFlattenedOperandNumber(
+            std::make_pair(TiedReg, OpSubOpPair.second));
+        Operands[OpIdx].addField(CurrBitPos, EncodingSegment.BitWidth, Offset);
+      }
+    }
+
+    CurrBitPos += EncodingSegment.BitWidth;
+  }
+}
+
+// Emit the full record text through PrintNote so it accompanies a
+// previously reported error, for postmortem analysis.
+static void debugDumpRecord(const Record &Rec) {
+  std::string Buf;
+  raw_string_ostream OS(Buf);
+  OS << "Dumping record for previous error:\n" << Rec;
+  PrintNote(Buf);
+}
+
+/// For an operand field named OpName: populate OpInfo.InitValue with the
+/// constant-valued bit values, and OpInfo.Fields with the ranges of bits to
+/// insert from the decoded instruction.
+static void addOneOperandFields(const Record &EncodingDef, const BitsInit &Bits,
+                                std::map<std::string, std::string> &TiedNames,
+                                StringRef OpName, OperandInfo &OpInfo) {
+  // Some bits of the operand may be required to be 1 depending on the
+  // instruction's encoding. Collect those bits.
+  if (const RecordVal *EncodedValue = EncodingDef.getValue(OpName))
+    if (const BitsInit *OpBits = dyn_cast<BitsInit>(EncodedValue->getValue()))
+      for (unsigned I = 0; I < OpBits->getNumBits(); ++I)
+        if (const BitInit *OpBit = dyn_cast<BitInit>(OpBits->getBit(I)))
+          if (OpBit->getValue())
+            OpInfo.InitValue |= 1ULL << I;
+
+  // Scan Inst for maximal runs of bits [I, J) that all refer to OpName (or
+  // its tied alias) with consecutive operand bit numbers; each such run
+  // becomes one field (instruction bit I, width J-I, operand bit Offset).
+  for (unsigned I = 0, J = 0; I != Bits.getNumBits(); I = J) {
+    VarInit *Var;
+    unsigned Offset = 0;
+    for (; J != Bits.getNumBits(); ++J) {
+      VarBitInit *BJ = dyn_cast<VarBitInit>(Bits.getBit(J));
+      if (BJ) {
+        Var = dyn_cast<VarInit>(BJ->getBitVar());
+        if (I == J)
+          Offset = BJ->getBitNum();
+        else if (BJ->getBitNum() != Offset + J - I)
+          // Non-consecutive operand bit: end the current run here.
+          break;
+      } else {
+        Var = dyn_cast<VarInit>(Bits.getBit(J));
+      }
+      if (!Var || (Var->getName() != OpName &&
+                   Var->getName() != TiedNames[std::string(OpName)]))
+        break;
+    }
+    // Empty run: bit I does not belong to this operand, step past it.
+    if (I == J)
+      ++J;
+    else
+      OpInfo.addField(I, J - I, Offset);
+  }
+}
+
+// Populate Operands[Opc] with the operand decoding description for one
+// instruction encoding. Returns the encoding's bit width, or 0 when the
+// encoding is entirely unspecified and cannot be decoded. Honors a
+// custom per-instruction DecoderMethod, variable-length encodings, and the
+// deprecated positionally-encoded-operands path.
+static unsigned
+populateInstruction(CodeGenTarget &Target, const Record &EncodingDef,
+                    const CodeGenInstruction &CGI, unsigned Opc,
+                    std::map<unsigned, std::vector<OperandInfo>> &Operands,
+                    bool IsVarLenInst) {
+  const Record &Def = *CGI.TheDef;
+  // If all the bit positions are not specified; do not decode this instruction.
+  // We are bound to fail!  For proper disassembly, the well-known encoding bits
+  // of the instruction must be fully specified.
+
+  BitsInit &Bits = getBitsField(EncodingDef, "Inst");
+  if (Bits.allInComplete())
+    return 0;
+
+  std::vector<OperandInfo> InsnOperands;
+
+  // If the instruction has specified a custom decoding hook, use that instead
+  // of trying to auto-generate the decoder.
+  StringRef InstDecoder = EncodingDef.getValueAsString("DecoderMethod");
+  if (InstDecoder != "") {
+    bool HasCompleteInstDecoder = EncodingDef.getValueAsBit("hasCompleteDecoder");
+    InsnOperands.push_back(
+        OperandInfo(std::string(InstDecoder), HasCompleteInstDecoder));
+    Operands[Opc] = InsnOperands;
+    return Bits.getNumBits();
+  }
+
+  // Generate a description of the operand of the instruction that we know
+  // how to decode automatically.
+  // FIXME: We'll need to have a way to manually override this as needed.
+
+  // Gather the outputs/inputs of the instruction, so we can find their
+  // positions in the encoding.  This assumes for now that they appear in the
+  // MCInst in the order that they're listed.
+  std::vector<std::pair<Init*, StringRef>> InOutOperands;
+  DagInit *Out  = Def.getValueAsDag("OutOperandList");
+  DagInit *In  = Def.getValueAsDag("InOperandList");
+  for (unsigned i = 0; i < Out->getNumArgs(); ++i)
+    InOutOperands.push_back(
+        std::make_pair(Out->getArg(i), Out->getArgNameStr(i)));
+  for (unsigned i = 0; i < In->getNumArgs(); ++i)
+    InOutOperands.push_back(
+        std::make_pair(In->getArg(i), In->getArgNameStr(i)));
+
+  // Search for tied operands, so that we can correctly instantiate
+  // operands that are not explicitly represented in the encoding.
+  // The map is filled symmetrically: name -> tied name in both directions.
+  std::map<std::string, std::string> TiedNames;
+  for (unsigned i = 0; i < CGI.Operands.size(); ++i) {
+    auto &Op = CGI.Operands[i];
+    for (unsigned j = 0; j < Op.Constraints.size(); ++j) {
+      const CGIOperandList::ConstraintInfo &CI = Op.Constraints[j];
+      if (CI.isTied()) {
+        int tiedTo = CI.getTiedOperand();
+        std::pair<unsigned, unsigned> SO =
+            CGI.Operands.getSubOperandNumber(tiedTo);
+        std::string TiedName = CGI.Operands[SO.first].SubOpNames[SO.second];
+        if (TiedName.empty())
+          TiedName = CGI.Operands[SO.first].Name;
+        std::string MyName = Op.SubOpNames[j];
+        if (MyName.empty())
+          MyName = Op.Name;
+
+        TiedNames[MyName] = TiedName;
+        TiedNames[TiedName] = MyName;
+      }
+    }
+  }
+
+  if (IsVarLenInst) {
+    parseVarLenInstOperand(EncodingDef, InsnOperands, CGI);
+  } else {
+    std::map<std::string, std::vector<OperandInfo>> NumberedInsnOperands;
+    std::set<std::string> NumberedInsnOperandsNoTie;
+    // Deprecated path: operands are matched to encoding fields by position
+    // rather than by name. Both target bits must be set to enable it.
+    bool SupportPositionalDecoding =
+        Target.getInstructionSet()->getValueAsBit(
+            "useDeprecatedPositionallyEncodedOperands") &&
+        Target.getInstructionSet()->getValueAsBit(
+            "decodePositionallyEncodedOperands");
+    if (SupportPositionalDecoding) {
+      const std::vector<RecordVal> &Vals = Def.getValues();
+      unsigned NumberedOp = 0;
+
+      std::set<unsigned> NamedOpIndices;
+      if (Target.getInstructionSet()->getValueAsBit(
+              "noNamedPositionallyEncodedOperands"))
+        // Collect the set of operand indices that might correspond to named
+        // operand, and skip these when assigning operands based on position.
+        for (unsigned i = 0, e = Vals.size(); i != e; ++i) {
+          unsigned OpIdx;
+          if (!CGI.Operands.hasOperandNamed(Vals[i].getName(), OpIdx))
+            continue;
+
+          NamedOpIndices.insert(OpIdx);
+        }
+
+      for (unsigned i = 0, e = Vals.size(); i != e; ++i) {
+        // Ignore fixed fields in the record, we're looking for values like:
+        //    bits<5> RST = { ?, ?, ?, ?, ? };
+        if (Vals[i].isNonconcreteOK() || Vals[i].getValue()->isComplete())
+          continue;
+
+        // Determine if Vals[i] actually contributes to the Inst encoding.
+        unsigned bi = 0;
+        for (; bi < Bits.getNumBits(); ++bi) {
+          VarInit *Var = nullptr;
+          VarBitInit *BI = dyn_cast<VarBitInit>(Bits.getBit(bi));
+          if (BI)
+            Var = dyn_cast<VarInit>(BI->getBitVar());
+          else
+            Var = dyn_cast<VarInit>(Bits.getBit(bi));
+
+          if (Var && Var->getName() == Vals[i].getName())
+            break;
+        }
+
+        if (bi == Bits.getNumBits())
+          continue;
+
+        // Skip variables that correspond to explicitly-named operands.
+        unsigned OpIdx;
+        std::pair<unsigned, unsigned> SubOp;
+        if (CGI.Operands.hasSubOperandAlias(Vals[i].getName(), SubOp) ||
+            CGI.Operands.hasOperandNamed(Vals[i].getName(), OpIdx))
+          continue;
+
+        // Get the bit range for this operand:
+        unsigned bitStart = bi++, bitWidth = 1;
+        for (; bi < Bits.getNumBits(); ++bi) {
+          VarInit *Var = nullptr;
+          VarBitInit *BI = dyn_cast<VarBitInit>(Bits.getBit(bi));
+          if (BI)
+            Var = dyn_cast<VarInit>(BI->getBitVar());
+          else
+            Var = dyn_cast<VarInit>(Bits.getBit(bi));
+
+          if (!Var)
+            break;
+
+          if (Var->getName() != Vals[i].getName())
+            break;
+
+          ++bitWidth;
+        }
+
+        // Advance past operands that are never emitted or are named (and
+        // thus handled by the name-based path below).
+        unsigned NumberOps = CGI.Operands.size();
+        while (NumberedOp < NumberOps &&
+               (CGI.Operands.isFlatOperandNotEmitted(NumberedOp) ||
+                (!NamedOpIndices.empty() &&
+                 NamedOpIndices.count(
+                     CGI.Operands.getSubOperandNumber(NumberedOp).first))))
+          ++NumberedOp;
+
+        OpIdx = NumberedOp++;
+
+        // OpIdx now holds the ordered operand number of Vals[i].
+        std::pair<unsigned, unsigned> SO =
+            CGI.Operands.getSubOperandNumber(OpIdx);
+        const std::string &Name = CGI.Operands[SO.first].Name;
+
+        LLVM_DEBUG(dbgs() << "Numbered operand mapping for " << Def.getName()
+                          << ": " << Name << "(" << SO.first << ", "
+                          << SO.second << ") => " << Vals[i].getName() << "\n");
+
+        std::string Decoder;
+        Record *TypeRecord = CGI.Operands[SO.first].Rec;
+
+        RecordVal *DecoderString = TypeRecord->getValue("DecoderMethod");
+        StringInit *String =
+            DecoderString ? dyn_cast<StringInit>(DecoderString->getValue())
+                          : nullptr;
+        if (String && String->getValue() != "")
+          Decoder = std::string(String->getValue());
+
+        if (Decoder == "" && CGI.Operands[SO.first].MIOperandInfo &&
+            CGI.Operands[SO.first].MIOperandInfo->getNumArgs()) {
+          Init *Arg = CGI.Operands[SO.first].MIOperandInfo->getArg(SO.second);
+          if (DefInit *DI = cast<DefInit>(Arg))
+            TypeRecord = DI->getDef();
+        }
+
+        bool isReg = false;
+        if (TypeRecord->isSubClassOf("RegisterOperand"))
+          TypeRecord = TypeRecord->getValueAsDef("RegClass");
+        if (TypeRecord->isSubClassOf("RegisterClass")) {
+          Decoder = "Decode" + TypeRecord->getName().str() + "RegisterClass";
+          isReg = true;
+        } else if (TypeRecord->isSubClassOf("PointerLikeRegClass")) {
+          Decoder = "DecodePointerLikeRegClass" +
+                    utostr(TypeRecord->getValueAsInt("RegClassKind"));
+          isReg = true;
+        }
+
+        DecoderString = TypeRecord->getValue("DecoderMethod");
+        String = DecoderString ? dyn_cast<StringInit>(DecoderString->getValue())
+                               : nullptr;
+        if (!isReg && String && String->getValue() != "")
+          Decoder = std::string(String->getValue());
+
+        RecordVal *HasCompleteDecoderVal =
+            TypeRecord->getValue("hasCompleteDecoder");
+        BitInit *HasCompleteDecoderBit =
+            HasCompleteDecoderVal
+                ? dyn_cast<BitInit>(HasCompleteDecoderVal->getValue())
+                : nullptr;
+        bool HasCompleteDecoder =
+            HasCompleteDecoderBit ? HasCompleteDecoderBit->getValue() : true;
+
+        OperandInfo OpInfo(Decoder, HasCompleteDecoder);
+        OpInfo.addField(bitStart, bitWidth, 0);
+
+        NumberedInsnOperands[Name].push_back(OpInfo);
+
+        // FIXME: For complex operands with custom decoders we can't handle tied
+        // sub-operands automatically. Skip those here and assume that this is
+        // fixed up elsewhere.
+        if (CGI.Operands[SO.first].MIOperandInfo &&
+            CGI.Operands[SO.first].MIOperandInfo->getNumArgs() > 1 && String &&
+            String->getValue() != "")
+          NumberedInsnOperandsNoTie.insert(Name);
+      }
+    }
+
+    // For each operand, see if we can figure out where it is encoded.
+    for (const auto &Op : InOutOperands) {
+      Init *OpInit = Op.first;
+      StringRef OpName = Op.second;
+
+      if (SupportPositionalDecoding) {
+        if (!NumberedInsnOperands[std::string(OpName)].empty()) {
+          llvm::append_range(InsnOperands,
+                             NumberedInsnOperands[std::string(OpName)]);
+          continue;
+        }
+        if (!NumberedInsnOperands[TiedNames[std::string(OpName)]].empty()) {
+          if (!NumberedInsnOperandsNoTie.count(
+                  TiedNames[std::string(OpName)])) {
+            // Figure out to which (sub)operand we're tied.
+            unsigned i =
+                CGI.Operands.getOperandNamed(TiedNames[std::string(OpName)]);
+            int tiedTo = CGI.Operands[i].getTiedRegister();
+            if (tiedTo == -1) {
+              i = CGI.Operands.getOperandNamed(OpName);
+              tiedTo = CGI.Operands[i].getTiedRegister();
+            }
+
+            if (tiedTo != -1) {
+              std::pair<unsigned, unsigned> SO =
+                  CGI.Operands.getSubOperandNumber(tiedTo);
+
+              InsnOperands.push_back(
+                  NumberedInsnOperands[TiedNames[std::string(OpName)]]
+                                      [SO.second]);
+            }
+          }
+          continue;
+        }
+      }
+
+      // We're ready to find the instruction encoding locations for this operand.
+
+      // First, find the operand type ("OpInit"), and sub-op names
+      // ("SubArgDag") if present.
+      DagInit *SubArgDag = dyn_cast<DagInit>(OpInit);
+      if (SubArgDag)
+        OpInit = SubArgDag->getOperator();
+      Record *OpTypeRec = cast<DefInit>(OpInit)->getDef();
+      // Lookup the sub-operands from the operand type record (note that only
+      // Operand subclasses have MIOperandInfo, see CodeGenInstruction.cpp).
+      DagInit *SubOps = OpTypeRec->isSubClassOf("Operand")
+                            ? OpTypeRec->getValueAsDag("MIOperandInfo")
+                            : nullptr;
+
+      // Lookup the decoder method and construct a new OperandInfo to hold our result.
+      OperandInfo OpInfo = getOpInfo(OpTypeRec);
+
+      // If we have named sub-operands...
+      if (SubArgDag) {
+        // Then there should not be a custom decoder specified on the top-level
+        // type.
+        if (!OpInfo.Decoder.empty()) {
+          PrintError(EncodingDef.getLoc(),
+                     "DecoderEmitter: operand \"" + OpName + "\" has type \"" +
+                         OpInit->getAsString() +
+                         "\" with a custom DecoderMethod, but also named "
+                         "sub-operands.");
+          continue;
+        }
+
+        // Decode each of the sub-ops separately.
+        assert(SubOps && SubArgDag->getNumArgs() == SubOps->getNumArgs());
+        for (unsigned i = 0; i < SubOps->getNumArgs(); ++i) {
+          StringRef SubOpName = SubArgDag->getArgNameStr(i);
+          OperandInfo SubOpInfo =
+              getOpInfo(cast<DefInit>(SubOps->getArg(i))->getDef());
+
+          addOneOperandFields(EncodingDef, Bits, TiedNames, SubOpName,
+                              SubOpInfo);
+          InsnOperands.push_back(SubOpInfo);
+        }
+        continue;
+      }
+
+      // Otherwise, if we have an operand with sub-operands, but they aren't
+      // named...
+      if (SubOps && OpInfo.Decoder.empty()) {
+        // If it's a single sub-operand, and no custom decoder, use the decoder
+        // from the one sub-operand.
+        if (SubOps->getNumArgs() == 1)
+          OpInfo = getOpInfo(cast<DefInit>(SubOps->getArg(0))->getDef());
+
+        // If we have multiple sub-ops, there'd better have a custom
+        // decoder. (Otherwise we don't know how to populate them properly...)
+        if (SubOps->getNumArgs() > 1) {
+          PrintError(EncodingDef.getLoc(),
+                     "DecoderEmitter: operand \"" + OpName +
+                         "\" uses MIOperandInfo with multiple ops, but doesn't "
+                         "have a custom decoder!");
+          debugDumpRecord(EncodingDef);
+          continue;
+        }
+      }
+
+      addOneOperandFields(EncodingDef, Bits, TiedNames, OpName, OpInfo);
+      // FIXME: it should be an error not to find a definition for a given
+      // operand, rather than just failing to add it to the resulting
+      // instruction! (This is a longstanding bug, which will be addressed in an
+      // upcoming change.)
+      if (OpInfo.numFields() > 0)
+        InsnOperands.push_back(OpInfo);
+    }
+  }
+  Operands[Opc] = InsnOperands;
+
+#if 0
+  LLVM_DEBUG({
+      // Dumps the instruction encoding bits.
+      dumpBits(errs(), Bits);
+
+      errs() << '\n';
+
+      // Dumps the list of operand info.
+      for (unsigned i = 0, e = CGI.Operands.size(); i != e; ++i) {
+        const CGIOperandList::OperandInfo &Info = CGI.Operands[i];
+        const std::string &OperandName = Info.Name;
+        const Record &OperandDef = *Info.Rec;
+
+        errs() << "\t" << OperandName << " (" << OperandDef.getName() << ")\n";
+      }
+    });
+#endif
+
+  return Bits.getNumBits();
+}
+
+// emitFieldFromInstruction - Emit the templated helper function
+// fieldFromInstruction(). Two overloads are generated: one for integral
+// InsnType (mask-and-shift) and one for APInt-like types (delegates to
+// extractBitsAsZExtValue).
+// On Windows we make sure that this function is not inlined when
+// using the VS compiler. It has a bug which causes the function
+// to be optimized out in some circumstances. See llvm.org/pr38292
+static void emitFieldFromInstruction(formatted_raw_ostream &OS) {
+  OS << "// Helper functions for extracting fields from encoded instructions.\n"
+     << "// InsnType must either be integral or an APInt-like object that "
+        "must:\n"
+     << "// * be default-constructible and copy-constructible\n"
+     << "// * be constructible from an APInt (this can be private)\n"
+     << "// * Support insertBits(bits, startBit, numBits)\n"
+     << "// * Support extractBitsAsZExtValue(numBits, startBit)\n"
+     << "// * Support the ~, &, ==, and != operators with other objects of "
+        "the same type\n"
+     << "// * Support the != and bitwise & with uint64_t\n"
+     << "// * Support put (<<) to raw_ostream&\n"
+     << "template <typename InsnType>\n"
+     << "#if defined(_MSC_VER) && !defined(__clang__)\n"
+     << "__declspec(noinline)\n"
+     << "#endif\n"
+     << "static std::enable_if_t<std::is_integral<InsnType>::value, InsnType>\n"
+     << "fieldFromInstruction(const InsnType &insn, unsigned startBit,\n"
+     << "                     unsigned numBits) {\n"
+     << "  assert(startBit + numBits <= 64 && \"Cannot support >64-bit "
+        "extractions!\");\n"
+     << "  assert(startBit + numBits <= (sizeof(InsnType) * 8) &&\n"
+     << "         \"Instruction field out of bounds!\");\n"
+     << "  InsnType fieldMask;\n"
+     << "  if (numBits == sizeof(InsnType) * 8)\n"
+     << "    fieldMask = (InsnType)(-1LL);\n"
+     << "  else\n"
+     << "    fieldMask = (((InsnType)1 << numBits) - 1) << startBit;\n"
+     << "  return (insn & fieldMask) >> startBit;\n"
+     << "}\n"
+     << "\n"
+     << "template <typename InsnType>\n"
+     << "static std::enable_if_t<!std::is_integral<InsnType>::value, "
+        "uint64_t>\n"
+     << "fieldFromInstruction(const InsnType &insn, unsigned startBit,\n"
+     << "                     unsigned numBits) {\n"
+     << "  return insn.extractBitsAsZExtValue(numBits, startBit);\n"
+     << "}\n\n";
+}
+
+// emitInsertBits - Emit the templated helper function insertBits().
+static void emitInsertBits(formatted_raw_ostream &OS) {
+ // The integral overload ORs the extracted bits into place; the APInt-like
+ // overload delegates to the object's own insertBits() member.
+ OS << "// Helper function for inserting bits extracted from an encoded "
+ "instruction into\n"
+ << "// a field.\n"
+ << "template <typename InsnType>\n"
+ << "static std::enable_if_t<std::is_integral<InsnType>::value>\n"
+ << "insertBits(InsnType &field, InsnType bits, unsigned startBit, "
+ "unsigned numBits) {\n"
+ << " assert(startBit + numBits <= sizeof field * 8);\n"
+ << " field |= (InsnType)bits << startBit;\n"
+ << "}\n"
+ << "\n"
+ << "template <typename InsnType>\n"
+ << "static std::enable_if_t<!std::is_integral<InsnType>::value>\n"
+ << "insertBits(InsnType &field, uint64_t bits, unsigned startBit, "
+ "unsigned numBits) {\n"
+ << " field.insertBits(bits, startBit, numBits);\n"
+ << "}\n\n";
+}
+
+// emitDecodeInstruction - Emit the templated helper function
+// decodeInstruction().
+// The emitted function is a byte-code interpreter for the decoder tables:
+// it walks DecodeTable, dispatching on the MCD::OPC_* opcodes until an
+// instruction is decoded (OPC_Decode / OPC_TryDecode) or decoding fails
+// (OPC_Fail). When IsVarLenInst is set, a makeUp callback parameter is added
+// so the caller can extend the raw instruction bits on demand.
+static void emitDecodeInstruction(formatted_raw_ostream &OS,
+ bool IsVarLenInst) {
+ OS << "template <typename InsnType>\n"
+ << "static DecodeStatus decodeInstruction(const uint8_t DecodeTable[], "
+ "MCInst &MI,\n"
+ << " InsnType insn, uint64_t "
+ "Address,\n"
+ << " const MCDisassembler *DisAsm,\n"
+ << " const MCSubtargetInfo &STI";
+ if (IsVarLenInst) {
+ OS << ",\n"
+ << " llvm::function_ref<void(APInt "
+ "&,"
+ << " uint64_t)> makeUp";
+ }
+ OS << ") {\n"
+ << " const FeatureBitset &Bits = STI.getFeatureBits();\n"
+ << "\n"
+ << " const uint8_t *Ptr = DecodeTable;\n"
+ << " uint64_t CurFieldValue = 0;\n"
+ << " DecodeStatus S = MCDisassembler::Success;\n"
+ << " while (true) {\n"
+ << " ptrdiff_t Loc = Ptr - DecodeTable;\n"
+ << " switch (*Ptr) {\n"
+ << " default:\n"
+ << " errs() << Loc << \": Unexpected decode table opcode!\\n\";\n"
+ << " return MCDisassembler::Fail;\n"
+ << " case MCD::OPC_ExtractField: {\n"
+ << " unsigned Start = *++Ptr;\n"
+ << " unsigned Len = *++Ptr;\n"
+ << " ++Ptr;\n";
+ // For variable-length instructions, make sure enough bits are available
+ // before the field is extracted.
+ if (IsVarLenInst)
+ OS << " makeUp(insn, Start + Len);\n";
+ OS << " CurFieldValue = fieldFromInstruction(insn, Start, Len);\n"
+ << " LLVM_DEBUG(dbgs() << Loc << \": OPC_ExtractField(\" << Start << "
+ "\", \"\n"
+ << " << Len << \"): \" << CurFieldValue << \"\\n\");\n"
+ << " break;\n"
+ << " }\n"
+ << " case MCD::OPC_FilterValue: {\n"
+ << " // Decode the field value.\n"
+ << " unsigned Len;\n"
+ << " uint64_t Val = decodeULEB128(++Ptr, &Len);\n"
+ << " Ptr += Len;\n"
+ << " // NumToSkip is a plain 24-bit integer.\n"
+ << " unsigned NumToSkip = *Ptr++;\n"
+ << " NumToSkip |= (*Ptr++) << 8;\n"
+ << " NumToSkip |= (*Ptr++) << 16;\n"
+ << "\n"
+ << " // Perform the filter operation.\n"
+ << " if (Val != CurFieldValue)\n"
+ << " Ptr += NumToSkip;\n"
+ << " LLVM_DEBUG(dbgs() << Loc << \": OPC_FilterValue(\" << Val << "
+ "\", \" << NumToSkip\n"
+ << " << \"): \" << ((Val != CurFieldValue) ? \"FAIL:\" "
+ ": \"PASS:\")\n"
+ << " << \" continuing at \" << (Ptr - DecodeTable) << "
+ "\"\\n\");\n"
+ << "\n"
+ << " break;\n"
+ << " }\n"
+ << " case MCD::OPC_CheckField: {\n"
+ << " unsigned Start = *++Ptr;\n"
+ << " unsigned Len = *++Ptr;\n";
+ if (IsVarLenInst)
+ OS << " makeUp(insn, Start + Len);\n";
+ OS << " uint64_t FieldValue = fieldFromInstruction(insn, Start, Len);\n"
+ << " // Decode the field value.\n"
+ << " unsigned PtrLen = 0;\n"
+ << " uint64_t ExpectedValue = decodeULEB128(++Ptr, &PtrLen);\n"
+ << " Ptr += PtrLen;\n"
+ << " // NumToSkip is a plain 24-bit integer.\n"
+ << " unsigned NumToSkip = *Ptr++;\n"
+ << " NumToSkip |= (*Ptr++) << 8;\n"
+ << " NumToSkip |= (*Ptr++) << 16;\n"
+ << "\n"
+ << " // If the actual and expected values don't match, skip.\n"
+ << " if (ExpectedValue != FieldValue)\n"
+ << " Ptr += NumToSkip;\n"
+ << " LLVM_DEBUG(dbgs() << Loc << \": OPC_CheckField(\" << Start << "
+ "\", \"\n"
+ << " << Len << \", \" << ExpectedValue << \", \" << "
+ "NumToSkip\n"
+ << " << \"): FieldValue = \" << FieldValue << \", "
+ "ExpectedValue = \"\n"
+ << " << ExpectedValue << \": \"\n"
+ << " << ((ExpectedValue == FieldValue) ? \"PASS\\n\" : "
+ "\"FAIL\\n\"));\n"
+ << " break;\n"
+ << " }\n"
+ << " case MCD::OPC_CheckPredicate: {\n"
+ << " unsigned Len;\n"
+ << " // Decode the Predicate Index value.\n"
+ << " unsigned PIdx = decodeULEB128(++Ptr, &Len);\n"
+ << " Ptr += Len;\n"
+ << " // NumToSkip is a plain 24-bit integer.\n"
+ << " unsigned NumToSkip = *Ptr++;\n"
+ << " NumToSkip |= (*Ptr++) << 8;\n"
+ << " NumToSkip |= (*Ptr++) << 16;\n"
+ << " // Check the predicate.\n"
+ << " bool Pred;\n"
+ << " if (!(Pred = checkDecoderPredicate(PIdx, Bits)))\n"
+ << " Ptr += NumToSkip;\n"
+ << " (void)Pred;\n"
+ << " LLVM_DEBUG(dbgs() << Loc << \": OPC_CheckPredicate(\" << PIdx "
+ "<< \"): \"\n"
+ << " << (Pred ? \"PASS\\n\" : \"FAIL\\n\"));\n"
+ << "\n"
+ << " break;\n"
+ << " }\n"
+ << " case MCD::OPC_Decode: {\n"
+ << " unsigned Len;\n"
+ << " // Decode the Opcode value.\n"
+ << " unsigned Opc = decodeULEB128(++Ptr, &Len);\n"
+ << " Ptr += Len;\n"
+ << " unsigned DecodeIdx = decodeULEB128(Ptr, &Len);\n"
+ << " Ptr += Len;\n"
+ << "\n"
+ << " MI.clear();\n"
+ << " MI.setOpcode(Opc);\n"
+ << " bool DecodeComplete;\n";
+ // Variable-length decode: look up the full length for this opcode and
+ // make sure all of its bits have been fetched before decoding.
+ if (IsVarLenInst) {
+ OS << " Len = InstrLenTable[Opc];\n"
+ << " makeUp(insn, Len);\n";
+ }
+ OS << " S = decodeToMCInst(S, DecodeIdx, insn, MI, Address, DisAsm, "
+ "DecodeComplete);\n"
+ << " assert(DecodeComplete);\n"
+ << "\n"
+ << " LLVM_DEBUG(dbgs() << Loc << \": OPC_Decode: opcode \" << Opc\n"
+ << " << \", using decoder \" << DecodeIdx << \": \"\n"
+ << " << (S != MCDisassembler::Fail ? \"PASS\" : "
+ "\"FAIL\") << \"\\n\");\n"
+ << " return S;\n"
+ << " }\n"
+ << " case MCD::OPC_TryDecode: {\n"
+ << " unsigned Len;\n"
+ << " // Decode the Opcode value.\n"
+ << " unsigned Opc = decodeULEB128(++Ptr, &Len);\n"
+ << " Ptr += Len;\n"
+ << " unsigned DecodeIdx = decodeULEB128(Ptr, &Len);\n"
+ << " Ptr += Len;\n"
+ << " // NumToSkip is a plain 24-bit integer.\n"
+ << " unsigned NumToSkip = *Ptr++;\n"
+ << " NumToSkip |= (*Ptr++) << 8;\n"
+ << " NumToSkip |= (*Ptr++) << 16;\n"
+ << "\n"
+ << " // Perform the decode operation.\n"
+ << " MCInst TmpMI;\n"
+ << " TmpMI.setOpcode(Opc);\n"
+ << " bool DecodeComplete;\n"
+ << " S = decodeToMCInst(S, DecodeIdx, insn, TmpMI, Address, DisAsm, "
+ "DecodeComplete);\n"
+ << " LLVM_DEBUG(dbgs() << Loc << \": OPC_TryDecode: opcode \" << "
+ "Opc\n"
+ << " << \", using decoder \" << DecodeIdx << \": \");\n"
+ << "\n"
+ << " if (DecodeComplete) {\n"
+ << " // Decoding complete.\n"
+ << " LLVM_DEBUG(dbgs() << (S != MCDisassembler::Fail ? \"PASS\" : "
+ "\"FAIL\") << \"\\n\");\n"
+ << " MI = TmpMI;\n"
+ << " return S;\n"
+ << " } else {\n"
+ << " assert(S == MCDisassembler::Fail);\n"
+ << " // If the decoding was incomplete, skip.\n"
+ << " Ptr += NumToSkip;\n"
+ << " LLVM_DEBUG(dbgs() << \"FAIL: continuing at \" << (Ptr - "
+ "DecodeTable) << \"\\n\");\n"
+ << " // Reset decode status. This also drops a SoftFail status "
+ "that could be\n"
+ << " // set before the decode attempt.\n"
+ << " S = MCDisassembler::Success;\n"
+ << " }\n"
+ << " break;\n"
+ << " }\n"
+ << " case MCD::OPC_SoftFail: {\n"
+ << " // Decode the mask values.\n"
+ << " unsigned Len;\n"
+ << " uint64_t PositiveMask = decodeULEB128(++Ptr, &Len);\n"
+ << " Ptr += Len;\n"
+ << " uint64_t NegativeMask = decodeULEB128(Ptr, &Len);\n"
+ << " Ptr += Len;\n"
+ << " bool Fail = (insn & PositiveMask) != 0 || (~insn & "
+ "NegativeMask) != 0;\n"
+ << " if (Fail)\n"
+ << " S = MCDisassembler::SoftFail;\n"
+ << " LLVM_DEBUG(dbgs() << Loc << \": OPC_SoftFail: \" << (Fail ? "
+ "\"FAIL\\n\" : \"PASS\\n\"));\n"
+ << " break;\n"
+ << " }\n"
+ << " case MCD::OPC_Fail: {\n"
+ << " LLVM_DEBUG(dbgs() << Loc << \": OPC_Fail\\n\");\n"
+ << " return MCDisassembler::Fail;\n"
+ << " }\n"
+ << " }\n"
+ << " }\n"
+ << " llvm_unreachable(\"bogosity detected in disassembler state "
+ "machine!\");\n"
+ << "}\n\n";
+}
+
+// Helper to propagate SoftFail status. Returns false if the status is Fail;
+// callers are expected to early-exit in that condition. (Note, the '&' operator
+// is correct to propagate the values of this enum; see comment on 'enum
+// DecodeStatus'.)
+static void emitCheck(formatted_raw_ostream &OS) {
+ // Emitted as a file-local helper in the generated disassembler source.
+ OS << "static bool Check(DecodeStatus &Out, DecodeStatus In) {\n"
+ << " Out = static_cast<DecodeStatus>(Out & In);\n"
+ << " return Out != MCDisassembler::Fail;\n"
+ << "}\n\n";
+}
+
+// Emits disassembler code for instruction decoding.
+// Top-level driver: emits the helper functions, collects all instruction
+// encodings (including per-HwMode variants and AdditionalEncoding aliases),
+// builds one decoder table per (DecoderNamespace, width) pair, and finally
+// emits the predicate/decoder functions and decodeInstruction() itself.
+void DecoderEmitter::run(raw_ostream &o) {
+ formatted_raw_ostream OS(o);
+ OS << "#include \"llvm/MC/MCInst.h\"\n";
+ OS << "#include \"llvm/MC/MCSubtargetInfo.h\"\n";
+ OS << "#include \"llvm/MC/SubtargetFeature.h\"\n";
+ OS << "#include \"llvm/Support/DataTypes.h\"\n";
+ OS << "#include \"llvm/Support/Debug.h\"\n";
+ OS << "#include \"llvm/Support/LEB128.h\"\n";
+ OS << "#include \"llvm/Support/raw_ostream.h\"\n";
+ OS << "#include <assert.h>\n";
+ OS << '\n';
+ OS << "namespace llvm {\n\n";
+
+ emitFieldFromInstruction(OS);
+ emitInsertBits(OS);
+ emitCheck(OS);
+
+ Target.reverseBitsForLittleEndianEncoding();
+
+ // Parameterize the decoders based on namespace and instruction width.
+ std::set<StringRef> HwModeNames;
+ const auto &NumberedInstructions = Target.getInstructionsByEnumValue();
+ NumberedEncodings.reserve(NumberedInstructions.size());
+ DenseMap<Record *, unsigned> IndexOfInstruction;
+ // First, collect all HwModes referenced by the target.
+ for (const auto &NumberedInstruction : NumberedInstructions) {
+ IndexOfInstruction[NumberedInstruction->TheDef] = NumberedEncodings.size();
+
+ if (const RecordVal *RV =
+ NumberedInstruction->TheDef->getValue("EncodingInfos")) {
+ if (auto *DI = dyn_cast_or_null<DefInit>(RV->getValue())) {
+ const CodeGenHwModes &HWM = Target.getHwModes();
+ EncodingInfoByHwMode EBM(DI->getDef(), HWM);
+ for (auto &KV : EBM)
+ HwModeNames.insert(HWM.getMode(KV.first).Name);
+ }
+ }
+ }
+
+ // If HwModeNames is empty, add the empty string so we always have one HwMode.
+ if (HwModeNames.empty())
+ HwModeNames.insert("");
+
+ // Second pass: record one NumberedEncoding per (instruction, HwMode).
+ for (const auto &NumberedInstruction : NumberedInstructions) {
+ IndexOfInstruction[NumberedInstruction->TheDef] = NumberedEncodings.size();
+
+ if (const RecordVal *RV =
+ NumberedInstruction->TheDef->getValue("EncodingInfos")) {
+ if (DefInit *DI = dyn_cast_or_null<DefInit>(RV->getValue())) {
+ const CodeGenHwModes &HWM = Target.getHwModes();
+ EncodingInfoByHwMode EBM(DI->getDef(), HWM);
+ for (auto &KV : EBM) {
+ NumberedEncodings.emplace_back(KV.second, NumberedInstruction,
+ HWM.getMode(KV.first).Name);
+ HwModeNames.insert(HWM.getMode(KV.first).Name);
+ }
+ continue;
+ }
+ }
+ // This instruction is encoded the same on all HwModes. Emit it for all
+ // HwModes.
+ for (StringRef HwModeName : HwModeNames)
+ NumberedEncodings.emplace_back(NumberedInstruction->TheDef,
+ NumberedInstruction, HwModeName);
+ }
+ // Alternate encodings that decode to an existing instruction.
+ for (const auto &NumberedAlias : RK.getAllDerivedDefinitions("AdditionalEncoding"))
+ NumberedEncodings.emplace_back(
+ NumberedAlias,
+ &Target.getInstruction(NumberedAlias->getValueAsDef("AliasOf")));
+
+ std::map<std::pair<std::string, unsigned>, std::vector<EncodingIDAndOpcode>>
+ OpcMap;
+ std::map<unsigned, std::vector<OperandInfo>> Operands;
+ std::vector<unsigned> InstrLen;
+
+ // Variable-length targets express "Inst" as a dag rather than bits.
+ bool IsVarLenInst =
+ any_of(NumberedInstructions, [](const CodeGenInstruction *CGI) {
+ RecordVal *RV = CGI->TheDef->getValue("Inst");
+ return RV && isa<DagInit>(RV->getValue());
+ });
+ unsigned MaxInstLen = 0;
+
+ // Populate operand info for every decodable encoding and bucket it by
+ // (DecoderNamespace, Size) for table generation. Pseudo/asm-only/
+ // codegen-only instructions are not disassemblable and are skipped.
+ for (unsigned i = 0; i < NumberedEncodings.size(); ++i) {
+ const Record *EncodingDef = NumberedEncodings[i].EncodingDef;
+ const CodeGenInstruction *Inst = NumberedEncodings[i].Inst;
+ const Record *Def = Inst->TheDef;
+ unsigned Size = EncodingDef->getValueAsInt("Size");
+ if (Def->getValueAsString("Namespace") == "TargetOpcode" ||
+ Def->getValueAsBit("isPseudo") ||
+ Def->getValueAsBit("isAsmParserOnly") ||
+ Def->getValueAsBit("isCodeGenOnly")) {
+ NumEncodingsLackingDisasm++;
+ continue;
+ }
+
+ if (i < NumberedInstructions.size())
+ NumInstructions++;
+ NumEncodings++;
+
+ if (!Size && !IsVarLenInst)
+ continue;
+
+ if (IsVarLenInst)
+ InstrLen.resize(NumberedInstructions.size(), 0);
+
+ if (unsigned Len = populateInstruction(Target, *EncodingDef, *Inst, i,
+ Operands, IsVarLenInst)) {
+ if (IsVarLenInst) {
+ MaxInstLen = std::max(MaxInstLen, Len);
+ InstrLen[i] = Len;
+ }
+ std::string DecoderNamespace =
+ std::string(EncodingDef->getValueAsString("DecoderNamespace"));
+ if (!NumberedEncodings[i].HwModeName.empty())
+ DecoderNamespace +=
+ std::string("_") + NumberedEncodings[i].HwModeName.str();
+ OpcMap[std::make_pair(DecoderNamespace, Size)].emplace_back(
+ i, IndexOfInstruction.find(Def)->second);
+ } else {
+ NumEncodingsOmitted++;
+ }
+ }
+
+ DecoderTableInfo TableInfo;
+ for (const auto &Opc : OpcMap) {
+ // Emit the decoder for this namespace+width combination.
+ ArrayRef<EncodingAndInst> NumberedEncodingsRef(
+ NumberedEncodings.data(), NumberedEncodings.size());
+ FilterChooser FC(NumberedEncodingsRef, Opc.second, Operands,
+ IsVarLenInst ? MaxInstLen : 8 * Opc.first.second, this);
+
+ // The decode table is cleared for each top level decoder function. The
+ // predicates and decoders themselves, however, are shared across all
+ // decoders to give more opportunities for uniquing.
+ TableInfo.Table.clear();
+ TableInfo.FixupStack.clear();
+ TableInfo.Table.reserve(16384);
+ TableInfo.FixupStack.emplace_back();
+ FC.emitTableEntries(TableInfo);
+ // Any NumToSkip fixups in the top level scope can resolve to the
+ // OPC_Fail at the end of the table.
+ assert(TableInfo.FixupStack.size() == 1 && "fixup stack phasing error!");
+ // Resolve any NumToSkip fixups in the current scope.
+ resolveTableFixups(TableInfo.Table, TableInfo.FixupStack.back(),
+ TableInfo.Table.size());
+ TableInfo.FixupStack.clear();
+
+ TableInfo.Table.push_back(MCD::OPC_Fail);
+
+ // Print the table to the output stream.
+ emitTable(OS, TableInfo.Table, 0, FC.getBitWidth(), Opc.first.first);
+ }
+
+ // For variable instruction, we emit an instruction length table
+ // to let the decoder know how long the instructions are.
+ // You can see example usage in M68k's disassembler.
+ if (IsVarLenInst)
+ emitInstrLenTable(OS, InstrLen);
+ // Emit the predicate function.
+ emitPredicateFunction(OS, TableInfo.Predicates, 0);
+
+ // Emit the decoder function.
+ emitDecoderFunction(OS, TableInfo.Decoders, 0);
+
+ // Emit the main entry point for the decoder, decodeInstruction().
+ emitDecodeInstruction(OS, IsVarLenInst);
+
+ OS << "\n} // end namespace llvm\n";
+}
+
+namespace llvm {
+
+// TableGen entry point: construct a DecoderEmitter over the record keeper
+// and emit the complete generated disassembler source to OS.
+void EmitDecoder(RecordKeeper &RK, raw_ostream &OS,
+ const std::string &PredicateNamespace) {
+ DecoderEmitter(RK, PredicateNamespace).run(OS);
+}
+
+} // end namespace llvm
diff --git a/contrib/libs/llvm16/utils/TableGen/DirectiveEmitter.cpp b/contrib/libs/llvm16/utils/TableGen/DirectiveEmitter.cpp
new file mode 100644
index 0000000000..f32fbe3e25
--- /dev/null
+++ b/contrib/libs/llvm16/utils/TableGen/DirectiveEmitter.cpp
@@ -0,0 +1,885 @@
+//===- DirectiveEmitter.cpp - Directive Language Emitter ------------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// DirectiveEmitter uses the descriptions of directives and clauses to construct
+// common code declarations to be used in Frontends.
+//
+//===----------------------------------------------------------------------===//
+
+#include "llvm/TableGen/DirectiveEmitter.h"
+#include "llvm/ADT/STLExtras.h"
+#include "llvm/ADT/SmallVector.h"
+#include "llvm/ADT/StringSet.h"
+#include "llvm/ADT/StringSwitch.h"
+#include "llvm/TableGen/Error.h"
+#include "llvm/TableGen/Record.h"
+
+using namespace llvm;
+
+namespace {
+// Simple RAII helper for defining ifdef-undef-endif scopes.
+class IfDefScope {
+public:
+ // Opens the scope: emits "#ifdef Name" followed by "#undef Name".
+ IfDefScope(StringRef Name, raw_ostream &OS) : Name(Name), OS(OS) {
+ OS << "#ifdef " << Name << "\n"
+ << "#undef " << Name << "\n";
+ }
+
+ // Closes the scope with the matching "#endif // Name" trailer.
+ ~IfDefScope() { OS << "\n#endif // " << Name << "\n\n"; }
+
+private:
+ StringRef Name;
+ raw_ostream &OS;
+};
+} // end anonymous namespace
+
+namespace llvm {
+
+// Generate enum class
+// Emits "enum class <Enum>" with one enumerator per record (Prefix +
+// formatted name), an <Enum>_enumSize constant, and optionally constexpr
+// aliases that expose the enumerators in the language's C++ namespace.
+void GenerateEnumClass(const std::vector<Record *> &Records, raw_ostream &OS,
+ StringRef Enum, StringRef Prefix,
+ const DirectiveLanguage &DirLang) {
+ OS << "\n";
+ OS << "enum class " << Enum << " {\n";
+ for (const auto &R : Records) {
+ BaseRecord Rec{R};
+ OS << " " << Prefix << Rec.getFormattedName() << ",\n";
+ }
+ OS << "};\n";
+ OS << "\n";
+ OS << "static constexpr std::size_t " << Enum
+ << "_enumSize = " << Records.size() << ";\n";
+
+ // Make the enum values available in the defined namespace. This allows us to
+ // write something like Enum_X if we have a `using namespace <CppNamespace>`.
+ // At the same time we do not lose the strong type guarantees of the enum
+ // class, that is we cannot pass an unsigned as Directive without an explicit
+ // cast.
+ if (DirLang.hasMakeEnumAvailableInNamespace()) {
+ OS << "\n";
+ for (const auto &R : Records) {
+ BaseRecord Rec{R};
+ OS << "constexpr auto " << Prefix << Rec.getFormattedName() << " = "
+ << "llvm::" << DirLang.getCppNamespace() << "::" << Enum
+ << "::" << Prefix << Rec.getFormattedName() << ";\n";
+ }
+ }
+}
+
+// Generate enums for values that clauses can take.
+// Also generate function declarations for get<Enum>Name(StringRef Str).
+// Helper declarations are appended to EnumHelperFuncs so the caller can emit
+// them in one place.
+void GenerateEnumClauseVal(const std::vector<Record *> &Records,
+ raw_ostream &OS, const DirectiveLanguage &DirLang,
+ std::string &EnumHelperFuncs) {
+ for (const auto &R : Records) {
+ Clause C{R};
+ const auto &ClauseVals = C.getClauseVals();
+ // Clauses without enumerated values get no enum.
+ if (ClauseVals.size() <= 0)
+ continue;
+
+ const auto &EnumName = C.getEnumName();
+ if (EnumName.size() == 0) {
+ PrintError("enumClauseValue field not set in Clause" +
+ C.getFormattedName() + ".");
+ return;
+ }
+
+ OS << "\n";
+ OS << "enum class " << EnumName << " {\n";
+ for (const auto &CV : ClauseVals) {
+ ClauseVal CVal{CV};
+ OS << " " << CV->getName() << "=" << CVal.getValue() << ",\n";
+ }
+ OS << "};\n";
+
+ // NOTE(review): the helper-function declarations are only accumulated
+ // when the enum is also made available in the namespace; confirm this
+ // gating is intentional for languages without that flag.
+ if (DirLang.hasMakeEnumAvailableInNamespace()) {
+ OS << "\n";
+ for (const auto &CV : ClauseVals) {
+ OS << "constexpr auto " << CV->getName() << " = "
+ << "llvm::" << DirLang.getCppNamespace() << "::" << EnumName
+ << "::" << CV->getName() << ";\n";
+ }
+ EnumHelperFuncs += (llvm::Twine(EnumName) + llvm::Twine(" get") +
+ llvm::Twine(EnumName) + llvm::Twine("(StringRef);\n"))
+ .str();
+
+ EnumHelperFuncs +=
+ (llvm::Twine("llvm::StringRef get") + llvm::Twine(DirLang.getName()) +
+ llvm::Twine(EnumName) + llvm::Twine("Name(") +
+ llvm::Twine(EnumName) + llvm::Twine(");\n"))
+ .str();
+ }
+ }
+}
+
+// Insert each clause of the list into CrtClauses, reporting an error for any
+// clause already present. Returns true if at least one duplicate was found.
+// CrtClauses is shared across calls so duplicates can be detected across
+// several lists for the same directive.
+bool HasDuplicateClauses(const std::vector<Record *> &Clauses,
+ const Directive &Directive,
+ llvm::StringSet<> &CrtClauses) {
+ bool HasError = false;
+ for (const auto &C : Clauses) {
+ VersionedClause VerClause{C};
+ const auto insRes = CrtClauses.insert(VerClause.getClause().getName());
+ if (!insRes.second) {
+ PrintError("Clause " + VerClause.getClause().getRecordName() +
+ " already defined on directive " + Directive.getRecordName());
+ HasError = true;
+ }
+ }
+ return HasError;
+}
+
+// Check for duplicate clauses in lists. Clauses cannot appear twice in the
+// three allowed lists. Also, since required implies allowed, clauses cannot
+// appear in both the allowedClauses and requiredClauses lists.
+// Returns true if any duplicate was detected (PrintFatalError aborts first).
+bool HasDuplicateClausesInDirectives(const std::vector<Record *> &Directives) {
+ bool HasDuplicate = false;
+ for (const auto &D : Directives) {
+ Directive Dir{D};
+ llvm::StringSet<> Clauses;
+ // Check for duplicates in the three allowed lists.
+ if (HasDuplicateClauses(Dir.getAllowedClauses(), Dir, Clauses) ||
+ HasDuplicateClauses(Dir.getAllowedOnceClauses(), Dir, Clauses) ||
+ HasDuplicateClauses(Dir.getAllowedExclusiveClauses(), Dir, Clauses)) {
+ HasDuplicate = true;
+ }
+ // Check for duplicate between allowedClauses and required
+ Clauses.clear();
+ if (HasDuplicateClauses(Dir.getAllowedClauses(), Dir, Clauses) ||
+ HasDuplicateClauses(Dir.getRequiredClauses(), Dir, Clauses)) {
+ HasDuplicate = true;
+ }
+ if (HasDuplicate)
+ PrintFatalError("One or more clauses are defined multiple times on"
+ " directive " +
+ Dir.getRecordName());
+ }
+
+ return HasDuplicate;
+}
+
+// Check consistency of records. Return true if an error has been detected.
+// Return false if the records are valid.
+bool DirectiveLanguage::HasValidityErrors() const {
+ // Exactly one DirectiveLanguage record must exist.
+ if (getDirectiveLanguages().size() != 1) {
+ PrintFatalError("A single definition of DirectiveLanguage is needed.");
+ return true;
+ }
+
+ return HasDuplicateClausesInDirectives(getDirectives());
+}
+
+// Generate the declaration section for the enumeration in the directive
+// language
+// Emits the include-guarded header: Directive/Clause enum classes, clause
+// value enums, and declarations of the kind/name helper functions.
+void EmitDirectivesDecl(RecordKeeper &Records, raw_ostream &OS) {
+ const auto DirLang = DirectiveLanguage{Records};
+ if (DirLang.HasValidityErrors())
+ return;
+
+ OS << "#ifndef LLVM_" << DirLang.getName() << "_INC\n";
+ OS << "#define LLVM_" << DirLang.getName() << "_INC\n";
+
+ if (DirLang.hasEnableBitmaskEnumInNamespace())
+ OS << "\n#include \"llvm/ADT/BitmaskEnum.h\"\n";
+
+ OS << "\n";
+ OS << "namespace llvm {\n";
+ OS << "class StringRef;\n";
+
+ // Open namespaces defined in the directive language
+ llvm::SmallVector<StringRef, 2> Namespaces;
+ llvm::SplitString(DirLang.getCppNamespace(), Namespaces, "::");
+ for (auto Ns : Namespaces)
+ OS << "namespace " << Ns << " {\n";
+
+ if (DirLang.hasEnableBitmaskEnumInNamespace())
+ OS << "\nLLVM_ENABLE_BITMASK_ENUMS_IN_NAMESPACE();\n";
+
+ // Emit Directive enumeration
+ GenerateEnumClass(DirLang.getDirectives(), OS, "Directive",
+ DirLang.getDirectivePrefix(), DirLang);
+
+ // Emit Clause enumeration
+ GenerateEnumClass(DirLang.getClauses(), OS, "Clause",
+ DirLang.getClausePrefix(), DirLang);
+
+ // Emit ClauseVal enumeration
+ std::string EnumHelperFuncs;
+ GenerateEnumClauseVal(DirLang.getClauses(), OS, DirLang, EnumHelperFuncs);
+
+ // Generic function signatures
+ OS << "\n";
+ OS << "// Enumeration helper functions\n";
+ OS << "Directive get" << DirLang.getName()
+ << "DirectiveKind(llvm::StringRef Str);\n";
+ OS << "\n";
+ OS << "llvm::StringRef get" << DirLang.getName()
+ << "DirectiveName(Directive D);\n";
+ OS << "\n";
+ OS << "Clause get" << DirLang.getName()
+ << "ClauseKind(llvm::StringRef Str);\n";
+ OS << "\n";
+ OS << "llvm::StringRef get" << DirLang.getName() << "ClauseName(Clause C);\n";
+ OS << "\n";
+ OS << "/// Return true if \\p C is a valid clause for \\p D in version \\p "
+ << "Version.\n";
+ OS << "bool isAllowedClauseForDirective(Directive D, "
+ << "Clause C, unsigned Version);\n";
+ OS << "\n";
+ if (EnumHelperFuncs.length() > 0) {
+ OS << EnumHelperFuncs;
+ OS << "\n";
+ }
+
+ // Closing namespaces
+ for (auto Ns : llvm::reverse(Namespaces))
+ OS << "} // namespace " << Ns << "\n";
+
+ OS << "} // namespace llvm\n";
+
+ OS << "#endif // LLVM_" << DirLang.getName() << "_INC\n";
+}
+
+// Generate function implementation for get<Enum>Name(StringRef Str)
+// Emits a switch over the enum returning the record's (alternative) name.
+void GenerateGetName(const std::vector<Record *> &Records, raw_ostream &OS,
+ StringRef Enum, const DirectiveLanguage &DirLang,
+ StringRef Prefix) {
+ OS << "\n";
+ OS << "llvm::StringRef llvm::" << DirLang.getCppNamespace() << "::get"
+ << DirLang.getName() << Enum << "Name(" << Enum << " Kind) {\n";
+ OS << " switch (Kind) {\n";
+ for (const auto &R : Records) {
+ BaseRecord Rec{R};
+ OS << " case " << Prefix << Rec.getFormattedName() << ":\n";
+ OS << " return \"";
+ // Prefer the alternative (spelled) name when one is provided.
+ if (Rec.getAlternativeName().empty())
+ OS << Rec.getName();
+ else
+ OS << Rec.getAlternativeName();
+ OS << "\";\n";
+ }
+ OS << " }\n"; // switch
+ OS << " llvm_unreachable(\"Invalid " << DirLang.getName() << " " << Enum
+ << " kind\");\n";
+ OS << "}\n";
+}
+
+// Generate function implementation for get<Enum>Kind(StringRef Str)
+// Emits a StringSwitch mapping spelled names to enum values; unknown (and,
+// when ImplicitAsUnknown, implicit) records map to the default record.
+void GenerateGetKind(const std::vector<Record *> &Records, raw_ostream &OS,
+ StringRef Enum, const DirectiveLanguage &DirLang,
+ StringRef Prefix, bool ImplicitAsUnknown) {
+
+ auto DefaultIt = llvm::find_if(
+ Records, [](Record *R) { return R->getValueAsBit("isDefault") == true; });
+
+ if (DefaultIt == Records.end()) {
+ PrintError("At least one " + Enum + " must be defined as default.");
+ return;
+ }
+
+ BaseRecord DefaultRec{(*DefaultIt)};
+
+ OS << "\n";
+ OS << Enum << " llvm::" << DirLang.getCppNamespace() << "::get"
+ << DirLang.getName() << Enum << "Kind(llvm::StringRef Str) {\n";
+ OS << " return llvm::StringSwitch<" << Enum << ">(Str)\n";
+
+ for (const auto &R : Records) {
+ BaseRecord Rec{R};
+ if (ImplicitAsUnknown && R->getValueAsBit("isImplicit")) {
+ OS << " .Case(\"" << Rec.getName() << "\"," << Prefix
+ << DefaultRec.getFormattedName() << ")\n";
+ } else {
+ OS << " .Case(\"" << Rec.getName() << "\"," << Prefix
+ << Rec.getFormattedName() << ")\n";
+ }
+ }
+ OS << " .Default(" << Prefix << DefaultRec.getFormattedName() << ");\n";
+ OS << "}\n";
+}
+
+// Generate function implementation for get<ClauseVal>Kind(StringRef Str)
+// For every clause with enumerated values, emits both the string-to-enum
+// lookup (StringSwitch) and the enum-to-string switch.
+void GenerateGetKindClauseVal(const DirectiveLanguage &DirLang,
+ raw_ostream &OS) {
+ for (const auto &R : DirLang.getClauses()) {
+ Clause C{R};
+ const auto &ClauseVals = C.getClauseVals();
+ // Nothing to emit for clauses without enumerated values.
+ if (ClauseVals.size() <= 0)
+ continue;
+
+ auto DefaultIt = llvm::find_if(ClauseVals, [](Record *CV) {
+ return CV->getValueAsBit("isDefault") == true;
+ });
+
+ if (DefaultIt == ClauseVals.end()) {
+ PrintError("At least one val in Clause " + C.getFormattedName() +
+ " must be defined as default.");
+ return;
+ }
+ const auto DefaultName = (*DefaultIt)->getName();
+
+ const auto &EnumName = C.getEnumName();
+ if (EnumName.size() == 0) {
+ PrintError("enumClauseValue field not set in Clause" +
+ C.getFormattedName() + ".");
+ return;
+ }
+
+ OS << "\n";
+ OS << EnumName << " llvm::" << DirLang.getCppNamespace() << "::get"
+ << EnumName << "(llvm::StringRef Str) {\n";
+ OS << " return llvm::StringSwitch<" << EnumName << ">(Str)\n";
+ for (const auto &CV : ClauseVals) {
+ ClauseVal CVal{CV};
+ OS << " .Case(\"" << CVal.getFormattedName() << "\"," << CV->getName()
+ << ")\n";
+ }
+ OS << " .Default(" << DefaultName << ");\n";
+ OS << "}\n";
+
+ OS << "\n";
+ OS << "llvm::StringRef llvm::" << DirLang.getCppNamespace() << "::get"
+ << DirLang.getName() << EnumName
+ << "Name(llvm::" << DirLang.getCppNamespace() << "::" << EnumName
+ << " x) {\n";
+ OS << " switch (x) {\n";
+ for (const auto &CV : ClauseVals) {
+ ClauseVal CVal{CV};
+ OS << " case " << CV->getName() << ":\n";
+ OS << " return \"" << CVal.getFormattedName() << "\";\n";
+ }
+ OS << " }\n"; // switch
+ OS << " llvm_unreachable(\"Invalid " << DirLang.getName() << " "
+ << EnumName << " kind\");\n";
+ OS << "}\n";
+ }
+}
+
+// Emit one "case <clause>: return <minVer> <= Version && <maxVer> >=
+// Version;" per clause not already emitted. Cases tracks clauses emitted so
+// far so a clause appearing in several lists produces a single case label.
+void GenerateCaseForVersionedClauses(const std::vector<Record *> &Clauses,
+ raw_ostream &OS, StringRef DirectiveName,
+ const DirectiveLanguage &DirLang,
+ llvm::StringSet<> &Cases) {
+ for (const auto &C : Clauses) {
+ VersionedClause VerClause{C};
+
+ const auto ClauseFormattedName = VerClause.getClause().getFormattedName();
+
+ if (Cases.insert(ClauseFormattedName).second) {
+ OS << " case " << DirLang.getClausePrefix() << ClauseFormattedName
+ << ":\n";
+ OS << " return " << VerClause.getMinVersion()
+ << " <= Version && " << VerClause.getMaxVersion() << " >= Version;\n";
+ }
+ }
+}
+
+// Generate the isAllowedClauseForDirective function implementation.
+// Emits a nested switch: outer over Directive, inner over Clause, with
+// version-range checks produced by GenerateCaseForVersionedClauses for all
+// four clause lists (allowed, allowed-once, allowed-exclusive, required).
+void GenerateIsAllowedClause(const DirectiveLanguage &DirLang,
+ raw_ostream &OS) {
+ OS << "\n";
+ OS << "bool llvm::" << DirLang.getCppNamespace()
+ << "::isAllowedClauseForDirective("
+ << "Directive D, Clause C, unsigned Version) {\n";
+ OS << " assert(unsigned(D) <= llvm::" << DirLang.getCppNamespace()
+ << "::Directive_enumSize);\n";
+ OS << " assert(unsigned(C) <= llvm::" << DirLang.getCppNamespace()
+ << "::Clause_enumSize);\n";
+
+ OS << " switch (D) {\n";
+
+ for (const auto &D : DirLang.getDirectives()) {
+ Directive Dir{D};
+
+ OS << " case " << DirLang.getDirectivePrefix() << Dir.getFormattedName()
+ << ":\n";
+ // Directives with no clause lists accept no clauses at all.
+ if (Dir.getAllowedClauses().size() == 0 &&
+ Dir.getAllowedOnceClauses().size() == 0 &&
+ Dir.getAllowedExclusiveClauses().size() == 0 &&
+ Dir.getRequiredClauses().size() == 0) {
+ OS << " return false;\n";
+ } else {
+ OS << " switch (C) {\n";
+
+ llvm::StringSet<> Cases;
+
+ GenerateCaseForVersionedClauses(Dir.getAllowedClauses(), OS,
+ Dir.getName(), DirLang, Cases);
+
+ GenerateCaseForVersionedClauses(Dir.getAllowedOnceClauses(), OS,
+ Dir.getName(), DirLang, Cases);
+
+ GenerateCaseForVersionedClauses(Dir.getAllowedExclusiveClauses(), OS,
+ Dir.getName(), DirLang, Cases);
+
+ GenerateCaseForVersionedClauses(Dir.getRequiredClauses(), OS,
+ Dir.getName(), DirLang, Cases);
+
+ OS << " default:\n";
+ OS << " return false;\n";
+ OS << " }\n"; // End of clauses switch
+ }
+ OS << " break;\n";
+ }
+
+ OS << " }\n"; // End of directives switch
+ OS << " llvm_unreachable(\"Invalid " << DirLang.getName()
+ << " Directive kind\");\n";
+ OS << "}\n"; // End of function isAllowedClauseForDirective
+}
+
+// Generate a simple enum set with the given clauses.
+// Emits a static set named <ClauseSetPrefix><DirectivePrefix><Directive>
+// listing the fully-qualified Clause enumerators.
+void GenerateClauseSet(const std::vector<Record *> &Clauses, raw_ostream &OS,
+ StringRef ClauseSetPrefix, Directive &Dir,
+ const DirectiveLanguage &DirLang) {
+
+ OS << "\n";
+ OS << " static " << DirLang.getClauseEnumSetClass() << " " << ClauseSetPrefix
+ << DirLang.getDirectivePrefix() << Dir.getFormattedName() << " {\n";
+
+ for (const auto &C : Clauses) {
+ VersionedClause VerClause{C};
+ OS << " llvm::" << DirLang.getCppNamespace()
+ << "::Clause::" << DirLang.getClausePrefix()
+ << VerClause.getClause().getFormattedName() << ",\n";
+ }
+ OS << " };\n";
+}
+
+// Generate an enum set for the 4 kinds of clauses linked to a directive.
+// The output is wrapped in a GEN_FLANG_DIRECTIVE_CLAUSE_SETS ifdef scope and
+// the directive language's C++ namespaces.
+void GenerateDirectiveClauseSets(const DirectiveLanguage &DirLang,
+ raw_ostream &OS) {
+
+ IfDefScope Scope("GEN_FLANG_DIRECTIVE_CLAUSE_SETS", OS);
+
+ OS << "\n";
+ OS << "namespace llvm {\n";
+
+ // Open namespaces defined in the directive language.
+ llvm::SmallVector<StringRef, 2> Namespaces;
+ llvm::SplitString(DirLang.getCppNamespace(), Namespaces, "::");
+ for (auto Ns : Namespaces)
+ OS << "namespace " << Ns << " {\n";
+
+ for (const auto &D : DirLang.getDirectives()) {
+ Directive Dir{D};
+
+ OS << "\n";
+ OS << " // Sets for " << Dir.getName() << "\n";
+
+ GenerateClauseSet(Dir.getAllowedClauses(), OS, "allowedClauses_", Dir,
+ DirLang);
+ GenerateClauseSet(Dir.getAllowedOnceClauses(), OS, "allowedOnceClauses_",
+ Dir, DirLang);
+ GenerateClauseSet(Dir.getAllowedExclusiveClauses(), OS,
+ "allowedExclusiveClauses_", Dir, DirLang);
+ GenerateClauseSet(Dir.getRequiredClauses(), OS, "requiredClauses_", Dir,
+ DirLang);
+ }
+
+ // Closing namespaces
+ for (auto Ns : llvm::reverse(Namespaces))
+ OS << "} // namespace " << Ns << "\n";
+
+ OS << "} // namespace llvm\n";
+}
+
+// Generate a map of directive (key) with DirectiveClauses struct as values.
+// The struct holds the 4 sets of enumeration for the 4 kinds of clauses
+// allowances (allowed, allowed once, allowed exclusive and required).
+void GenerateDirectiveClauseMap(const DirectiveLanguage &DirLang,
+ raw_ostream &OS) {
+
+ IfDefScope Scope("GEN_FLANG_DIRECTIVE_CLAUSE_MAP", OS);
+
+ OS << "\n";
+ OS << "{\n";
+
+ for (const auto &D : DirLang.getDirectives()) {
+ Directive Dir{D};
+ OS << " {llvm::" << DirLang.getCppNamespace()
+ << "::Directive::" << DirLang.getDirectivePrefix()
+ << Dir.getFormattedName() << ",\n";
+ OS << " {\n";
+ OS << " llvm::" << DirLang.getCppNamespace() << "::allowedClauses_"
+ << DirLang.getDirectivePrefix() << Dir.getFormattedName() << ",\n";
+ OS << " llvm::" << DirLang.getCppNamespace() << "::allowedOnceClauses_"
+ << DirLang.getDirectivePrefix() << Dir.getFormattedName() << ",\n";
+ OS << " llvm::" << DirLang.getCppNamespace()
+ << "::allowedExclusiveClauses_" << DirLang.getDirectivePrefix()
+ << Dir.getFormattedName() << ",\n";
+ OS << " llvm::" << DirLang.getCppNamespace() << "::requiredClauses_"
+ << DirLang.getDirectivePrefix() << Dir.getFormattedName() << ",\n";
+ OS << " }\n";
+ OS << " },\n";
+ }
+
+ OS << "}\n";
+}
+
+// Generate classes entry for Flang clauses in the Flang parse-tree
+// If the clause as a non-generic class, no entry is generated.
+// If the clause does not hold a value, an EMPTY_CLASS is used.
+// If the clause class is generic then a WRAPPER_CLASS is used. When the value
+// is optional, the value class is wrapped into a std::optional.
+void GenerateFlangClauseParserClass(const DirectiveLanguage &DirLang,
+ raw_ostream &OS) {
+
+ IfDefScope Scope("GEN_FLANG_CLAUSE_PARSER_CLASSES", OS);
+
+ OS << "\n";
+
+ for (const auto &C : DirLang.getClauses()) {
+ Clause Clause{C};
+ if (!Clause.getFlangClass().empty()) {
+ OS << "WRAPPER_CLASS(" << Clause.getFormattedParserClassName() << ", ";
+ if (Clause.isValueOptional() && Clause.isValueList()) {
+ OS << "std::optional<std::list<" << Clause.getFlangClass() << ">>";
+ } else if (Clause.isValueOptional()) {
+ OS << "std::optional<" << Clause.getFlangClass() << ">";
+ } else if (Clause.isValueList()) {
+ OS << "std::list<" << Clause.getFlangClass() << ">";
+ } else {
+ OS << Clause.getFlangClass();
+ }
+ } else {
+ OS << "EMPTY_CLASS(" << Clause.getFormattedParserClassName();
+ }
+ OS << ");\n";
+ }
+}
+
+// Generate a list of the different clause classes for Flang.
+void GenerateFlangClauseParserClassList(const DirectiveLanguage &DirLang,
+ raw_ostream &OS) {
+
+ IfDefScope Scope("GEN_FLANG_CLAUSE_PARSER_CLASSES_LIST", OS);
+
+ OS << "\n";
+ llvm::interleaveComma(DirLang.getClauses(), OS, [&](Record *C) {
+ Clause Clause{C};
+ OS << Clause.getFormattedParserClassName() << "\n";
+ });
+}
+
+// Generate dump node list for the clauses holding a generic class name.
+void GenerateFlangClauseDump(const DirectiveLanguage &DirLang,
+ raw_ostream &OS) {
+
+ IfDefScope Scope("GEN_FLANG_DUMP_PARSE_TREE_CLAUSES", OS);
+
+ OS << "\n";
+ for (const auto &C : DirLang.getClauses()) {
+ Clause Clause{C};
+ OS << "NODE(" << DirLang.getFlangClauseBaseClass() << ", "
+ << Clause.getFormattedParserClassName() << ")\n";
+ }
+}
+
// Generate Unparse functions for clauses classes in the Flang parse-tree
// If the clause is a non-generic class, no entry is generated.
void GenerateFlangClauseUnparse(const DirectiveLanguage &DirLang,
                                raw_ostream &OS) {

  IfDefScope Scope("GEN_FLANG_CLAUSE_UNPARSE", OS);

  OS << "\n";

  for (const auto &C : DirLang.getClauses()) {
    Clause Clause{C};
    if (!Clause.getFlangClass().empty()) {
      if (Clause.isValueOptional() && Clause.getDefaultValue().empty()) {
        // Optional value with no default: delegate bracketing to Walk with
        // "(" / ")" arguments (presumably skips them when the value is
        // absent — confirm against the Flang unparser).
        OS << "void Unparse(const " << DirLang.getFlangClauseBaseClass()
           << "::" << Clause.getFormattedParserClassName() << " &x) {\n";
        OS << " Word(\"" << Clause.getName().upper() << "\");\n";

        OS << " Walk(\"(\", x.v, \")\");\n";
        OS << "}\n";
      } else if (Clause.isValueOptional()) {
        // Optional value with a default: always emit "(...)", printing the
        // default when the optional is empty.
        OS << "void Unparse(const " << DirLang.getFlangClauseBaseClass()
           << "::" << Clause.getFormattedParserClassName() << " &x) {\n";
        OS << " Word(\"" << Clause.getName().upper() << "\");\n";
        OS << " Put(\"(\");\n";
        OS << " if (x.v.has_value())\n";
        if (Clause.isValueList())
          OS << " Walk(x.v, \",\");\n";
        else
          OS << " Walk(x.v);\n";
        OS << " else\n";
        OS << " Put(\"" << Clause.getDefaultValue() << "\");\n";
        OS << " Put(\")\");\n";
        OS << "}\n";
      } else {
        // Mandatory value: NAME(value), comma-separating list values.
        OS << "void Unparse(const " << DirLang.getFlangClauseBaseClass()
           << "::" << Clause.getFormattedParserClassName() << " &x) {\n";
        OS << " Word(\"" << Clause.getName().upper() << "\");\n";
        OS << " Put(\"(\");\n";
        if (Clause.isValueList())
          OS << " Walk(x.v, \",\");\n";
        else
          OS << " Walk(x.v);\n";
        OS << " Put(\")\");\n";
        OS << "}\n";
      }
    } else {
      // Clause without a value: only the keyword, emitted from Before().
      OS << "void Before(const " << DirLang.getFlangClauseBaseClass()
         << "::" << Clause.getFormattedParserClassName() << " &) { Word(\""
         << Clause.getName().upper() << "\"); }\n";
    }
  }
}
+
+// Generate check in the Enter functions for clauses classes.
+void GenerateFlangClauseCheckPrototypes(const DirectiveLanguage &DirLang,
+ raw_ostream &OS) {
+
+ IfDefScope Scope("GEN_FLANG_CLAUSE_CHECK_ENTER", OS);
+
+ OS << "\n";
+ for (const auto &C : DirLang.getClauses()) {
+ Clause Clause{C};
+ OS << "void Enter(const parser::" << DirLang.getFlangClauseBaseClass()
+ << "::" << Clause.getFormattedParserClassName() << " &);\n";
+ }
+}
+
+// Generate the mapping for clauses between the parser class and the
+// corresponding clause Kind
+void GenerateFlangClauseParserKindMap(const DirectiveLanguage &DirLang,
+ raw_ostream &OS) {
+
+ IfDefScope Scope("GEN_FLANG_CLAUSE_PARSER_KIND_MAP", OS);
+
+ OS << "\n";
+ for (const auto &C : DirLang.getClauses()) {
+ Clause Clause{C};
+ OS << "if constexpr (std::is_same_v<A, parser::"
+ << DirLang.getFlangClauseBaseClass()
+ << "::" << Clause.getFormattedParserClassName();
+ OS << ">)\n";
+ OS << " return llvm::" << DirLang.getCppNamespace()
+ << "::Clause::" << DirLang.getClausePrefix() << Clause.getFormattedName()
+ << ";\n";
+ }
+
+ OS << "llvm_unreachable(\"Invalid " << DirLang.getName()
+ << " Parser clause\");\n";
+}
+
+bool compareClauseName(Record *R1, Record *R2) {
+ Clause C1{R1};
+ Clause C2{R2};
+ return (C1.getName() > C2.getName());
+}
+
// Generate the parser for the clauses.
void GenerateFlangClausesParser(const DirectiveLanguage &DirLang,
                                raw_ostream &OS) {
  std::vector<Record *> Clauses = DirLang.getClauses();
  // Sort clauses in reverse alphabetical order so with clauses with same
  // beginning, the longer option is tried before.
  llvm::sort(Clauses, compareClauseName);
  IfDefScope Scope("GEN_FLANG_CLAUSES_PARSER", OS);
  OS << "\n";
  unsigned index = 0;
  // NOTE(review): underflows if getClauses() is empty — presumably every
  // directive language defines at least one clause; confirm.
  unsigned lastClauseIndex = DirLang.getClauses().size() - 1;
  OS << "TYPE_PARSER(\n";
  for (const auto &C : Clauses) {
    Clause Clause{C};
    // Keyword matcher: the clause name alone, or an alternation of the name
    // and its aliases.
    if (Clause.getAliases().empty()) {
      OS << " \"" << Clause.getName() << "\"";
    } else {
      OS << " ("
         << "\"" << Clause.getName() << "\"_tok";
      for (StringRef alias : Clause.getAliases()) {
        OS << " || \"" << alias << "\"_tok";
      }
      OS << ")";
    }

    OS << " >> construct<" << DirLang.getFlangClauseBaseClass()
       << ">(construct<" << DirLang.getFlangClauseBaseClass()
       << "::" << Clause.getFormattedParserClassName() << ">(";
    // Value-less clause: close both construct<> calls and continue.
    if (Clause.getFlangClass().empty()) {
      OS << "))";
      if (index != lastClauseIndex)
        OS << " ||";
      OS << "\n";
      ++index;
      continue;
    }

    // Value-holding clause: value parser wrapped in parenthesized(), and in
    // maybe() when the value is optional.
    if (Clause.isValueOptional())
      OS << "maybe(";
    OS << "parenthesized(";

    if (!Clause.getPrefix().empty())
      OS << "\"" << Clause.getPrefix() << ":\" >> ";

    // The common Flang parser are used directly. Their name is identical to
    // the Flang class with first letter as lowercase. If the Flang class is
    // not a common class, we assume there is a specific Parser<>{} with the
    // Flang class name provided.
    llvm::SmallString<128> Scratch;
    StringRef Parser =
        llvm::StringSwitch<StringRef>(Clause.getFlangClass())
            .Case("Name", "name")
            .Case("ScalarIntConstantExpr", "scalarIntConstantExpr")
            .Case("ScalarIntExpr", "scalarIntExpr")
            .Case("ScalarLogicalExpr", "scalarLogicalExpr")
            .Default(("Parser<" + Clause.getFlangClass() + ">{}")
                         .toStringRef(Scratch));
    OS << Parser;
    // When the prefix is optional, also accept the bare value parser.
    if (!Clause.getPrefix().empty() && Clause.isPrefixOptional())
      OS << " || " << Parser;
    OS << ")"; // close parenthesized(.

    if (Clause.isValueOptional()) // close maybe(.
      OS << ")";
    OS << "))";
    if (index != lastClauseIndex)
      OS << " ||";
    OS << "\n";
    ++index;
  }
  OS << ")\n";
}
+
+// Generate the implementation section for the enumeration in the directive
+// language
+void EmitDirectivesFlangImpl(const DirectiveLanguage &DirLang,
+ raw_ostream &OS) {
+
+ GenerateDirectiveClauseSets(DirLang, OS);
+
+ GenerateDirectiveClauseMap(DirLang, OS);
+
+ GenerateFlangClauseParserClass(DirLang, OS);
+
+ GenerateFlangClauseParserClassList(DirLang, OS);
+
+ GenerateFlangClauseDump(DirLang, OS);
+
+ GenerateFlangClauseUnparse(DirLang, OS);
+
+ GenerateFlangClauseCheckPrototypes(DirLang, OS);
+
+ GenerateFlangClauseParserKindMap(DirLang, OS);
+
+ GenerateFlangClausesParser(DirLang, OS);
+}
+
// Emit the X-macro style clause listing consumed by legacy clang code.
void GenerateClauseClassMacro(const DirectiveLanguage &DirLang,
                              raw_ostream &OS) {
  // Generate macros style information for legacy code in clang
  IfDefScope Scope("GEN_CLANG_CLAUSE_CLASS", OS);

  OS << "\n";

  // Provide no-op fallbacks so includers may define only the macros they
  // care about.
  OS << "#ifndef CLAUSE\n";
  OS << "#define CLAUSE(Enum, Str, Implicit)\n";
  OS << "#endif\n";
  OS << "#ifndef CLAUSE_CLASS\n";
  OS << "#define CLAUSE_CLASS(Enum, Str, Class)\n";
  OS << "#endif\n";
  OS << "#ifndef CLAUSE_NO_CLASS\n";
  OS << "#define CLAUSE_NO_CLASS(Enum, Str)\n";
  OS << "#endif\n";
  OS << "\n";
  // Internal helpers: each expands to a CLAUSE entry plus the matching
  // CLAUSE_CLASS / CLAUSE_NO_CLASS entry.
  OS << "#define __CLAUSE(Name, Class) \\\n";
  OS << " CLAUSE(" << DirLang.getClausePrefix()
     << "##Name, #Name, /* Implicit */ false) \\\n";
  OS << " CLAUSE_CLASS(" << DirLang.getClausePrefix()
     << "##Name, #Name, Class)\n";
  OS << "#define __CLAUSE_NO_CLASS(Name) \\\n";
  OS << " CLAUSE(" << DirLang.getClausePrefix()
     << "##Name, #Name, /* Implicit */ false) \\\n";
  OS << " CLAUSE_NO_CLASS(" << DirLang.getClausePrefix() << "##Name, #Name)\n";
  OS << "#define __IMPLICIT_CLAUSE_CLASS(Name, Str, Class) \\\n";
  OS << " CLAUSE(" << DirLang.getClausePrefix()
     << "##Name, Str, /* Implicit */ true) \\\n";
  OS << " CLAUSE_CLASS(" << DirLang.getClausePrefix()
     << "##Name, Str, Class)\n";
  OS << "#define __IMPLICIT_CLAUSE_NO_CLASS(Name, Str) \\\n";
  OS << " CLAUSE(" << DirLang.getClausePrefix()
     << "##Name, Str, /* Implicit */ true) \\\n";
  OS << " CLAUSE_NO_CLASS(" << DirLang.getClausePrefix() << "##Name, Str)\n";
  OS << "\n";

  // Dispatch each clause to the right helper based on whether it has a
  // clang class and whether it is implicit.
  for (const auto &R : DirLang.getClauses()) {
    Clause C{R};
    if (C.getClangClass().empty()) { // NO_CLASS
      if (C.isImplicit()) {
        OS << "__IMPLICIT_CLAUSE_NO_CLASS(" << C.getFormattedName() << ", \""
           << C.getFormattedName() << "\")\n";
      } else {
        OS << "__CLAUSE_NO_CLASS(" << C.getFormattedName() << ")\n";
      }
    } else { // CLASS
      if (C.isImplicit()) {
        OS << "__IMPLICIT_CLAUSE_CLASS(" << C.getFormattedName() << ", \""
           << C.getFormattedName() << "\", " << C.getClangClass() << ")\n";
      } else {
        OS << "__CLAUSE(" << C.getFormattedName() << ", " << C.getClangClass()
           << ")\n";
      }
    }
  }

  OS << "\n";
  // Undefine the internal helpers and the user-facing macros.
  OS << "#undef __IMPLICIT_CLAUSE_NO_CLASS\n";
  OS << "#undef __IMPLICIT_CLAUSE_CLASS\n";
  OS << "#undef __CLAUSE\n";
  OS << "#undef CLAUSE_NO_CLASS\n";
  OS << "#undef CLAUSE_CLASS\n";
  OS << "#undef CLAUSE\n";
}
+
+// Generate the implemenation for the enumeration in the directive
+// language. This code can be included in library.
+void EmitDirectivesBasicImpl(const DirectiveLanguage &DirLang,
+ raw_ostream &OS) {
+ IfDefScope Scope("GEN_DIRECTIVES_IMPL", OS);
+
+ // getDirectiveKind(StringRef Str)
+ GenerateGetKind(DirLang.getDirectives(), OS, "Directive", DirLang,
+ DirLang.getDirectivePrefix(), /*ImplicitAsUnknown=*/false);
+
+ // getDirectiveName(Directive Kind)
+ GenerateGetName(DirLang.getDirectives(), OS, "Directive", DirLang,
+ DirLang.getDirectivePrefix());
+
+ // getClauseKind(StringRef Str)
+ GenerateGetKind(DirLang.getClauses(), OS, "Clause", DirLang,
+ DirLang.getClausePrefix(),
+ /*ImplicitAsUnknown=*/true);
+
+ // getClauseName(Clause Kind)
+ GenerateGetName(DirLang.getClauses(), OS, "Clause", DirLang,
+ DirLang.getClausePrefix());
+
+ // get<ClauseVal>Kind(StringRef Str)
+ GenerateGetKindClauseVal(DirLang, OS);
+
+ // isAllowedClauseForDirective(Directive D, Clause C, unsigned Version)
+ GenerateIsAllowedClause(DirLang, OS);
+}
+
+// Generate the implemenation section for the enumeration in the directive
+// language.
+void EmitDirectivesImpl(RecordKeeper &Records, raw_ostream &OS) {
+ const auto DirLang = DirectiveLanguage{Records};
+ if (DirLang.HasValidityErrors())
+ return;
+
+ EmitDirectivesFlangImpl(DirLang, OS);
+
+ GenerateClauseClassMacro(DirLang, OS);
+
+ EmitDirectivesBasicImpl(DirLang, OS);
+}
+
+} // namespace llvm
diff --git a/contrib/libs/llvm16/utils/TableGen/DisassemblerEmitter.cpp b/contrib/libs/llvm16/utils/TableGen/DisassemblerEmitter.cpp
new file mode 100644
index 0000000000..dfa4b30ee5
--- /dev/null
+++ b/contrib/libs/llvm16/utils/TableGen/DisassemblerEmitter.cpp
@@ -0,0 +1,138 @@
+//===- DisassemblerEmitter.cpp - Generate a disassembler ------------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+#include "CodeGenTarget.h"
+#include "WebAssemblyDisassemblerEmitter.h"
+#include "X86DisassemblerTables.h"
+#include "X86RecognizableInstr.h"
+#include "llvm/TableGen/Error.h"
+#include "llvm/TableGen/Record.h"
+#include "llvm/TableGen/TableGenBackend.h"
+
+using namespace llvm;
+using namespace llvm::X86Disassembler;
+
+/// DisassemblerEmitter - Contains disassembler table emitters for various
+/// architectures.
+
+/// X86 Disassembler Emitter
+///
+/// *** IF YOU'RE HERE TO RESOLVE A "Primary decode conflict", LOOK DOWN NEAR
+/// THE END OF THIS COMMENT!
+///
+/// The X86 disassembler emitter is part of the X86 Disassembler, which is
+/// documented in lib/Target/X86/X86Disassembler.h.
+///
+/// The emitter produces the tables that the disassembler uses to translate
+/// instructions. The emitter generates the following tables:
+///
+/// - One table (CONTEXTS_SYM) that contains a mapping of attribute masks to
+/// instruction contexts. Although for each attribute there are cases where
+/// that attribute determines decoding, in the majority of cases decoding is
+/// the same whether or not an attribute is present. For example, a 64-bit
+/// instruction with an OPSIZE prefix and an XS prefix decodes the same way in
+/// all cases as a 64-bit instruction with only OPSIZE set. (The XS prefix
+/// may have effects on its execution, but does not change the instruction
+/// returned.) This allows considerable space savings in other tables.
+/// - Six tables (ONEBYTE_SYM, TWOBYTE_SYM, THREEBYTE38_SYM, THREEBYTE3A_SYM,
+/// THREEBYTEA6_SYM, and THREEBYTEA7_SYM) contain the hierarchy that the
+/// decoder traverses while decoding an instruction. At the lowest level of
+/// this hierarchy are instruction UIDs, 16-bit integers that can be used to
+/// uniquely identify the instruction and correspond exactly to its position
+/// in the list of CodeGenInstructions for the target.
+/// - One table (INSTRUCTIONS_SYM) contains information about the operands of
+/// each instruction and how to decode them.
+///
+/// During table generation, there may be conflicts between instructions that
+/// occupy the same space in the decode tables. These conflicts are resolved as
+/// follows in setTableFields() (X86DisassemblerTables.cpp)
+///
+/// - If the current context is the native context for one of the instructions
+/// (that is, the attributes specified for it in the LLVM tables specify
+/// precisely the current context), then it has priority.
+/// - If the current context isn't native for either of the instructions, then
+/// the higher-priority context wins (that is, the one that is more specific).
+/// That hierarchy is determined by outranks() (X86DisassemblerTables.cpp)
+/// - If the current context is native for both instructions, then the table
+/// emitter reports a conflict and dies.
+///
+/// *** RESOLUTION FOR "Primary decode conflict"S
+///
+/// If two instructions collide, typically the solution is (in order of
+/// likelihood):
+///
+/// (1) to filter out one of the instructions by editing filter()
+/// (X86RecognizableInstr.cpp). This is the most common resolution, but
+/// check the Intel manuals first to make sure that (2) and (3) are not the
+/// problem.
+/// (2) to fix the tables (X86.td and its subsidiaries) so the opcodes are
+/// accurate. Sometimes they are not.
+/// (3) to fix the tables to reflect the actual context (for example, required
+/// prefixes), and possibly to add a new context by editing
+/// include/llvm/Support/X86DisassemblerDecoderCommon.h. This is unlikely
+/// to be the cause.
+///
+/// DisassemblerEmitter.cpp contains the implementation for the emitter,
+/// which simply pulls out instructions from the CodeGenTarget and pushes them
+/// into X86DisassemblerTables.
+/// X86DisassemblerTables.h contains the interface for the instruction tables,
+/// which manage and emit the structures discussed above.
+/// X86DisassemblerTables.cpp contains the implementation for the instruction
+/// tables.
+/// X86ModRMFilters.h contains filters that can be used to determine which
+/// ModR/M values are valid for a particular instruction. These are used to
+/// populate ModRMDecisions.
+/// X86RecognizableInstr.h contains the interface for a single instruction,
+/// which knows how to translate itself from a CodeGenInstruction and provide
+/// the information necessary for integration into the tables.
+/// X86RecognizableInstr.cpp contains the implementation for a single
+/// instruction.
+
+namespace llvm {
+
+extern void EmitDecoder(RecordKeeper &RK, raw_ostream &OS,
+ const std::string &PredicateNamespace);
+
+void EmitDisassembler(RecordKeeper &Records, raw_ostream &OS) {
+ CodeGenTarget Target(Records);
+ emitSourceFileHeader(" * " + Target.getName().str() + " Disassembler", OS);
+
+ // X86 uses a custom disassembler.
+ if (Target.getName() == "X86") {
+ DisassemblerTables Tables;
+
+ ArrayRef<const CodeGenInstruction*> numberedInstructions =
+ Target.getInstructionsByEnumValue();
+
+ for (unsigned i = 0, e = numberedInstructions.size(); i != e; ++i)
+ RecognizableInstr::processInstr(Tables, *numberedInstructions[i], i);
+
+ if (Tables.hasConflicts()) {
+ PrintError(Target.getTargetRecord()->getLoc(), "Primary decode conflict");
+ return;
+ }
+
+ Tables.emit(OS);
+ return;
+ }
+
+ // WebAssembly has variable length opcodes, so can't use EmitFixedLenDecoder
+ // below (which depends on a Size table-gen Record), and also uses a custom
+ // disassembler.
+ if (Target.getName() == "WebAssembly") {
+ emitWebAssemblyDisassemblerTables(OS, Target.getInstructionsByEnumValue());
+ return;
+ }
+
+ std::string PredicateNamespace = std::string(Target.getName());
+ if (PredicateNamespace == "Thumb")
+ PredicateNamespace = "ARM";
+ EmitDecoder(Records, OS, PredicateNamespace);
+}
+
+} // end namespace llvm
diff --git a/contrib/libs/llvm16/utils/TableGen/ExegesisEmitter.cpp b/contrib/libs/llvm16/utils/TableGen/ExegesisEmitter.cpp
new file mode 100644
index 0000000000..bc8ccdac55
--- /dev/null
+++ b/contrib/libs/llvm16/utils/TableGen/ExegesisEmitter.cpp
@@ -0,0 +1,211 @@
+//===- ExegesisEmitter.cpp - Generate exegesis target data ----------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// This tablegen backend emits llvm-exegesis information.
+//
+//===----------------------------------------------------------------------===//
+
+#include "llvm/ADT/STLExtras.h"
+#include "llvm/ADT/SmallSet.h"
+#include "llvm/ADT/StringRef.h"
+#include "llvm/Support/raw_ostream.h"
+#include "llvm/TableGen/Error.h"
+#include "llvm/TableGen/Record.h"
+#include "llvm/TableGen/TableGenBackend.h"
+#include <cassert>
+#include <map>
+#include <string>
+#include <vector>
+
+using namespace llvm;
+
+#define DEBUG_TYPE "exegesis-emitter"
+
+namespace {
+
// TableGen backend emitting the llvm-exegesis pfm counter tables for the
// single Target defined in the record set.
class ExegesisEmitter {
public:
  ExegesisEmitter(RecordKeeper &RK);

  // Emit the file header and all tables to OS.
  void run(raw_ostream &OS) const;

private:
  // Dense index of a counter name in the emitted name table; fatal error
  // for unknown names.
  unsigned getPfmCounterId(llvm::StringRef Name) const {
    const auto It = PfmCounterNameTable.find(Name);
    if (It == PfmCounterNameTable.end())
      PrintFatalError("no pfm counter id for " + Name);
    return It->second;
  }

  // Collects all the ProcPfmCounters definitions available in this target.
  void emitPfmCounters(raw_ostream &OS) const;

  // Emit one PfmCountersInfo for Def; advances IssueCountersTableOffset by
  // the number of issue counters Def contributes to the shared table.
  void emitPfmCountersInfo(const Record &Def,
                           unsigned &IssueCountersTableOffset,
                           raw_ostream &OS) const;

  // Emit the CpuName-sorted CpuAndPfmCounters lookup table.
  void emitPfmCountersLookupTable(raw_ostream &OS) const;

  RecordKeeper &Records;
  // Name of the unique Target record; used as a prefix for emitted symbols.
  std::string Target;

  // Table of counter name -> counter index.
  const std::map<llvm::StringRef, unsigned> PfmCounterNameTable;
};
+
+static std::map<llvm::StringRef, unsigned>
+collectPfmCounters(const RecordKeeper &Records) {
+ std::map<llvm::StringRef, unsigned> PfmCounterNameTable;
+ const auto AddPfmCounterName = [&PfmCounterNameTable](
+ const Record *PfmCounterDef) {
+ const llvm::StringRef Counter = PfmCounterDef->getValueAsString("Counter");
+ if (!Counter.empty())
+ PfmCounterNameTable.emplace(Counter, 0);
+ };
+ for (Record *Def : Records.getAllDerivedDefinitions("ProcPfmCounters")) {
+ // Check that ResourceNames are unique.
+ llvm::SmallSet<llvm::StringRef, 16> Seen;
+ for (const Record *IssueCounter :
+ Def->getValueAsListOfDefs("IssueCounters")) {
+ const llvm::StringRef ResourceName =
+ IssueCounter->getValueAsString("ResourceName");
+ if (ResourceName.empty())
+ PrintFatalError(IssueCounter->getLoc(), "invalid empty ResourceName");
+ if (!Seen.insert(ResourceName).second)
+ PrintFatalError(IssueCounter->getLoc(),
+ "duplicate ResourceName " + ResourceName);
+ AddPfmCounterName(IssueCounter);
+ }
+ AddPfmCounterName(Def->getValueAsDef("CycleCounter"));
+ AddPfmCounterName(Def->getValueAsDef("UopsCounter"));
+ }
+ unsigned Index = 0;
+ for (auto &NameAndIndex : PfmCounterNameTable)
+ NameAndIndex.second = Index++;
+ return PfmCounterNameTable;
+}
+
+ExegesisEmitter::ExegesisEmitter(RecordKeeper &RK)
+ : Records(RK), PfmCounterNameTable(collectPfmCounters(RK)) {
+ std::vector<Record *> Targets = Records.getAllDerivedDefinitions("Target");
+ if (Targets.size() == 0)
+ PrintFatalError("No 'Target' subclasses defined!");
+ if (Targets.size() != 1)
+ PrintFatalError("Multiple subclasses of Target defined!");
+ Target = std::string(Targets[0]->getName());
+}
+
+void ExegesisEmitter::emitPfmCountersInfo(const Record &Def,
+ unsigned &IssueCountersTableOffset,
+ raw_ostream &OS) const {
+ const auto CycleCounter =
+ Def.getValueAsDef("CycleCounter")->getValueAsString("Counter");
+ const auto UopsCounter =
+ Def.getValueAsDef("UopsCounter")->getValueAsString("Counter");
+ const size_t NumIssueCounters =
+ Def.getValueAsListOfDefs("IssueCounters").size();
+
+ OS << "\nstatic const PfmCountersInfo " << Target << Def.getName()
+ << " = {\n";
+
+ // Cycle Counter.
+ if (CycleCounter.empty())
+ OS << " nullptr, // No cycle counter.\n";
+ else
+ OS << " " << Target << "PfmCounterNames[" << getPfmCounterId(CycleCounter)
+ << "], // Cycle counter\n";
+
+ // Uops Counter.
+ if (UopsCounter.empty())
+ OS << " nullptr, // No uops counter.\n";
+ else
+ OS << " " << Target << "PfmCounterNames[" << getPfmCounterId(UopsCounter)
+ << "], // Uops counter\n";
+
+ // Issue Counters
+ if (NumIssueCounters == 0)
+ OS << " nullptr, // No issue counters.\n 0\n";
+ else
+ OS << " " << Target << "PfmIssueCounters + " << IssueCountersTableOffset
+ << ", " << NumIssueCounters << " // Issue counters.\n";
+
+ OS << "};\n";
+ IssueCountersTableOffset += NumIssueCounters;
+}
+
// Emit the counter-name table, the (optional) shared issue-counters table,
// and one PfmCountersInfo per ProcPfmCounters def.
void ExegesisEmitter::emitPfmCounters(raw_ostream &OS) const {
  // Emit the counter name table.
  OS << "\nstatic const char *" << Target << "PfmCounterNames[] = {\n";
  for (const auto &NameAndIndex : PfmCounterNameTable)
    OS << " \"" << NameAndIndex.first << "\", // " << NameAndIndex.second
       << "\n";
  OS << "};\n\n";

  // Emit the IssueCounters table.
  const auto PfmCounterDefs =
      Records.getAllDerivedDefinitions("ProcPfmCounters");
  // Only emit if non-empty.
  const bool HasAtLeastOnePfmIssueCounter =
      llvm::any_of(PfmCounterDefs, [](const Record *Def) {
        return !Def->getValueAsListOfDefs("IssueCounters").empty();
      });
  if (HasAtLeastOnePfmIssueCounter) {
    OS << "static const PfmCountersInfo::IssueCounter " << Target
       << "PfmIssueCounters[] = {\n";
    for (const Record *Def : PfmCounterDefs) {
      for (const Record *ICDef : Def->getValueAsListOfDefs("IssueCounters"))
        OS << " { " << Target << "PfmCounterNames["
           << getPfmCounterId(ICDef->getValueAsString("Counter")) << "], \""
           << ICDef->getValueAsString("ResourceName") << "\"},\n";
    }
    OS << "};\n";
  }

  // Now generate the PfmCountersInfo.
  unsigned IssueCountersTableOffset = 0;
  for (const Record *Def : PfmCounterDefs)
    emitPfmCountersInfo(*Def, IssueCountersTableOffset, OS);

  OS << "\n";
}
+
+void ExegesisEmitter::emitPfmCountersLookupTable(raw_ostream &OS) const {
+ std::vector<Record *> Bindings =
+ Records.getAllDerivedDefinitions("PfmCountersBinding");
+ assert(!Bindings.empty() && "there must be at least one binding");
+ llvm::sort(Bindings, [](const Record *L, const Record *R) {
+ return L->getValueAsString("CpuName") < R->getValueAsString("CpuName");
+ });
+
+ OS << "// Sorted (by CpuName) array of pfm counters.\n"
+ << "static const CpuAndPfmCounters " << Target << "CpuPfmCounters[] = {\n";
+ for (Record *Binding : Bindings) {
+ // Emit as { "cpu", procinit },
+ OS << " { \"" //
+ << Binding->getValueAsString("CpuName") << "\"," //
+ << " &" << Target << Binding->getValueAsDef("Counters")->getName() //
+ << " },\n";
+ }
+ OS << "};\n\n";
+}
+
// Entry point: emit the generated-file header followed by the counter
// tables and the per-CPU lookup table.
void ExegesisEmitter::run(raw_ostream &OS) const {
  emitSourceFileHeader("Exegesis Tables", OS);
  emitPfmCounters(OS);
  emitPfmCountersLookupTable(OS);
}
+
+} // end anonymous namespace
+
namespace llvm {

// Public TableGen backend entry point: emit exegesis tables for RK to OS.
void EmitExegesis(RecordKeeper &RK, raw_ostream &OS) {
  ExegesisEmitter(RK).run(OS);
}

} // end namespace llvm
diff --git a/contrib/libs/llvm16/utils/TableGen/FastISelEmitter.cpp b/contrib/libs/llvm16/utils/TableGen/FastISelEmitter.cpp
new file mode 100644
index 0000000000..0a88f67be1
--- /dev/null
+++ b/contrib/libs/llvm16/utils/TableGen/FastISelEmitter.cpp
@@ -0,0 +1,875 @@
+//===- FastISelEmitter.cpp - Generate an instruction selector ------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// This tablegen backend emits code for use by the "fast" instruction
+// selection algorithm. See the comments at the top of
+// lib/CodeGen/SelectionDAG/FastISel.cpp for background.
+//
+// This file scans through the target's tablegen instruction-info files
+// and extracts instructions with obvious-looking patterns, and it emits
+// code to look up these instructions by type and operator.
+//
+//===----------------------------------------------------------------------===//
+
+#include "CodeGenDAGPatterns.h"
+#include "CodeGenInstruction.h"
+#include "llvm/ADT/StringSwitch.h"
+#include "llvm/Support/ErrorHandling.h"
+#include "llvm/TableGen/Error.h"
+#include "llvm/TableGen/Record.h"
+#include "llvm/TableGen/TableGenBackend.h"
+#include <utility>
+using namespace llvm;
+
+
+/// InstructionMemo - This class holds additional information about an
+/// instruction needed to emit code for it.
+///
+namespace {
+struct InstructionMemo {
+  // Name of the target instruction this memo describes.
+  std::string Name;
+  // Destination register class; stays null for EXTRACT_SUBREG patterns
+  // (see FastISelMap::collectPatterns).
+  const CodeGenRegisterClass *RC;
+  // Sub-register index (as a qualified name) when the destination pattern
+  // is an EXTRACT_SUBREG; empty otherwise.
+  std::string SubRegNo;
+  // One entry per source operand: the implicit physical register the value
+  // must be copied into, or "" for a normal virtual-register operand.
+  std::vector<std::string> PhysRegs;
+  // C++ expression guarding this pattern (e.g. subtarget feature checks).
+  std::string PredicateCheck;
+
+  InstructionMemo(StringRef Name, const CodeGenRegisterClass *RC,
+                  std::string SubRegNo, std::vector<std::string> PhysRegs,
+                  std::string PredicateCheck)
+      : Name(Name), RC(RC), SubRegNo(std::move(SubRegNo)),
+        PhysRegs(std::move(PhysRegs)),
+        PredicateCheck(std::move(PredicateCheck)) {}
+
+  // Make sure we do not copy InstructionMemo.
+  InstructionMemo(const InstructionMemo &Other) = delete;
+  InstructionMemo(InstructionMemo &&Other) = default;
+};
+} // End anonymous namespace
+
+/// ImmPredicateSet - This uniques predicates (represented as a string) and
+/// gives them unique (small) integer ID's that start at 0.
+namespace {
+class ImmPredicateSet {
+  // Maps a predicate's originating pattern fragment to its ID+1; a value
+  // of 0 means "not registered yet" (default DenseMap value).
+  DenseMap<TreePattern *, unsigned> ImmIDs;
+  // Predicates in registration order; index == the 0-based ID.
+  std::vector<TreePredicateFn> PredsByName;
+public:
+
+  // Returns a stable 0-based ID for Pred, assigning a fresh one on first
+  // use.
+  unsigned getIDFor(TreePredicateFn Pred) {
+    unsigned &Entry = ImmIDs[Pred.getOrigPatFragRecord()];
+    if (Entry == 0) {
+      PredsByName.push_back(Pred);
+      Entry = PredsByName.size();
+    }
+    return Entry-1;
+  }
+
+  // Returns the predicate previously registered under 0-based ID i.
+  const TreePredicateFn &getPredicate(unsigned i) {
+    assert(i < PredsByName.size());
+    return PredsByName[i];
+  }
+
+  typedef std::vector<TreePredicateFn>::const_iterator iterator;
+  iterator begin() const { return PredsByName.begin(); }
+  iterator end() const { return PredsByName.end(); }
+
+};
+} // End anonymous namespace
+
+/// OperandsSignature - This class holds a description of a list of operand
+/// types. It has utility methods for emitting text based on the operands.
+///
+namespace {
+struct OperandsSignature {
+  // OpKind - One-char encoding of an operand kind: register, FP immediate,
+  // or integer immediate. Integer immediates additionally carry an
+  // immediate-predicate code (0 = unpredicated, N = predicate ID N-1).
+  class OpKind {
+    enum { OK_Reg, OK_FP, OK_Imm, OK_Invalid = -1 };
+    char Repr;
+  public:
+
+    OpKind() : Repr(OK_Invalid) {}
+
+    bool operator<(OpKind RHS) const { return Repr < RHS.Repr; }
+    bool operator==(OpKind RHS) const { return Repr == RHS.Repr; }
+
+    static OpKind getReg() { OpKind K; K.Repr = OK_Reg; return K; }
+    static OpKind getFP() { OpKind K; K.Repr = OK_FP; return K; }
+    static OpKind getImm(unsigned V) {
+      assert((unsigned)OK_Imm+V < 128 &&
+             "Too many integer predicates for the 'Repr' char");
+      OpKind K; K.Repr = OK_Imm+V; return K;
+    }
+
+    bool isReg() const { return Repr == OK_Reg; }
+    bool isFP() const { return Repr == OK_FP; }
+    bool isImm() const { return Repr >= OK_Imm; }
+
+    unsigned getImmCode() const { assert(isImm()); return Repr-OK_Imm; }
+
+    // Appends this operand's mangling character ('r', 'f' or 'i') to OS.
+    // A predicated immediate also gets "_<predicate-fn-name>" appended,
+    // unless StripImmCodes is set.
+    void printManglingSuffix(raw_ostream &OS, ImmPredicateSet &ImmPredicates,
+                             bool StripImmCodes) const {
+      if (isReg())
+        OS << 'r';
+      else if (isFP())
+        OS << 'f';
+      else {
+        OS << 'i';
+        if (!StripImmCodes)
+          if (unsigned Code = getImmCode())
+            OS << "_" << ImmPredicates.getPredicate(Code-1).getFnName();
+      }
+    }
+  };
+
+
+  SmallVector<OpKind, 3> Operands;
+
+  bool operator<(const OperandsSignature &O) const {
+    return Operands < O.Operands;
+  }
+  bool operator==(const OperandsSignature &O) const {
+    return Operands == O.Operands;
+  }
+
+  bool empty() const { return Operands.empty(); }
+
+  // Returns true if any immediate operand carries a nonzero predicate code.
+  bool hasAnyImmediateCodes() const {
+    for (unsigned i = 0, e = Operands.size(); i != e; ++i)
+      if (Operands[i].isImm() && Operands[i].getImmCode() != 0)
+        return true;
+    return false;
+  }
+
+  /// getWithoutImmCodes - Return a copy of this with any immediate codes forced
+  /// to zero.
+  OperandsSignature getWithoutImmCodes() const {
+    OperandsSignature Result;
+    for (unsigned i = 0, e = Operands.size(); i != e; ++i)
+      if (!Operands[i].isImm())
+        Result.Operands.push_back(Operands[i]);
+      else
+        Result.Operands.push_back(OpKind::getImm(0));
+    return Result;
+  }
+
+  // Emits a C++ boolean expression that ANDs together, for every predicated
+  // immediate operand, a VT check and a call to the predicate function on
+  // the corresponding imm<i> argument.
+  void emitImmediatePredicate(raw_ostream &OS, ImmPredicateSet &ImmPredicates) {
+    bool EmittedAnything = false;
+    for (unsigned i = 0, e = Operands.size(); i != e; ++i) {
+      if (!Operands[i].isImm()) continue;
+
+      unsigned Code = Operands[i].getImmCode();
+      if (Code == 0) continue;
+
+      if (EmittedAnything)
+        OS << " &&\n        ";
+
+      TreePredicateFn PredFn = ImmPredicates.getPredicate(Code-1);
+
+      // Emit the type check.
+      TreePattern *TP = PredFn.getOrigPatFragRecord();
+      ValueTypeByHwMode VVT = TP->getTree(0)->getType(0);
+      assert(VVT.isSimple() &&
+             "Cannot use variable value types with fast isel");
+      OS << "VT == " << getEnumName(VVT.getSimple().SimpleTy) << " && ";
+
+      OS << PredFn.getFnName() << "(imm" << i <<')';
+      EmittedAnything = true;
+    }
+  }
+
+  /// initialize - Examine the given pattern and initialize the contents
+  /// of the Operands array accordingly. Return true if all the operands
+  /// are supported, false otherwise.
+  ///
+  bool initialize(TreePatternNode *InstPatNode, const CodeGenTarget &Target,
+                  MVT::SimpleValueType VT,
+                  ImmPredicateSet &ImmediatePredicates,
+                  const CodeGenRegisterClass *OrigDstRC) {
+    if (InstPatNode->isLeaf())
+      return false;
+
+    // A bare imm/fpimm at the root is a single-operand signature.
+    if (InstPatNode->getOperator()->getName() == "imm") {
+      Operands.push_back(OpKind::getImm(0));
+      return true;
+    }
+
+    if (InstPatNode->getOperator()->getName() == "fpimm") {
+      Operands.push_back(OpKind::getFP());
+      return true;
+    }
+
+    const CodeGenRegisterClass *DstRC = nullptr;
+
+    for (unsigned i = 0, e = InstPatNode->getNumChildren(); i != e; ++i) {
+      TreePatternNode *Op = InstPatNode->getChild(i);
+
+      // Handle imm operands specially.
+      if (!Op->isLeaf() && Op->getOperator()->getName() == "imm") {
+        unsigned PredNo = 0;
+        if (!Op->getPredicateCalls().empty()) {
+          TreePredicateFn PredFn = Op->getPredicateCalls()[0].Fn;
+          // If there is more than one predicate weighing in on this operand
+          // then we don't handle it. This doesn't typically happen for
+          // immediates anyway.
+          if (Op->getPredicateCalls().size() > 1 ||
+              !PredFn.isImmediatePattern() || PredFn.usesOperands())
+            return false;
+          // Ignore any instruction with 'FastIselShouldIgnore', these are
+          // not needed and just bloat the fast instruction selector. For
+          // example, X86 doesn't need to generate code to match ADD16ri8 since
+          // ADD16ri will do just fine.
+          Record *Rec = PredFn.getOrigPatFragRecord()->getRecord();
+          if (Rec->getValueAsBit("FastIselShouldIgnore"))
+            return false;
+
+          PredNo = ImmediatePredicates.getIDFor(PredFn)+1;
+        }
+
+        Operands.push_back(OpKind::getImm(PredNo));
+        continue;
+      }
+
+
+      // For now, filter out any operand with a predicate.
+      // For now, filter out any operand with multiple values.
+      if (!Op->getPredicateCalls().empty() || Op->getNumTypes() != 1)
+        return false;
+
+      if (!Op->isLeaf()) {
+        if (Op->getOperator()->getName() == "fpimm") {
+          Operands.push_back(OpKind::getFP());
+          continue;
+        }
+        // For now, ignore other non-leaf nodes.
+        return false;
+      }
+
+      assert(Op->hasConcreteType(0) && "Type infererence not done?");
+
+      // For now, all the operands must have the same type (if they aren't
+      // immediates). Note that this causes us to reject variable sized shifts
+      // on X86.
+      if (Op->getSimpleType(0) != VT)
+        return false;
+
+      DefInit *OpDI = dyn_cast<DefInit>(Op->getLeafValue());
+      if (!OpDI)
+        return false;
+      Record *OpLeafRec = OpDI->getDef();
+
+      // For now, the only other thing we accept is register operands.
+      const CodeGenRegisterClass *RC = nullptr;
+      if (OpLeafRec->isSubClassOf("RegisterOperand"))
+        OpLeafRec = OpLeafRec->getValueAsDef("RegClass");
+      if (OpLeafRec->isSubClassOf("RegisterClass"))
+        RC = &Target.getRegisterClass(OpLeafRec);
+      else if (OpLeafRec->isSubClassOf("Register"))
+        RC = Target.getRegBank().getRegClassForRegister(OpLeafRec);
+      else if (OpLeafRec->isSubClassOf("ValueType")) {
+        RC = OrigDstRC;
+      } else
+        return false;
+
+      // For now, this needs to be a register class of some sort.
+      if (!RC)
+        return false;
+
+      // For now, all the operands must have the same register class or be
+      // a strict subclass of the destination.
+      if (DstRC) {
+        if (DstRC != RC && !DstRC->hasSubClass(RC))
+          return false;
+      } else
+        DstRC = RC;
+      Operands.push_back(OpKind::getReg());
+    }
+    return true;
+  }
+
+  // Prints the C++ parameter list for this signature: "unsigned Op<i>",
+  // "uint64_t imm<i>" or "const ConstantFP *f<i>" per operand.
+  void PrintParameters(raw_ostream &OS) const {
+    ListSeparator LS;
+    for (unsigned i = 0, e = Operands.size(); i != e; ++i) {
+      OS << LS;
+      if (Operands[i].isReg()) {
+        OS << "unsigned Op" << i;
+      } else if (Operands[i].isImm()) {
+        OS << "uint64_t imm" << i;
+      } else if (Operands[i].isFP()) {
+        OS << "const ConstantFP *f" << i;
+      } else {
+        llvm_unreachable("Unknown operand kind!");
+      }
+    }
+  }
+
+  // Prints the call arguments, skipping operands bound to implicit physical
+  // registers (non-empty entries of PR).
+  void PrintArguments(raw_ostream &OS,
+                      const std::vector<std::string> &PR) const {
+    assert(PR.size() == Operands.size());
+    ListSeparator LS;
+    for (unsigned i = 0, e = Operands.size(); i != e; ++i) {
+      if (PR[i] != "")
+        // Implicit physical register operand.
+        continue;
+
+      OS << LS;
+      if (Operands[i].isReg()) {
+        OS << "Op" << i;
+      } else if (Operands[i].isImm()) {
+        OS << "imm" << i;
+      } else if (Operands[i].isFP()) {
+        OS << "f" << i;
+      } else {
+        llvm_unreachable("Unknown operand kind!");
+      }
+    }
+  }
+
+  // Prints all call arguments (no physical-register filtering).
+  void PrintArguments(raw_ostream &OS) const {
+    ListSeparator LS;
+    for (unsigned i = 0, e = Operands.size(); i != e; ++i) {
+      OS << LS;
+      if (Operands[i].isReg()) {
+        OS << "Op" << i;
+      } else if (Operands[i].isImm()) {
+        OS << "imm" << i;
+      } else if (Operands[i].isFP()) {
+        OS << "f" << i;
+      } else {
+        llvm_unreachable("Unknown operand kind!");
+      }
+    }
+  }
+
+
+  // Prints the function-name mangling suffix, skipping operands bound to
+  // implicit physical registers (non-empty entries of PR).
+  void PrintManglingSuffix(raw_ostream &OS, const std::vector<std::string> &PR,
+                           ImmPredicateSet &ImmPredicates,
+                           bool StripImmCodes = false) const {
+    for (unsigned i = 0, e = Operands.size(); i != e; ++i) {
+      if (PR[i] != "")
+        // Implicit physical register operand. e.g. Instruction::Mul expect to
+        // select to a binary op. On x86, mul may take a single operand with
+        // the other operand being implicit. We must emit something that looks
+        // like a binary instruction except for the very inner fastEmitInst_*
+        // call.
+        continue;
+      Operands[i].printManglingSuffix(OS, ImmPredicates, StripImmCodes);
+    }
+  }
+
+  // Prints the full function-name mangling suffix for this signature.
+  void PrintManglingSuffix(raw_ostream &OS, ImmPredicateSet &ImmPredicates,
+                           bool StripImmCodes = false) const {
+    for (unsigned i = 0, e = Operands.size(); i != e; ++i)
+      Operands[i].printManglingSuffix(OS, ImmPredicates, StripImmCodes);
+  }
+};
+} // End anonymous namespace
+
+namespace {
+class FastISelMap {
+  // A multimap is needed instead of a "plain" map because the key is
+  // the instruction's complexity (an int) and they are not unique.
+  typedef std::multimap<int, InstructionMemo> PredMap;
+  // Nested lookup: return VT -> operand VT -> opcode name -> operand
+  // signature, innermost-first, ending in complexity-keyed instructions.
+  typedef std::map<MVT::SimpleValueType, PredMap> RetPredMap;
+  typedef std::map<MVT::SimpleValueType, RetPredMap> TypeRetPredMap;
+  typedef std::map<std::string, TypeRetPredMap> OpcodeTypeRetPredMap;
+  typedef std::map<OperandsSignature, OpcodeTypeRetPredMap>
+            OperandsOpcodeTypeRetPredMap;
+
+  // All the "simple" patterns found by collectPatterns, keyed by operand
+  // signature, opcode, operand VT and return VT.
+  OperandsOpcodeTypeRetPredMap SimplePatterns;
+
+  // This is used to check that there are no duplicate predicates
+  std::set<std::tuple<OperandsSignature, std::string, MVT::SimpleValueType,
+                      MVT::SimpleValueType, std::string>>
+      SimplePatternsCheck;
+
+  // Maps a signature with immediate codes stripped to all of its predicated
+  // variants, so the generated code can try the constrained forms first.
+  std::map<OperandsSignature, std::vector<OperandsSignature> >
+    SignaturesWithConstantForms;
+
+  // Target instruction namespace (e.g. "X86").
+  StringRef InstNS;
+  // Unique table of immediate predicates referenced by the patterns.
+  ImmPredicateSet ImmediatePredicates;
+public:
+  explicit FastISelMap(StringRef InstNS);
+
+  void collectPatterns(CodeGenDAGPatterns &CGP);
+  void printImmediatePredicates(raw_ostream &OS);
+  void printFunctionDefinitions(raw_ostream &OS);
+private:
+  void emitInstructionCode(raw_ostream &OS,
+                           const OperandsSignature &Operands,
+                           const PredMap &PM,
+                           const std::string &RetVTName);
+};
+} // End anonymous namespace
+
+// Returns the SDNode's enum name (e.g. "ISD::ADD") for the given record.
+static std::string getOpcodeName(Record *Op, CodeGenDAGPatterns &CGP) {
+  return std::string(CGP.getSDNodeInfo(Op).getEnumName());
+}
+
+// Makes an opcode name usable inside a C identifier by replacing the first
+// "::" (e.g. "ISD::ADD" -> "ISD_ADD").
+static std::string getLegalCName(std::string OpName) {
+  std::string::size_type pos = OpName.find("::");
+  if (pos != std::string::npos)
+    OpName.replace(pos, 2, "_");
+  return OpName;
+}
+
+FastISelMap::FastISelMap(StringRef instns) : InstNS(instns) {}
+
+// Returns "Namespace::RegName" if Op is a leaf bound to a physical
+// register, or the empty string otherwise.
+static std::string PhyRegForNode(TreePatternNode *Op,
+                                 const CodeGenTarget &Target) {
+  std::string PhysReg;
+
+  if (!Op->isLeaf())
+    return PhysReg;
+
+  Record *OpLeafRec = cast<DefInit>(Op->getLeafValue())->getDef();
+  if (!OpLeafRec->isSubClassOf("Register"))
+    return PhysReg;
+
+  PhysReg += cast<StringInit>(OpLeafRec->getValue("Namespace")->getValue())
+                 ->getValue();
+  PhysReg += "::";
+  PhysReg += Target.getRegBank().getReg(OpLeafRec)->getName();
+  return PhysReg;
+}
+
+// Scans every DAG pattern known to CGP and records the ones simple enough
+// for FastISel (single destination instruction, register/imm/fpimm source
+// operands, at most one result) into SimplePatterns, registering any
+// immediate predicates along the way.
+void FastISelMap::collectPatterns(CodeGenDAGPatterns &CGP) {
+  const CodeGenTarget &Target = CGP.getTargetInfo();
+
+  // Scan through all the patterns and record the simple ones.
+  for (CodeGenDAGPatterns::ptm_iterator I = CGP.ptm_begin(),
+       E = CGP.ptm_end(); I != E; ++I) {
+    const PatternToMatch &Pattern = *I;
+
+    // For now, just look at Instructions, so that we don't have to worry
+    // about emitting multiple instructions for a pattern.
+    TreePatternNode *Dst = Pattern.getDstPattern();
+    if (Dst->isLeaf()) continue;
+    Record *Op = Dst->getOperator();
+    if (!Op->isSubClassOf("Instruction"))
+      continue;
+    CodeGenInstruction &II = CGP.getTargetInfo().getInstruction(Op);
+    if (II.Operands.empty())
+      continue;
+
+    // Allow instructions to be marked as unavailable for FastISel for
+    // certain cases, i.e. an ISA has two 'and' instruction which differ
+    // by what registers they can use but are otherwise identical for
+    // codegen purposes.
+    if (II.FastISelShouldIgnore)
+      continue;
+
+    // For now, ignore multi-instruction patterns.
+    bool MultiInsts = false;
+    for (unsigned i = 0, e = Dst->getNumChildren(); i != e; ++i) {
+      TreePatternNode *ChildOp = Dst->getChild(i);
+      if (ChildOp->isLeaf())
+        continue;
+      if (ChildOp->getOperator()->isSubClassOf("Instruction")) {
+        MultiInsts = true;
+        break;
+      }
+    }
+    if (MultiInsts)
+      continue;
+
+    // For now, ignore instructions where the first operand is not an
+    // output register.
+    const CodeGenRegisterClass *DstRC = nullptr;
+    std::string SubRegNo;
+    if (Op->getName() != "EXTRACT_SUBREG") {
+      Record *Op0Rec = II.Operands[0].Rec;
+      if (Op0Rec->isSubClassOf("RegisterOperand"))
+        Op0Rec = Op0Rec->getValueAsDef("RegClass");
+      if (!Op0Rec->isSubClassOf("RegisterClass"))
+        continue;
+      DstRC = &Target.getRegisterClass(Op0Rec);
+      if (!DstRC)
+        continue;
+    } else {
+      // EXTRACT_SUBREG: record the sub-register index instead of a dest RC.
+      // If this isn't a leaf, then continue since the register classes are
+      // a bit too complicated for now.
+      if (!Dst->getChild(1)->isLeaf()) continue;
+
+      DefInit *SR = dyn_cast<DefInit>(Dst->getChild(1)->getLeafValue());
+      if (SR)
+        SubRegNo = getQualifiedName(SR->getDef());
+      else
+        SubRegNo = Dst->getChild(1)->getLeafValue()->getAsString();
+    }
+
+    // Inspect the pattern.
+    TreePatternNode *InstPatNode = Pattern.getSrcPattern();
+    if (!InstPatNode) continue;
+    if (InstPatNode->isLeaf()) continue;
+
+    // Ignore multiple result nodes for now.
+    if (InstPatNode->getNumTypes() > 1) continue;
+
+    Record *InstPatOp = InstPatNode->getOperator();
+    std::string OpcodeName = getOpcodeName(InstPatOp, CGP);
+    // VT is the common operand type; RetVT is the result type (isVoid for
+    // no-result nodes).
+    MVT::SimpleValueType RetVT = MVT::isVoid;
+    if (InstPatNode->getNumTypes()) RetVT = InstPatNode->getSimpleType(0);
+    MVT::SimpleValueType VT = RetVT;
+    if (InstPatNode->getNumChildren()) {
+      assert(InstPatNode->getChild(0)->getNumTypes() == 1);
+      VT = InstPatNode->getChild(0)->getSimpleType(0);
+    }
+
+    // For now, filter out any instructions with predicates.
+    if (!InstPatNode->getPredicateCalls().empty())
+      continue;
+
+    // Check all the operands.
+    OperandsSignature Operands;
+    if (!Operands.initialize(InstPatNode, Target, VT, ImmediatePredicates,
+                             DstRC))
+      continue;
+
+    std::vector<std::string> PhysRegInputs;
+    if (InstPatNode->getOperator()->getName() == "imm" ||
+        InstPatNode->getOperator()->getName() == "fpimm")
+      PhysRegInputs.push_back("");
+    else {
+      // Compute the PhysRegs used by the given pattern, and check that
+      // the mapping from the src to dst patterns is simple.
+      bool FoundNonSimplePattern = false;
+      unsigned DstIndex = 0;
+      for (unsigned i = 0, e = InstPatNode->getNumChildren(); i != e; ++i) {
+        std::string PhysReg = PhyRegForNode(InstPatNode->getChild(i), Target);
+        if (PhysReg.empty()) {
+          // Non-physreg source operands must line up 1:1 (by name) with
+          // the destination operands.
+          if (DstIndex >= Dst->getNumChildren() ||
+              Dst->getChild(DstIndex)->getName() !=
+                  InstPatNode->getChild(i)->getName()) {
+            FoundNonSimplePattern = true;
+            break;
+          }
+          ++DstIndex;
+        }
+
+        PhysRegInputs.push_back(PhysReg);
+      }
+
+      if (Op->getName() != "EXTRACT_SUBREG" && DstIndex < Dst->getNumChildren())
+        FoundNonSimplePattern = true;
+
+      if (FoundNonSimplePattern)
+        continue;
+    }
+
+    // Check if the operands match one of the patterns handled by FastISel.
+    std::string ManglingSuffix;
+    raw_string_ostream SuffixOS(ManglingSuffix);
+    Operands.PrintManglingSuffix(SuffixOS, ImmediatePredicates, true);
+    if (!StringSwitch<bool>(ManglingSuffix)
+             .Cases("", "r", "rr", "ri", "i", "f", true)
+             .Default(false))
+      continue;
+
+    // Get the predicate that guards this pattern.
+    std::string PredicateCheck = Pattern.getPredicateCheck();
+
+    // Ok, we found a pattern that we can handle. Remember it.
+    InstructionMemo Memo(
+        Pattern.getDstPattern()->getOperator()->getName(),
+        DstRC,
+        SubRegNo,
+        PhysRegInputs,
+        PredicateCheck
+      );
+
+    int complexity = Pattern.getPatternComplexity(CGP);
+
+    // Reject two patterns that would occupy the same table slot with the
+    // same guard predicate -- that would be ambiguous.
+    auto inserted_simple_pattern = SimplePatternsCheck.insert(
+        std::make_tuple(Operands, OpcodeName, VT, RetVT, PredicateCheck));
+    if (!inserted_simple_pattern.second) {
+      PrintFatalError(Pattern.getSrcRecord()->getLoc(),
+                      "Duplicate predicate in FastISel table!");
+    }
+
+    // Note: Instructions with the same complexity will appear in the order
+    // that they are encountered.
+    SimplePatterns[Operands][OpcodeName][VT][RetVT].emplace(complexity,
+                                                            std::move(Memo));
+
+    // If any of the operands were immediates with predicates on them, strip
+    // them down to a signature that doesn't have predicates so that we can
+    // associate them with the stripped predicate version.
+    if (Operands.hasAnyImmediateCodes()) {
+      SignaturesWithConstantForms[Operands.getWithoutImmCodes()]
+        .push_back(Operands);
+    }
+  }
+}
+
+// Emits one "static bool <name>(int64_t Imm)" helper per unique immediate
+// predicate collected during collectPatterns; emits nothing if none exist.
+void FastISelMap::printImmediatePredicates(raw_ostream &OS) {
+  if (ImmediatePredicates.begin() == ImmediatePredicates.end())
+    return;
+
+  OS << "\n// FastEmit Immediate Predicate functions.\n";
+  for (auto ImmediatePredicate : ImmediatePredicates) {
+    OS << "static bool " << ImmediatePredicate.getFnName()
+       << "(int64_t Imm) {\n";
+    OS << ImmediatePredicate.getImmediatePredicateCode() << "\n}\n";
+  }
+
+  OS << "\n\n";
+}
+
+// Emits the body shared by the generated fastEmit_* leaf functions: one
+// guarded attempt per candidate instruction in PM (highest complexity
+// first), ending with "return 0;" if every candidate was predicated.
+void FastISelMap::emitInstructionCode(raw_ostream &OS,
+                                      const OperandsSignature &Operands,
+                                      const PredMap &PM,
+                                      const std::string &RetVTName) {
+  // Emit code for each possible instruction. There may be
+  // multiple if there are subtarget concerns. A reverse iterator
+  // is used to produce the ones with highest complexity first.
+
+  bool OneHadNoPredicate = false;
+  for (PredMap::const_reverse_iterator PI = PM.rbegin(), PE = PM.rend();
+       PI != PE; ++PI) {
+    const InstructionMemo &Memo = PI->second;
+    std::string PredicateCheck = Memo.PredicateCheck;
+
+    if (PredicateCheck.empty()) {
+      // An unpredicated instruction matches unconditionally, so anything
+      // after it would be dead code -- there may only be one of them.
+      assert(!OneHadNoPredicate &&
+             "Multiple instructions match and more than one had "
+             "no predicate!");
+      OneHadNoPredicate = true;
+    } else {
+      if (OneHadNoPredicate) {
+        PrintFatalError("Multiple instructions match and one with no "
+                        "predicate came before one with a predicate! "
+                        "name:" + Memo.Name + " predicate: " + PredicateCheck);
+      }
+      OS << "  if (" + PredicateCheck + ") {\n";
+      OS << "  ";
+    }
+
+    // Copy source values into their required implicit physical registers.
+    for (unsigned i = 0; i < Memo.PhysRegs.size(); ++i) {
+      if (Memo.PhysRegs[i] != "")
+        OS << "  BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, MIMD, "
+           << "TII.get(TargetOpcode::COPY), " << Memo.PhysRegs[i]
+           << ").addReg(Op" << i << ");\n";
+    }
+
+    OS << "  return fastEmitInst_";
+    if (Memo.SubRegNo.empty()) {
+      Operands.PrintManglingSuffix(OS, Memo.PhysRegs, ImmediatePredicates,
+                                   true);
+      OS << "(" << InstNS << "::" << Memo.Name << ", ";
+      OS << "&" << InstNS << "::" << Memo.RC->getName() << "RegClass";
+      if (!Operands.empty())
+        OS << ", ";
+      Operands.PrintArguments(OS, Memo.PhysRegs);
+      OS << ");\n";
+    } else {
+      // EXTRACT_SUBREG patterns lower to fastEmitInst_extractsubreg.
+      OS << "extractsubreg(" << RetVTName
+         << ", Op0, " << Memo.SubRegNo << ");\n";
+    }
+
+    if (!PredicateCheck.empty()) {
+      OS << "  }\n";
+    }
+  }
+  // Return 0 if all of the possibilities had predicates but none
+  // were satisfied.
+  if (!OneHadNoPredicate)
+    OS << "  return 0;\n";
+  OS << "}\n";
+  OS << "\n";
+}
+
+
+// Emits the full cascade of generated fastEmit_* functions: leaf functions
+// per (signature, opcode, VT, RetVT), then demultiplexers on return type,
+// on operand type, and finally on opcode per operand signature.
+void FastISelMap::printFunctionDefinitions(raw_ostream &OS) {
+  // Now emit code for all the patterns that we collected.
+  for (const auto &SimplePattern : SimplePatterns) {
+    const OperandsSignature &Operands = SimplePattern.first;
+    const OpcodeTypeRetPredMap &OTM = SimplePattern.second;
+
+    for (const auto &I : OTM) {
+      const std::string &Opcode = I.first;
+      const TypeRetPredMap &TM = I.second;
+
+      OS << "// FastEmit functions for " << Opcode << ".\n";
+      OS << "\n";
+
+      // Emit one function for each opcode,type pair.
+      for (const auto &TI : TM) {
+        MVT::SimpleValueType VT = TI.first;
+        const RetPredMap &RM = TI.second;
+        if (RM.size() != 1) {
+          // Multiple return types: one leaf function per return type...
+          for (const auto &RI : RM) {
+            MVT::SimpleValueType RetVT = RI.first;
+            const PredMap &PM = RI.second;
+
+            OS << "unsigned fastEmit_" << getLegalCName(Opcode) << "_"
+               << getLegalCName(std::string(getName(VT))) << "_"
+               << getLegalCName(std::string(getName(RetVT))) << "_";
+            Operands.PrintManglingSuffix(OS, ImmediatePredicates);
+            OS << "(";
+            Operands.PrintParameters(OS);
+            OS << ") {\n";
+
+            emitInstructionCode(OS, Operands, PM, std::string(getName(RetVT)));
+          }
+
+          // Emit one function for the type that demultiplexes on return type.
+          OS << "unsigned fastEmit_" << getLegalCName(Opcode) << "_"
+             << getLegalCName(std::string(getName(VT))) << "_";
+          Operands.PrintManglingSuffix(OS, ImmediatePredicates);
+          OS << "(MVT RetVT";
+          if (!Operands.empty())
+            OS << ", ";
+          Operands.PrintParameters(OS);
+          OS << ") {\nswitch (RetVT.SimpleTy) {\n";
+          for (const auto &RI : RM) {
+            MVT::SimpleValueType RetVT = RI.first;
+            OS << "  case " << getName(RetVT) << ": return fastEmit_"
+               << getLegalCName(Opcode) << "_"
+               << getLegalCName(std::string(getName(VT))) << "_"
+               << getLegalCName(std::string(getName(RetVT))) << "_";
+            Operands.PrintManglingSuffix(OS, ImmediatePredicates);
+            OS << "(";
+            Operands.PrintArguments(OS);
+            OS << ");\n";
+          }
+          OS << "  default: return 0;\n}\n}\n\n";
+
+        } else {
+          // Non-variadic return type.
+          OS << "unsigned fastEmit_" << getLegalCName(Opcode) << "_"
+             << getLegalCName(std::string(getName(VT))) << "_";
+          Operands.PrintManglingSuffix(OS, ImmediatePredicates);
+          OS << "(MVT RetVT";
+          if (!Operands.empty())
+            OS << ", ";
+          Operands.PrintParameters(OS);
+          OS << ") {\n";
+
+          OS << "  if (RetVT.SimpleTy != " << getName(RM.begin()->first)
+             << ")\n    return 0;\n";
+
+          const PredMap &PM = RM.begin()->second;
+
+          emitInstructionCode(OS, Operands, PM, "RetVT");
+        }
+      }
+
+      // Emit one function for the opcode that demultiplexes based on the type.
+      OS << "unsigned fastEmit_"
+         << getLegalCName(Opcode) << "_";
+      Operands.PrintManglingSuffix(OS, ImmediatePredicates);
+      OS << "(MVT VT, MVT RetVT";
+      if (!Operands.empty())
+        OS << ", ";
+      Operands.PrintParameters(OS);
+      OS << ") {\n";
+      OS << "  switch (VT.SimpleTy) {\n";
+      for (const auto &TI : TM) {
+        MVT::SimpleValueType VT = TI.first;
+        std::string TypeName = std::string(getName(VT));
+        OS << "  case " << TypeName << ": return fastEmit_"
+           << getLegalCName(Opcode) << "_" << getLegalCName(TypeName) << "_";
+        Operands.PrintManglingSuffix(OS, ImmediatePredicates);
+        OS << "(RetVT";
+        if (!Operands.empty())
+          OS << ", ";
+        Operands.PrintArguments(OS);
+        OS << ");\n";
+      }
+      OS << "  default: return 0;\n";
+      OS << "  }\n";
+      OS << "}\n";
+      OS << "\n";
+    }
+
+    OS << "// Top-level FastEmit function.\n";
+    OS << "\n";
+
+    // Emit one function for the operand signature that demultiplexes based
+    // on opcode and type.
+    OS << "unsigned fastEmit_";
+    Operands.PrintManglingSuffix(OS, ImmediatePredicates);
+    OS << "(MVT VT, MVT RetVT, unsigned Opcode";
+    if (!Operands.empty())
+      OS << ", ";
+    Operands.PrintParameters(OS);
+    OS << ") ";
+    // Only the unpredicated signatures override the FastISel base class;
+    // predicated-immediate variants are extra helpers called below.
+    if (!Operands.hasAnyImmediateCodes())
+      OS << "override ";
+    OS << "{\n";
+
+    // If there are any forms of this signature available that operate on
+    // constrained forms of the immediate (e.g., 32-bit sext immediate in a
+    // 64-bit operand), check them first.
+
+    std::map<OperandsSignature, std::vector<OperandsSignature> >::iterator MI
+      = SignaturesWithConstantForms.find(Operands);
+    if (MI != SignaturesWithConstantForms.end()) {
+      // Unique any duplicates out of the list.
+      llvm::sort(MI->second);
+      MI->second.erase(std::unique(MI->second.begin(), MI->second.end()),
+                       MI->second.end());
+
+      // Check each in order it was seen. It would be nice to have a good
+      // relative ordering between them, but we're not going for optimality
+      // here.
+      for (unsigned i = 0, e = MI->second.size(); i != e; ++i) {
+        OS << "  if (";
+        MI->second[i].emitImmediatePredicate(OS, ImmediatePredicates);
+        OS << ")\n    if (unsigned Reg = fastEmit_";
+        MI->second[i].PrintManglingSuffix(OS, ImmediatePredicates);
+        OS << "(VT, RetVT, Opcode";
+        if (!MI->second[i].empty())
+          OS << ", ";
+        MI->second[i].PrintArguments(OS);
+        OS << "))\n      return Reg;\n\n";
+      }
+
+      // Done with this, remove it.
+      SignaturesWithConstantForms.erase(MI);
+    }
+
+    OS << "  switch (Opcode) {\n";
+    for (const auto &I : OTM) {
+      const std::string &Opcode = I.first;
+
+      OS << "  case " << Opcode << ": return fastEmit_"
+         << getLegalCName(Opcode) << "_";
+      Operands.PrintManglingSuffix(OS, ImmediatePredicates);
+      OS << "(VT, RetVT";
+      if (!Operands.empty())
+        OS << ", ";
+      Operands.PrintArguments(OS);
+      OS << ");\n";
+    }
+    OS << "  default: return 0;\n";
+    OS << "  }\n";
+    OS << "}\n";
+    OS << "\n";
+  }
+
+  // TODO: SignaturesWithConstantForms should be empty here.
+}
+
+namespace llvm {
+
+// TableGen backend entry point for -gen-fast-isel: collects the target's
+// simple patterns and prints the generated fastEmit_* definitions.
+void EmitFastISel(RecordKeeper &RK, raw_ostream &OS) {
+  CodeGenDAGPatterns CGP(RK);
+  const CodeGenTarget &Target = CGP.getTargetInfo();
+  emitSourceFileHeader("\"Fast\" Instruction Selector for the " +
+                           Target.getName().str() + " target", OS);
+
+  // Determine the target's namespace name.
+  StringRef InstNS = Target.getInstNamespace();
+  assert(!InstNS.empty() && "Can't determine target-specific namespace!");
+
+  FastISelMap F(InstNS);
+  F.collectPatterns(CGP);
+  F.printImmediatePredicates(OS);
+  F.printFunctionDefinitions(OS);
+}
+
+} // End llvm namespace
diff --git a/contrib/libs/llvm16/utils/TableGen/GICombinerEmitter.cpp b/contrib/libs/llvm16/utils/TableGen/GICombinerEmitter.cpp
new file mode 100644
index 0000000000..2ae313081a
--- /dev/null
+++ b/contrib/libs/llvm16/utils/TableGen/GICombinerEmitter.cpp
@@ -0,0 +1,1081 @@
+//===- GlobalCombinerEmitter.cpp - Generate a combiner --------------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+/// \file Generate a combiner implementation for GlobalISel from a declarative
+/// syntax
+///
+//===----------------------------------------------------------------------===//
+
+#include "CodeGenTarget.h"
+#include "GlobalISel/CodeExpander.h"
+#include "GlobalISel/CodeExpansions.h"
+#include "GlobalISel/GIMatchDag.h"
+#include "GlobalISel/GIMatchDagPredicate.h"
+#include "GlobalISel/GIMatchTree.h"
+#include "llvm/ADT/SmallSet.h"
+#include "llvm/ADT/Statistic.h"
+#include "llvm/ADT/StringSet.h"
+#include "llvm/Support/CommandLine.h"
+#include "llvm/Support/Debug.h"
+#include "llvm/Support/ScopedPrinter.h"
+#include "llvm/TableGen/Error.h"
+#include "llvm/TableGen/StringMatcher.h"
+#include "llvm/TableGen/TableGenBackend.h"
+#include <cstdint>
+
+using namespace llvm;
+
+#define DEBUG_TYPE "gicombiner-emitter"
+
+// FIXME: Use ALWAYS_ENABLED_STATISTIC once it's available.
+// Running count of rules parsed so far. It doubles as the source of RuleIDs
+// (see GICombinerEmitter::makeCombineRule / gatherRules), so it must be an
+// ordinary mutable global rather than only a STATISTIC.
+unsigned NumPatternTotal = 0;
+STATISTIC(NumPatternTotalStatistic, "Total number of patterns");
+
+// Command-line knobs for selecting combiners and for stopping the emitter
+// early at various stages — useful when debugging this tablegen backend.
+cl::OptionCategory
+    GICombinerEmitterCat("Options for -gen-global-isel-combiner");
+static cl::list<std::string>
+    SelectedCombiners("combiners", cl::desc("Emit the specified combiners"),
+                      cl::cat(GICombinerEmitterCat), cl::CommaSeparated);
+// NOTE(review): "occurence" below is a typo ("occurrence"), but it is
+// user-visible help text (runtime string), so it is deliberately left as-is.
+static cl::opt<bool> ShowExpansions(
+    "gicombiner-show-expansions",
+    cl::desc("Use C++ comments to indicate occurence of code expansion"),
+    cl::cat(GICombinerEmitterCat));
+static cl::opt<bool> StopAfterParse(
+    "gicombiner-stop-after-parse",
+    cl::desc("Stop processing after parsing rules and dump state"),
+    cl::cat(GICombinerEmitterCat));
+static cl::opt<bool> StopAfterBuild(
+    "gicombiner-stop-after-build",
+    cl::desc("Stop processing after building the match tree"),
+    cl::cat(GICombinerEmitterCat));
+
+namespace {
+// Numeric identifier assigned to each combine rule (also usable from the
+// generated combiner's command line to enable/disable individual rules).
+typedef uint64_t RuleID;
+
+// We're going to be referencing the same small strings quite a lot for operand
+// names and the like. Make their lifetime management simple with a global
+// string table.
+StringSet<> StrTab;
+
+/// Intern \p S in StrTab and return a StringRef whose storage lives as long as
+/// the string table. Empty strings are returned unchanged (nothing to intern).
+StringRef insertStrTab(StringRef S) {
+  if (S.empty())
+    return S;
+  return StrTab.insert(S).first->first();
+}
+
+/// Stream-formatting adapter: when printed, emits the human-readable name of
+/// partition \p Idx as produced by \p Tree's partitioner.
+/// NOTE(review): print() dereferences getPartitioner() without a null check;
+/// callers (see generateCodeForTree) only use this on partitioned nodes.
+class format_partition_name {
+  const GIMatchTree &Tree;
+  unsigned Idx;
+
+public:
+  format_partition_name(const GIMatchTree &Tree, unsigned Idx)
+      : Tree(Tree), Idx(Idx) {}
+  void print(raw_ostream &OS) const {
+    Tree.getPartitioner()->emitPartitionName(OS, Idx);
+  }
+};
+// Allow format_partition_name to be streamed like any other value.
+raw_ostream &operator<<(raw_ostream &OS, const format_partition_name &Fmt) {
+  Fmt.print(OS);
+  return OS;
+}
+
+/// Declares data that is passed from the match stage to the apply stage.
+/// One instance corresponds to one GIDefMatchData entry in a rule's (defs ...)
+/// list; see CombineRule::declareMatchData for where these are created.
+class MatchDataInfo {
+  /// The symbol used in the tablegen patterns
+  StringRef PatternSymbol;
+  /// The data type for the variable
+  StringRef Type;
+  /// The name of the variable as declared in the generated matcher.
+  std::string VariableName;
+
+public:
+  MatchDataInfo(StringRef PatternSymbol, StringRef Type, StringRef VariableName)
+      : PatternSymbol(PatternSymbol), Type(Type), VariableName(VariableName) {}
+
+  // Trivial accessors. (The trailing ';' after each body is redundant but
+  // harmless; kept to preserve the recorded patch byte-for-byte.)
+  StringRef getPatternSymbol() const { return PatternSymbol; };
+  StringRef getType() const { return Type; };
+  StringRef getVariableName() const { return VariableName; };
+};
+
+/// Records the pattern symbol (e.g. the name bound by root:$name) of one root
+/// of a combine rule, as collected from the rule's (defs ...) dag.
+class RootInfo {
+  StringRef PatternSymbol;
+
+public:
+  RootInfo(StringRef PatternSymbol) : PatternSymbol(PatternSymbol) {}
+
+  StringRef getPatternSymbol() const { return PatternSymbol; }
+};
+
+/// One combine rule: the result of parsing a single GICombineRule record into
+/// a GIMatchDag plus the bookkeeping (roots, match-data declarations, optional
+/// C++ fixup code) needed to emit matching code for it.
+class CombineRule {
+public:
+
+  using const_matchdata_iterator = std::vector<MatchDataInfo>::const_iterator;
+
+  /// One endpoint of a named edge: the instruction node and operand that
+  /// referenced a $name, plus the DagInit it was parsed from (kept for
+  /// diagnostics; see parseMatcher's error notes).
+  struct VarInfo {
+    const GIMatchDagInstr *N;
+    const GIMatchDagOperand *Op;
+    const DagInit *Matcher;
+
+  public:
+    VarInfo(const GIMatchDagInstr *N, const GIMatchDagOperand *Op,
+            const DagInit *Matcher)
+        : N(N), Op(Op), Matcher(Matcher) {}
+  };
+
+protected:
+  /// A unique ID for this rule
+  /// ID's are used for debugging and run-time disabling of rules among other
+  /// things.
+  RuleID ID;
+
+  /// A unique ID that can be used for anonymous objects belonging to this rule.
+  /// Used to create unique names in makeNameForAnon*() without making tests
+  /// overly fragile.
+  unsigned UID = 0;
+
+  /// The record defining this rule.
+  const Record &TheDef;
+
+  /// The roots of a match. These are the leaves of the DAG that are closest to
+  /// the end of the function. I.e. the nodes that are encountered without
+  /// following any edges of the DAG described by the pattern as we work our way
+  /// from the bottom of the function to the top.
+  std::vector<RootInfo> Roots;
+
+  // The DAG of instruction nodes, predicates, and edges built by parseMatcher.
+  GIMatchDag MatchDag;
+
+  /// A block of arbitrary C++ to finish testing the match.
+  /// FIXME: This is a temporary measure until we have actual pattern matching
+  const StringInit *MatchingFixupCode = nullptr;
+
+  /// The MatchData defined by the match stage and required by the apply stage.
+  /// This allows the plumbing of arbitrary data from C++ predicates between the
+  /// stages.
+  ///
+  /// For example, suppose you have:
+  ///   %A = <some-constant-expr>
+  ///   %0 = G_ADD %1, %A
+  /// you could define a GIMatchPredicate that walks %A, constant folds as much
+  /// as possible and returns an APInt containing the discovered constant. You
+  /// could then declare:
+  ///   def apint : GIDefMatchData<"APInt">;
+  /// add it to the rule with:
+  ///   (defs root:$root, apint:$constant)
+  /// evaluate it in the pattern with a C++ function that takes a
+  /// MachineOperand& and an APInt& with:
+  ///   (match [{MIR %root = G_ADD %0, %A }],
+  ///          (constantfold operand:$A, apint:$constant))
+  /// and finally use it in the apply stage with:
+  ///   (apply (create_operand
+  ///              [{ MachineOperand::CreateImm(${constant}.getZExtValue());
+  ///          ]}, apint:$constant),
+  ///          [{MIR %root = FOO %0, %constant }])
+  std::vector<MatchDataInfo> MatchDataDecls;
+
+  void declareMatchData(StringRef PatternSymbol, StringRef Type,
+                        StringRef VarName);
+
+  bool parseInstructionMatcher(const CodeGenTarget &Target, StringInit *ArgName,
+                               const Init &Arg,
+                               StringMap<std::vector<VarInfo>> &NamedEdgeDefs,
+                               StringMap<std::vector<VarInfo>> &NamedEdgeUses);
+  bool parseWipMatchOpcodeMatcher(const CodeGenTarget &Target,
+                                  StringInit *ArgName, const Init &Arg);
+
+public:
+  CombineRule(const CodeGenTarget &Target, GIMatchDagContext &Ctx, RuleID ID,
+              const Record &R)
+      : ID(ID), TheDef(R), MatchDag(Ctx) {}
+  CombineRule(const CombineRule &) = delete;
+
+  bool parseDefs();
+  bool parseMatcher(const CodeGenTarget &Target);
+
+  RuleID getID() const { return ID; }
+  // Hand out the next per-rule counter value (used for anonymous names).
+  unsigned allocUID() { return UID++; }
+  StringRef getName() const { return TheDef.getName(); }
+  const Record &getDef() const { return TheDef; }
+  const StringInit *getMatchingFixupCode() const { return MatchingFixupCode; }
+  size_t getNumRoots() const { return Roots.size(); }
+
+  GIMatchDag &getMatchDag() { return MatchDag; }
+  const GIMatchDag &getMatchDag() const { return MatchDag; }
+
+  using const_root_iterator = std::vector<RootInfo>::const_iterator;
+  const_root_iterator roots_begin() const { return Roots.begin(); }
+  const_root_iterator roots_end() const { return Roots.end(); }
+  iterator_range<const_root_iterator> roots() const {
+    return llvm::make_range(Roots.begin(), Roots.end());
+  }
+
+  iterator_range<const_matchdata_iterator> matchdata_decls() const {
+    return make_range(MatchDataDecls.begin(), MatchDataDecls.end());
+  }
+
+  /// Export expansions for this rule
+  void declareExpansions(CodeExpansions &Expansions) const {
+    for (const auto &I : matchdata_decls())
+      Expansions.declare(I.getPatternSymbol(), I.getVariableName());
+  }
+
+  /// The matcher will begin from the roots and will perform the match by
+  /// traversing the edges to cover the whole DAG. This function reverses DAG
+  /// edges such that everything is reachable from a root. This is part of the
+  /// preparation work for flattening the DAG into a tree.
+  void reorientToRoots() {
+    SmallSet<const GIMatchDagInstr *, 5> Roots;
+    SmallSet<const GIMatchDagInstr *, 5> Visited;
+    SmallSet<GIMatchDagEdge *, 20> EdgesRemaining;
+
+    for (auto &I : MatchDag.roots()) {
+      Roots.insert(I);
+      Visited.insert(I);
+    }
+    for (auto &I : MatchDag.edges())
+      EdgesRemaining.insert(I);
+
+    bool Progressed = false;
+    SmallSet<GIMatchDagEdge *, 20> EdgesToRemove;
+    while (!EdgesRemaining.empty()) {
+      // Pass 1: an edge leaving an already-visited node is correctly oriented;
+      // absorb its target. A target that is itself a root means one declared
+      // root was reachable from another, i.e. redundant.
+      for (auto *EI : EdgesRemaining) {
+        if (Visited.count(EI->getFromMI())) {
+          if (Roots.count(EI->getToMI()))
+            PrintError(TheDef.getLoc(), "One or more roots are unnecessary");
+          Visited.insert(EI->getToMI());
+          EdgesToRemove.insert(EI);
+          Progressed = true;
+        }
+      }
+      for (GIMatchDagEdge *ToRemove : EdgesToRemove)
+        EdgesRemaining.erase(ToRemove);
+      EdgesToRemove.clear();
+
+      // Pass 2: an edge pointing *into* the visited set is oriented the wrong
+      // way round; reverse it so its source stays reachable from the roots.
+      // NOTE(review): this loop erases from EdgesRemaining while iterating
+      // over it, which looks iterator-unsafe for a SmallSet — confirm against
+      // upstream before relying on this code path.
+      for (auto EI = EdgesRemaining.begin(), EE = EdgesRemaining.end();
+           EI != EE; ++EI) {
+        if (Visited.count((*EI)->getToMI())) {
+          (*EI)->reverse();
+          Visited.insert((*EI)->getToMI());
+          EdgesToRemove.insert(*EI);
+          Progressed = true;
+        }
+        for (GIMatchDagEdge *ToRemove : EdgesToRemove)
+          EdgesRemaining.erase(ToRemove);
+        EdgesToRemove.clear();
+      }
+
+      // Neither pass consumed an edge: the remaining edges are unreachable
+      // from any root, so bail out instead of spinning forever.
+      if (!Progressed) {
+        LLVM_DEBUG(dbgs() << "No progress\n");
+        return;
+      }
+      Progressed = false;
+    }
+  }
+};
+
+// --- Small helpers for inspecting Init values while parsing rule dags. ---
+// Note that the two dag-inspecting helpers below additionally require the dag
+// to have at least one argument (getNumArgs() > 0).
+
+/// A convenience function to check that an Init refers to a specific def. This
+/// is primarily useful for testing for defs and similar in DagInit's since
+/// DagInit's support any type inside them.
+static bool isSpecificDef(const Init &N, StringRef Def) {
+  if (const DefInit *OpI = dyn_cast<DefInit>(&N))
+    if (OpI->getDef()->getName() == Def)
+      return true;
+  return false;
+}
+
+/// A convenience function to check that an Init refers to a def that is a
+/// subclass of the given class and coerce it to a def if it is. This is
+/// primarily useful for testing for subclasses of GIMatchKind and similar in
+/// DagInit's since DagInit's support any type inside them.
+static Record *getDefOfSubClass(const Init &N, StringRef Cls) {
+  if (const DefInit *OpI = dyn_cast<DefInit>(&N))
+    if (OpI->getDef()->isSubClassOf(Cls))
+      return OpI->getDef();
+  return nullptr;
+}
+
+/// A convenience function to check that an Init refers to a dag whose operator
+/// is a specific def and coerce it to a dag if it is. This is primarily useful
+/// for testing for subclasses of GIMatchKind and similar in DagInit's since
+/// DagInit's support any type inside them.
+static const DagInit *getDagWithSpecificOperator(const Init &N,
+                                                 StringRef Name) {
+  if (const DagInit *I = dyn_cast<DagInit>(&N))
+    if (I->getNumArgs() > 0)
+      if (const DefInit *OpI = dyn_cast<DefInit>(I->getOperator()))
+        if (OpI->getDef()->getName() == Name)
+          return I;
+  return nullptr;
+}
+
+/// A convenience function to check that an Init refers to a dag whose operator
+/// is a def that is a subclass of the given class and coerce it to a dag if it
+/// is. This is primarily useful for testing for subclasses of GIMatchKind and
+/// similar in DagInit's since DagInit's support any type inside them.
+static const DagInit *getDagWithOperatorOfSubClass(const Init &N,
+                                                   StringRef Cls) {
+  if (const DagInit *I = dyn_cast<DagInit>(&N))
+    if (I->getNumArgs() > 0)
+      if (const DefInit *OpI = dyn_cast<DefInit>(I->getOperator()))
+        if (OpI->getDef()->isSubClassOf(Cls))
+          return I;
+  return nullptr;
+}
+
+/// Build a unique name for an otherwise-unnamed instruction node of \p Rule,
+/// e.g. "__anon0_1", from the rule's ID and its per-rule counter.
+StringRef makeNameForAnonInstr(CombineRule &Rule) {
+  return insertStrTab(to_string(
+      format("__anon%" PRIu64 "_%u", Rule.getID(), Rule.allocUID())));
+}
+
+/// Return \p Name interned, or a fresh anonymous-instruction name when
+/// \p Name is empty.
+StringRef makeDebugName(CombineRule &Rule, StringRef Name) {
+  return insertStrTab(Name.empty() ? makeNameForAnonInstr(Rule) : StringRef(Name));
+}
+
+/// Build a unique name for an anonymous predicate node of \p Rule,
+/// e.g. "__anonpred0_2".
+StringRef makeNameForAnonPredicate(CombineRule &Rule) {
+  return insertStrTab(to_string(
+      format("__anonpred%" PRIu64 "_%u", Rule.getID(), Rule.allocUID())));
+}
+
+/// Record that this rule passes a variable named \p VarName of type \p Type
+/// from the match stage to the apply stage under \p PatternSymbol.
+void CombineRule::declareMatchData(StringRef PatternSymbol, StringRef Type,
+                                   StringRef VarName) {
+  MatchDataDecls.emplace_back(PatternSymbol, Type, VarName);
+}
+
+/// Parse the rule's (defs ...) dag: collect root:$... entries into Roots and
+/// GIDefMatchData entries into MatchDataDecls. Returns false (with an error
+/// printed) on malformed input or when no root is declared.
+bool CombineRule::parseDefs() {
+  DagInit *Defs = TheDef.getValueAsDag("Defs");
+
+  if (Defs->getOperatorAsDef(TheDef.getLoc())->getName() != "defs") {
+    PrintError(TheDef.getLoc(), "Expected defs operator");
+    return false;
+  }
+
+  for (unsigned I = 0, E = Defs->getNumArgs(); I < E; ++I) {
+    // Roots should be collected into Roots
+    if (isSpecificDef(*Defs->getArg(I), "root")) {
+      Roots.emplace_back(Defs->getArgNameStr(I));
+      continue;
+    }
+
+    // Subclasses of GIDefMatchData should declare that this rule needs to pass
+    // data from the match stage to the apply stage, and ensure that the
+    // generated matcher has a suitable variable for it to do so.
+    if (Record *MatchDataRec =
+            getDefOfSubClass(*Defs->getArg(I), "GIDefMatchData")) {
+      declareMatchData(Defs->getArgNameStr(I),
+                       MatchDataRec->getValueAsString("Type"),
+                       llvm::to_string(llvm::format("MatchData%" PRIu64, ID)));
+      continue;
+    }
+
+    // Otherwise emit an appropriate error message.
+    if (getDefOfSubClass(*Defs->getArg(I), "GIDefKind"))
+      PrintError(TheDef.getLoc(),
+                 "This GIDefKind not implemented in tablegen");
+    else if (getDefOfSubClass(*Defs->getArg(I), "GIDefKindWithArgs"))
+      PrintError(TheDef.getLoc(),
+                 "This GIDefKindWithArgs not implemented in tablegen");
+    else
+      PrintError(TheDef.getLoc(),
+                 "Expected a subclass of GIDefKind or a sub-dag whose "
+                 "operator is of type GIDefKindWithArgs");
+    return false;
+  }
+
+  if (Roots.empty()) {
+    PrintError(TheDef.getLoc(), "Combine rules must have at least one root");
+    return false;
+  }
+  return true;
+}
+
+// Parse an (Instruction $a:Arg1, $b:Arg2, ...) matcher. Edges are formed
+// between matching operand names between different matchers.
+// Returns true when Arg was an Instruction-operator dag (and the node was
+// added to the DAG); false lets the caller try the other parsers.
+bool CombineRule::parseInstructionMatcher(
+    const CodeGenTarget &Target, StringInit *ArgName, const Init &Arg,
+    StringMap<std::vector<VarInfo>> &NamedEdgeDefs,
+    StringMap<std::vector<VarInfo>> &NamedEdgeUses) {
+  if (const DagInit *Matcher =
+          getDagWithOperatorOfSubClass(Arg, "Instruction")) {
+    auto &Instr =
+        Target.getInstruction(Matcher->getOperatorAsDef(TheDef.getLoc()));
+
+    StringRef Name = ArgName ? ArgName->getValue() : "";
+
+    // Add the instruction node plus an opcode predicate tying it to Instr.
+    GIMatchDagInstr *N =
+        MatchDag.addInstrNode(makeDebugName(*this, Name), insertStrTab(Name),
+                              MatchDag.getContext().makeOperandList(Instr));
+
+    N->setOpcodeAnnotation(&Instr);
+    const auto &P = MatchDag.addPredicateNode<GIMatchDagOpcodePredicate>(
+        makeNameForAnonPredicate(*this), Instr);
+    MatchDag.addPredicateDependency(N, nullptr, P, &P->getOperandInfo()["mi"]);
+    unsigned OpIdx = 0;
+    for (const auto &NameInit : Matcher->getArgNames()) {
+      StringRef Name = insertStrTab(NameInit->getAsUnquotedString());
+      if (Name.empty())
+        continue;
+      N->assignNameToOperand(OpIdx, Name);
+
+      // Record the endpoints of any named edges. We'll add the cartesian
+      // product of edges later.
+      const auto &InstrOperand = N->getOperandInfo()[OpIdx];
+      if (InstrOperand.isDef()) {
+        NamedEdgeDefs.try_emplace(Name);
+        NamedEdgeDefs[Name].emplace_back(N, &InstrOperand, Matcher);
+      } else {
+        NamedEdgeUses.try_emplace(Name);
+        NamedEdgeUses[Name].emplace_back(N, &InstrOperand, Matcher);
+      }
+
+      // A def whose name matches one of the rule's declared roots makes this
+      // instruction a match root.
+      if (InstrOperand.isDef()) {
+        if (any_of(Roots, [&](const RootInfo &X) {
+              return X.getPatternSymbol() == Name;
+            })) {
+          N->setMatchRoot();
+        }
+      }
+
+      OpIdx++;
+    }
+
+    return true;
+  }
+  return false;
+}
+
+// Parse the wip_match_opcode placeholder that's temporarily present in lieu of
+// implementing macros or choices between two matchers.
+// Returns true when Arg was a (wip_match_opcode ...) dag; false lets the
+// caller try the other parsers.
+bool CombineRule::parseWipMatchOpcodeMatcher(const CodeGenTarget &Target,
+                                             StringInit *ArgName,
+                                             const Init &Arg) {
+  if (const DagInit *Matcher =
+          getDagWithSpecificOperator(Arg, "wip_match_opcode")) {
+    StringRef Name = ArgName ? ArgName->getValue() : "";
+
+    // The node has no operand list; it is only constrained by opcode.
+    GIMatchDagInstr *N =
+        MatchDag.addInstrNode(makeDebugName(*this, Name), insertStrTab(Name),
+                              MatchDag.getContext().makeEmptyOperandList());
+
+    if (any_of(Roots, [&](const RootInfo &X) {
+          return ArgName && X.getPatternSymbol() == ArgName->getValue();
+        })) {
+      N->setMatchRoot();
+    }
+
+    const auto &P = MatchDag.addPredicateNode<GIMatchDagOneOfOpcodesPredicate>(
+        makeNameForAnonPredicate(*this));
+    MatchDag.addPredicateDependency(N, nullptr, P, &P->getOperandInfo()["mi"]);
+    // Each argument is an opcode that will pass this predicate. Add them all to
+    // the predicate implementation
+    for (const auto &Arg : Matcher->getArgs()) {
+      Record *OpcodeDef = getDefOfSubClass(*Arg, "Instruction");
+      if (OpcodeDef) {
+        P->addOpcode(&Target.getInstruction(OpcodeDef));
+        continue;
+      }
+      PrintError(TheDef.getLoc(),
+                 "Arguments to wip_match_opcode must be instructions");
+      return false;
+    }
+    return true;
+  }
+  return false;
+}
+/// Parse the rule's (match ...) dag into GIMatchDag nodes, predicates, and
+/// edges. Returns false (with an error printed) on malformed input.
+bool CombineRule::parseMatcher(const CodeGenTarget &Target) {
+  StringMap<std::vector<VarInfo>> NamedEdgeDefs;
+  StringMap<std::vector<VarInfo>> NamedEdgeUses;
+  DagInit *Matchers = TheDef.getValueAsDag("Match");
+
+  if (Matchers->getOperatorAsDef(TheDef.getLoc())->getName() != "match") {
+    PrintError(TheDef.getLoc(), "Expected match operator");
+    return false;
+  }
+
+  if (Matchers->getNumArgs() == 0) {
+    PrintError(TheDef.getLoc(), "Matcher is empty");
+    return false;
+  }
+
+  // The match section consists of a list of matchers and predicates. Parse each
+  // one and add the equivalent GIMatchDag nodes, predicates, and edges.
+  for (unsigned I = 0; I < Matchers->getNumArgs(); ++I) {
+    if (parseInstructionMatcher(Target, Matchers->getArgName(I),
+                                *Matchers->getArg(I), NamedEdgeDefs,
+                                NamedEdgeUses))
+      continue;
+
+    if (parseWipMatchOpcodeMatcher(Target, Matchers->getArgName(I),
+                                   *Matchers->getArg(I)))
+      continue;
+
+
+    // Parse arbitrary C++ code we have in lieu of supporting MIR matching
+    if (const StringInit *StringI = dyn_cast<StringInit>(Matchers->getArg(I))) {
+      assert(!MatchingFixupCode &&
+             "Only one block of arbitrary code is currently permitted");
+      MatchingFixupCode = StringI;
+      MatchDag.setHasPostMatchPredicate(true);
+      continue;
+    }
+
+    PrintError(TheDef.getLoc(),
+               "Expected a subclass of GIMatchKind or a sub-dag whose "
+               "operator is either of a GIMatchKindWithArgs or Instruction");
+    PrintNote("Pattern was `" + Matchers->getArg(I)->getAsString() + "'");
+    return false;
+  }
+
+  // Add the cartesian product of use -> def edges.
+  bool FailedToAddEdges = false;
+  for (const auto &NameAndDefs : NamedEdgeDefs) {
+    // A vreg can only be defined once; more than one def for a name is an
+    // error, but we keep going so all such names are diagnosed in one pass.
+    if (NameAndDefs.getValue().size() > 1) {
+      PrintError(TheDef.getLoc(),
+                 "Two different MachineInstrs cannot def the same vreg");
+      for (const auto &NameAndDefOp : NameAndDefs.getValue())
+        PrintNote("in " + to_string(*NameAndDefOp.N) + " created from " +
+                  to_string(*NameAndDefOp.Matcher) + "");
+      FailedToAddEdges = true;
+    }
+    const auto &Uses = NamedEdgeUses[NameAndDefs.getKey()];
+    for (const VarInfo &DefVar : NameAndDefs.getValue()) {
+      for (const VarInfo &UseVar : Uses) {
+        MatchDag.addEdge(insertStrTab(NameAndDefs.getKey()), UseVar.N, UseVar.Op,
+                         DefVar.N, DefVar.Op);
+      }
+    }
+  }
+  if (FailedToAddEdges)
+    return false;
+
+  // If a variable is referenced in multiple use contexts then we need a
+  // predicate to confirm they are the same operand. We can elide this if it's
+  // also referenced in a def context and we're traversing the def-use chain
+  // from the def to the uses but we can't know which direction we're going
+  // until after reorientToRoots().
+  for (const auto &NameAndUses : NamedEdgeUses) {
+    const auto &Uses = NameAndUses.getValue();
+    if (Uses.size() > 1) {
+      const auto &LeadingVar = Uses.front();
+      for (const auto &Var : ArrayRef<VarInfo>(Uses).drop_front()) {
+        // Add a predicate for each pair until we've covered the whole
+        // equivalence set. We could test the whole set in a single predicate
+        // but that means we can't test any equivalence until all the MO's are
+        // available which can lead to wasted work matching the DAG when this
+        // predicate can already be seen to have failed.
+        //
+        // We have a similar problem due to the need to wait for a particular MO
+        // before being able to test any of them. However, that is mitigated by
+        // the order in which we build the DAG. We build from the roots outwards
+        // so by using the first recorded use in all the predicates, we are
+        // making the dependency on one of the earliest visited references in
+        // the DAG. It's not guaranteed once the generated matcher is optimized
+        // (because the factoring the common portions of rules might change the
+        // visit order) but this should mean that these predicates depend on the
+        // first MO to become available.
+        const auto &P = MatchDag.addPredicateNode<GIMatchDagSameMOPredicate>(
+            makeNameForAnonPredicate(*this));
+        MatchDag.addPredicateDependency(LeadingVar.N, LeadingVar.Op, P,
+                                        &P->getOperandInfo()["mi0"]);
+        MatchDag.addPredicateDependency(Var.N, Var.Op, P,
+                                        &P->getOperandInfo()["mi1"]);
+      }
+    }
+  }
+  return true;
+}
+
+/// Top-level driver for one -gen-global-isel-combiner invocation: owns the
+/// parsed CombineRules and emits the generated combiner helper class.
+class GICombinerEmitter {
+  RecordKeeper &Records;
+  StringRef Name;
+  const CodeGenTarget &Target;
+  Record *Combiner;
+  // Flattened list of successfully-parsed rules (groups expanded).
+  std::vector<std::unique_ptr<CombineRule>> Rules;
+  GIMatchDagContext MatchDagCtx;
+
+  std::unique_ptr<CombineRule> makeCombineRule(const Record &R);
+
+  void gatherRules(std::vector<std::unique_ptr<CombineRule>> &ActiveRules,
+                   const std::vector<Record *> &&RulesAndGroups);
+
+public:
+  explicit GICombinerEmitter(RecordKeeper &RK, const CodeGenTarget &Target,
+                             StringRef Name, Record *Combiner);
+  ~GICombinerEmitter() {}
+
+  StringRef getClassName() const {
+    return Combiner->getValueAsString("Classname");
+  }
+  void run(raw_ostream &OS);
+
+  /// Emit the name matcher (guarded by #ifndef NDEBUG) used to disable rules in
+  /// response to the generated cl::opt.
+  void emitNameMatcher(raw_ostream &OS) const;
+
+  /// Recursively emit the matcher code for \p Tree at indentation \p Indent.
+  void generateCodeForTree(raw_ostream &OS, const GIMatchTree &Tree,
+                           StringRef Indent) const;
+};
+
+// Trivial constructor; rule parsing is deferred to run()/gatherRules().
+GICombinerEmitter::GICombinerEmitter(RecordKeeper &RK,
+                                     const CodeGenTarget &Target,
+                                     StringRef Name, Record *Combiner)
+    : Records(RK), Name(Name), Target(Target), Combiner(Combiner) {}
+
+/// Emit getRuleIdxForIdentifier(): maps a rule identifier (numeric ID, or, in
+/// !NDEBUG builds, a rule name via a generated StringMatcher) to its index.
+void GICombinerEmitter::emitNameMatcher(raw_ostream &OS) const {
+  // One (rule-name, "return <id>;") case per rule for the StringMatcher.
+  std::vector<std::pair<std::string, std::string>> Cases;
+  Cases.reserve(Rules.size());
+
+  for (const CombineRule &EnumeratedRule : make_pointee_range(Rules)) {
+    std::string Code;
+    raw_string_ostream SS(Code);
+    SS << "return " << EnumeratedRule.getID() << ";\n";
+    Cases.push_back(
+        std::make_pair(std::string(EnumeratedRule.getName()), Code));
+  }
+
+  OS << "static std::optional<uint64_t> getRuleIdxForIdentifier(StringRef "
+        "RuleIdentifier) {\n"
+     << "  uint64_t I;\n"
+     << "  // getAtInteger(...) returns false on success\n"
+     << "  bool Parsed = !RuleIdentifier.getAsInteger(0, I);\n"
+     << "  if (Parsed)\n"
+     << "    return I;\n\n"
+     << "#ifndef NDEBUG\n";
+  StringMatcher Matcher("RuleIdentifier", Cases, OS);
+  Matcher.Emit();
+  OS << "#endif // ifndef NDEBUG\n\n"
+     << "  return std::nullopt;\n"
+     << "}\n";
+}
+
+/// Parse \p TheDef into a CombineRule, or return nullptr (with errors printed)
+/// when parsing fails or the rule uses features the emitter cannot handle yet.
+std::unique_ptr<CombineRule>
+GICombinerEmitter::makeCombineRule(const Record &TheDef) {
+  // NumPatternTotal serves as the next RuleID; it is incremented by the
+  // caller (gatherRules) once the rule is accepted.
+  std::unique_ptr<CombineRule> Rule =
+      std::make_unique<CombineRule>(Target, MatchDagCtx, NumPatternTotal, TheDef);
+
+  if (!Rule->parseDefs())
+    return nullptr;
+  if (!Rule->parseMatcher(Target))
+    return nullptr;
+
+  Rule->reorientToRoots();
+
+  LLVM_DEBUG({
+    dbgs() << "Parsed rule defs/match for '" << Rule->getName() << "'\n";
+    Rule->getMatchDag().dump();
+    Rule->getMatchDag().writeDOTGraph(dbgs(), Rule->getName());
+  });
+  if (StopAfterParse)
+    return Rule;
+
+  // For now, don't support traversing from def to use. We'll come back to
+  // this later once we have the algorithm changes to support it.
+  bool EmittedDefToUseError = false;
+  for (const auto &E : Rule->getMatchDag().edges()) {
+    if (E->isDefToUse()) {
+      if (!EmittedDefToUseError) {
+        PrintError(
+            TheDef.getLoc(),
+            "Generated state machine cannot lookup uses from a def (yet)");
+        EmittedDefToUseError = true;
+      }
+      PrintNote("Node " + to_string(*E->getFromMI()));
+      PrintNote("Node " + to_string(*E->getToMI()));
+      PrintNote("Edge " + to_string(*E));
+    }
+  }
+  if (EmittedDefToUseError)
+    return nullptr;
+
+  // For now, don't support multi-root rules. We'll come back to this later
+  // once we have the algorithm changes to support it.
+  if (Rule->getNumRoots() > 1) {
+    PrintError(TheDef.getLoc(), "Multi-root matches are not supported (yet)");
+    return nullptr;
+  }
+  return Rule;
+}
+
+/// Recurse into GICombineGroup's and flatten the ruleset into a simple list.
+void GICombinerEmitter::gatherRules(
+    std::vector<std::unique_ptr<CombineRule>> &ActiveRules,
+    const std::vector<Record *> &&RulesAndGroups) {
+  for (Record *R : RulesAndGroups) {
+    // A record without a "Rules" value is a leaf rule; otherwise it is a
+    // group whose "Rules" list is flattened recursively.
+    if (R->isValueUnset("Rules")) {
+      std::unique_ptr<CombineRule> Rule = makeCombineRule(*R);
+      if (Rule == nullptr) {
+        PrintError(R->getLoc(), "Failed to parse rule");
+        continue;
+      }
+      ActiveRules.emplace_back(std::move(Rule));
+      // Accepted rule: advance the global counter that doubles as the next
+      // RuleID (see makeCombineRule).
+      ++NumPatternTotal;
+    } else
+      gatherRules(ActiveRules, R->getValueAsListOfDefs("Rules"));
+  }
+}
+
+/// Recursively emit the generated C++ matcher for \p Tree at indentation
+/// \p Indent. Internal (partitioned) nodes emit a partition selector and
+/// recurse into each child; leaf nodes emit the per-rule predicate tests,
+/// optional fixup lambda, and apply code.
+void GICombinerEmitter::generateCodeForTree(raw_ostream &OS,
+                                            const GIMatchTree &Tree,
+                                            StringRef Indent) const {
+  if (Tree.getPartitioner() != nullptr) {
+    Tree.getPartitioner()->generatePartitionSelectorCode(OS, Indent);
+    for (const auto &EnumChildren : enumerate(Tree.children())) {
+      OS << Indent << "if (Partition == " << EnumChildren.index() << " /* "
+         << format_partition_name(Tree, EnumChildren.index()) << " */) {\n";
+      generateCodeForTree(OS, EnumChildren.value(), (Indent + "  ").str());
+      OS << Indent << "}\n";
+    }
+    return;
+  }
+
+  bool AnyFullyTested = false;
+  for (const auto &Leaf : Tree.possible_leaves()) {
+    OS << Indent << "// Leaf name: " << Leaf.getName() << "\n";
+
+    const CombineRule *Rule = Leaf.getTargetData<CombineRule>();
+    const Record &RuleDef = Rule->getDef();
+
+    OS << Indent << "// Rule: " << RuleDef.getName() << "\n"
+       << Indent << "if (!RuleConfig->isRuleDisabled(" << Rule->getID()
+       << ")) {\n";
+
+    // Bind each matched instruction/operand name to its MIs[] expression so
+    // the rule's C++ fragments can refer to them via ${name}.
+    CodeExpansions Expansions;
+    for (const auto &VarBinding : Leaf.var_bindings()) {
+      if (VarBinding.isInstr())
+        Expansions.declare(VarBinding.getName(),
+                           "MIs[" + to_string(VarBinding.getInstrID()) + "]");
+      else
+        Expansions.declare(VarBinding.getName(),
+                           "MIs[" + to_string(VarBinding.getInstrID()) +
+                               "]->getOperand(" +
+                               to_string(VarBinding.getOpIdx()) + ")");
+    }
+    Rule->declareExpansions(Expansions);
+
+    DagInit *Applyer = RuleDef.getValueAsDag("Apply");
+    if (Applyer->getOperatorAsDef(RuleDef.getLoc())->getName() !=
+        "apply") {
+      PrintError(RuleDef.getLoc(), "Expected 'apply' operator in Apply DAG");
+      return;
+    }
+
+    // Open a single "if (1 && ...)" chain that all match conditions extend.
+    OS << Indent << "  if (1\n";
+
+    // Emit code for C++ Predicates.
+    if (RuleDef.getValue("Predicates")) {
+      ListInit *Preds = RuleDef.getValueAsListInit("Predicates");
+      for (Init *I : Preds->getValues()) {
+        if (DefInit *Pred = dyn_cast<DefInit>(I)) {
+          Record *Def = Pred->getDef();
+          if (!Def->isSubClassOf("Predicate")) {
+            PrintError(Def->getLoc(), "Unknown 'Predicate' Type");
+            return;
+          }
+
+          StringRef CondString = Def->getValueAsString("CondString");
+          if (CondString.empty())
+            continue;
+
+          OS << Indent << "      && (\n"
+             << Indent << "          // Predicate: " << Def->getName() << "\n"
+             << Indent << "          " << CondString << "\n"
+             << Indent << "      )\n";
+        }
+      }
+    }
+
+    // Attempt to emit code for any untested predicates left over. Note that
+    // isFullyTested() will remain false even if we succeed here and therefore
+    // combine rule elision will not be performed. This is because we do not
+    // know if there's any connection between the predicates for each leaf and
+    // therefore can't tell if one makes another unreachable. Ideally, the
+    // partitioner(s) would be sufficiently complete to prevent us from having
+    // untested predicates left over.
+    for (const GIMatchDagPredicate *Predicate : Leaf.untested_predicates()) {
+      if (Predicate->generateCheckCode(OS, (Indent + "      ").str(),
+                                       Expansions))
+        continue;
+      PrintError(RuleDef.getLoc(),
+                 "Unable to test predicate used in rule");
+      PrintNote(SMLoc(),
+                "This indicates an incomplete implementation in tablegen");
+      Predicate->print(errs());
+      errs() << "\n";
+      OS << Indent
+         << "llvm_unreachable(\"TableGen did not emit complete code for this "
+            "path\");\n";
+      break;
+    }
+
+    if (Rule->getMatchingFixupCode() &&
+        !Rule->getMatchingFixupCode()->getValue().empty()) {
+      // FIXME: Single-use lambda's like this are a serious compile-time
+      // performance and memory issue. It's convenient for this early stage to
+      // defer some work to successive patches but we need to eliminate this
+      // before the ruleset grows to small-moderate size. Last time, it became
+      // a big problem for low-mem systems around the 500 rule mark but by the
+      // time we grow that large we should have merged the ISel match table
+      // mechanism with the Combiner.
+      OS << Indent << "      && [&]() {\n"
+         << Indent << "      "
+         << CodeExpander(Rule->getMatchingFixupCode()->getValue(), Expansions,
+                         RuleDef.getLoc(), ShowExpansions)
+         << '\n'
+         << Indent << "      return true;\n"
+         << Indent << "  }()";
+    }
+    OS << Indent << "     ) {\n" << Indent << "   ";
+
+    if (const StringInit *Code = dyn_cast<StringInit>(Applyer->getArg(0))) {
+      OS << "      LLVM_DEBUG(dbgs() << \"Applying rule '"
+         << RuleDef.getName()
+         << "'\\n\");\n"
+         << CodeExpander(Code->getAsUnquotedString(), Expansions,
+                         RuleDef.getLoc(), ShowExpansions)
+         << '\n'
+         << Indent << "      return true;\n"
+         << Indent << "  }\n";
+    } else {
+      PrintError(RuleDef.getLoc(), "Expected apply code block");
+      return;
+    }
+
+    OS << Indent << "}\n";
+
+    assert(Leaf.isFullyTraversed());
+
+    // If we didn't have any predicates left over and we're not using the
+    // trap-door we have to support arbitrary C++ code while we're migrating to
+    // the declarative style then we know that subsequent leaves are
+    // unreachable.
+    if (Leaf.isFullyTested() &&
+        (!Rule->getMatchingFixupCode() ||
+         Rule->getMatchingFixupCode()->getValue().empty())) {
+      AnyFullyTested = true;
+      OS << Indent
+         << "llvm_unreachable(\"Combine rule elision was incorrect\");\n"
+         << Indent << "return false;\n";
+    }
+  }
+  if (!AnyFullyTested)
+    OS << Indent << "return false;\n";
+}
+
+/// Print ",\n <Type> <Name>" for each record in the combiner's
+/// AdditionalArguments list, extending the generated tryCombineAll's
+/// parameter list (see GICombinerEmitter::run).
+static void emitAdditionalHelperMethodArguments(raw_ostream &OS,
+                                                Record *Combiner) {
+  for (Record *Arg : Combiner->getValueAsListOfDefs("AdditionalArguments"))
+    OS << ",\n    " << Arg->getValueAsString("Type")
+       << " " << Arg->getValueAsString("Name");
+}
+
+void GICombinerEmitter::run(raw_ostream &OS) {
+ Records.startTimer("Gather rules");
+ gatherRules(Rules, Combiner->getValueAsListOfDefs("Rules"));
+ if (StopAfterParse) {
+ MatchDagCtx.print(errs());
+ PrintNote(Combiner->getLoc(),
+ "Terminating due to -gicombiner-stop-after-parse");
+ return;
+ }
+ if (ErrorsPrinted)
+ PrintFatalError(Combiner->getLoc(), "Failed to parse one or more rules");
+ LLVM_DEBUG(dbgs() << "Optimizing tree for " << Rules.size() << " rules\n");
+ std::unique_ptr<GIMatchTree> Tree;
+ Records.startTimer("Optimize combiner");
+ {
+ GIMatchTreeBuilder TreeBuilder(0);
+ for (const auto &Rule : Rules) {
+ bool HadARoot = false;
+ for (const auto &Root : enumerate(Rule->getMatchDag().roots())) {
+ TreeBuilder.addLeaf(Rule->getName(), Root.index(), Rule->getMatchDag(),
+ Rule.get());
+ HadARoot = true;
+ }
+ if (!HadARoot)
+ PrintFatalError(Rule->getDef().getLoc(), "All rules must have a root");
+ }
+
+ Tree = TreeBuilder.run();
+ }
+ if (StopAfterBuild) {
+ Tree->writeDOTGraph(outs());
+ PrintNote(Combiner->getLoc(),
+ "Terminating due to -gicombiner-stop-after-build");
+ return;
+ }
+
+ Records.startTimer("Emit combiner");
+ OS << "#ifdef " << Name.upper() << "_GENCOMBINERHELPER_DEPS\n"
+ << "#include \"llvm/ADT/SparseBitVector.h\"\n"
+ << "namespace llvm {\n"
+ << "extern cl::OptionCategory GICombinerOptionCategory;\n"
+ << "} // end namespace llvm\n"
+ << "#endif // ifdef " << Name.upper() << "_GENCOMBINERHELPER_DEPS\n\n";
+
+ OS << "#ifdef " << Name.upper() << "_GENCOMBINERHELPER_H\n"
+ << "class " << getClassName() << "RuleConfig {\n"
+ << " SparseBitVector<> DisabledRules;\n"
+ << "\n"
+ << "public:\n"
+ << " bool parseCommandLineOption();\n"
+ << " bool isRuleDisabled(unsigned ID) const;\n"
+ << " bool setRuleEnabled(StringRef RuleIdentifier);\n"
+ << " bool setRuleDisabled(StringRef RuleIdentifier);\n"
+ << "};\n"
+ << "\n"
+ << "class " << getClassName();
+ StringRef StateClass = Combiner->getValueAsString("StateClass");
+ if (!StateClass.empty())
+ OS << " : public " << StateClass;
+ OS << " {\n"
+ << " const " << getClassName() << "RuleConfig *RuleConfig;\n"
+ << "\n"
+ << "public:\n"
+ << " template <typename... Args>" << getClassName() << "(const "
+ << getClassName() << "RuleConfig &RuleConfig, Args &&... args) : ";
+ if (!StateClass.empty())
+ OS << StateClass << "(std::forward<Args>(args)...), ";
+ OS << "RuleConfig(&RuleConfig) {}\n"
+ << "\n"
+ << " bool tryCombineAll(\n"
+ << " GISelChangeObserver &Observer,\n"
+ << " MachineInstr &MI,\n"
+ << " MachineIRBuilder &B";
+ emitAdditionalHelperMethodArguments(OS, Combiner);
+ OS << ") const;\n";
+ OS << "};\n\n";
+
+ emitNameMatcher(OS);
+
+ OS << "static std::optional<std::pair<uint64_t, uint64_t>> "
+ "getRuleRangeForIdentifier(StringRef RuleIdentifier) {\n"
+ << " std::pair<StringRef, StringRef> RangePair = "
+ "RuleIdentifier.split('-');\n"
+ << " if (!RangePair.second.empty()) {\n"
+ << " const auto First = "
+ "getRuleIdxForIdentifier(RangePair.first);\n"
+ << " const auto Last = "
+ "getRuleIdxForIdentifier(RangePair.second);\n"
+ << " if (!First || !Last)\n"
+ << " return std::nullopt;\n"
+ << " if (First >= Last)\n"
+ << " report_fatal_error(\"Beginning of range should be before "
+ "end of range\");\n"
+ << " return {{*First, *Last + 1}};\n"
+ << " }\n"
+ << " if (RangePair.first == \"*\") {\n"
+ << " return {{0, " << Rules.size() << "}};\n"
+ << " }\n"
+ << " const auto I = getRuleIdxForIdentifier(RangePair.first);\n"
+ << " if (!I)\n"
+ << " return std::nullopt;\n"
+ << " return {{*I, *I + 1}};\n"
+ << "}\n\n";
+
+ for (bool Enabled : {true, false}) {
+ OS << "bool " << getClassName() << "RuleConfig::setRule"
+ << (Enabled ? "Enabled" : "Disabled") << "(StringRef RuleIdentifier) {\n"
+ << " auto MaybeRange = getRuleRangeForIdentifier(RuleIdentifier);\n"
+ << " if (!MaybeRange)\n"
+ << " return false;\n"
+ << " for (auto I = MaybeRange->first; I < MaybeRange->second; ++I)\n"
+ << " DisabledRules." << (Enabled ? "reset" : "set") << "(I);\n"
+ << " return true;\n"
+ << "}\n\n";
+ }
+
+ OS << "bool " << getClassName()
+ << "RuleConfig::isRuleDisabled(unsigned RuleID) const {\n"
+ << " return DisabledRules.test(RuleID);\n"
+ << "}\n";
+ OS << "#endif // ifdef " << Name.upper() << "_GENCOMBINERHELPER_H\n\n";
+
+ OS << "#ifdef " << Name.upper() << "_GENCOMBINERHELPER_CPP\n"
+ << "\n"
+ << "std::vector<std::string> " << Name << "Option;\n"
+ << "cl::list<std::string> " << Name << "DisableOption(\n"
+ << " \"" << Name.lower() << "-disable-rule\",\n"
+ << " cl::desc(\"Disable one or more combiner rules temporarily in "
+ << "the " << Name << " pass\"),\n"
+ << " cl::CommaSeparated,\n"
+ << " cl::Hidden,\n"
+ << " cl::cat(GICombinerOptionCategory),\n"
+ << " cl::callback([](const std::string &Str) {\n"
+ << " " << Name << "Option.push_back(Str);\n"
+ << " }));\n"
+ << "cl::list<std::string> " << Name << "OnlyEnableOption(\n"
+ << " \"" << Name.lower() << "-only-enable-rule\",\n"
+ << " cl::desc(\"Disable all rules in the " << Name
+ << " pass then re-enable the specified ones\"),\n"
+ << " cl::Hidden,\n"
+ << " cl::cat(GICombinerOptionCategory),\n"
+ << " cl::callback([](const std::string &CommaSeparatedArg) {\n"
+ << " StringRef Str = CommaSeparatedArg;\n"
+ << " " << Name << "Option.push_back(\"*\");\n"
+ << " do {\n"
+ << " auto X = Str.split(\",\");\n"
+ << " " << Name << "Option.push_back((\"!\" + X.first).str());\n"
+ << " Str = X.second;\n"
+ << " } while (!Str.empty());\n"
+ << " }));\n"
+ << "\n"
+ << "bool " << getClassName() << "RuleConfig::parseCommandLineOption() {\n"
+ << " for (StringRef Identifier : " << Name << "Option) {\n"
+ << " bool Enabled = Identifier.consume_front(\"!\");\n"
+ << " if (Enabled && !setRuleEnabled(Identifier))\n"
+ << " return false;\n"
+ << " if (!Enabled && !setRuleDisabled(Identifier))\n"
+ << " return false;\n"
+ << " }\n"
+ << " return true;\n"
+ << "}\n\n";
+
+ OS << "bool " << getClassName() << "::tryCombineAll(\n"
+ << " GISelChangeObserver &Observer,\n"
+ << " MachineInstr &MI,\n"
+ << " MachineIRBuilder &B";
+ emitAdditionalHelperMethodArguments(OS, Combiner);
+ OS << ") const {\n"
+ << " MachineBasicBlock *MBB = MI.getParent();\n"
+ << " MachineFunction *MF = MBB->getParent();\n"
+ << " MachineRegisterInfo &MRI = MF->getRegInfo();\n"
+ << " SmallVector<MachineInstr *, 8> MIs = {&MI};\n\n"
+ << " (void)MBB; (void)MF; (void)MRI; (void)RuleConfig;\n\n";
+
+ OS << " // Match data\n";
+ for (const auto &Rule : Rules)
+ for (const auto &I : Rule->matchdata_decls())
+ OS << " " << I.getType() << " " << I.getVariableName() << ";\n";
+ OS << "\n";
+
+ OS << " int Partition = -1;\n";
+ generateCodeForTree(OS, *Tree, " ");
+ OS << "\n return false;\n"
+ << "}\n"
+ << "#endif // ifdef " << Name.upper() << "_GENCOMBINERHELPER_CPP\n";
+}
+
+} // end anonymous namespace
+
+//===----------------------------------------------------------------------===//
+
+namespace llvm {
+void EmitGICombiner(RecordKeeper &RK, raw_ostream &OS) {
+ CodeGenTarget Target(RK);
+ emitSourceFileHeader("Global Combiner", OS);
+
+ if (SelectedCombiners.empty())
+ PrintFatalError("No combiners selected with -combiners");
+ for (const auto &Combiner : SelectedCombiners) {
+ Record *CombinerDef = RK.getDef(Combiner);
+ if (!CombinerDef)
+ PrintFatalError("Could not find " + Combiner);
+ GICombinerEmitter(RK, Target, Combiner, CombinerDef).run(OS);
+ }
+ NumPatternTotalStatistic = NumPatternTotal;
+}
+
+} // namespace llvm
diff --git a/contrib/libs/llvm16/utils/TableGen/GlobalISel/CodeExpander.cpp b/contrib/libs/llvm16/utils/TableGen/GlobalISel/CodeExpander.cpp
new file mode 100644
index 0000000000..42b4aabf27
--- /dev/null
+++ b/contrib/libs/llvm16/utils/TableGen/GlobalISel/CodeExpander.cpp
@@ -0,0 +1,83 @@
+//===- CodeExpander.cpp - Expand variables in a string --------------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+/// \file Expand the variables in a string.
+//
+//===----------------------------------------------------------------------===//
+
+#include "CodeExpander.h"
+#include "CodeExpansions.h"
+#include "llvm/Support/raw_ostream.h"
+#include "llvm/TableGen/Error.h"
+
+using namespace llvm;
+
+void CodeExpander::emit(raw_ostream &OS) const {
+ StringRef Current = Code;
+
+ while (!Current.empty()) {
+ size_t Pos = Current.find_first_of("$\n\\");
+ if (Pos == StringRef::npos) {
+ OS << Current;
+ Current = "";
+ continue;
+ }
+
+ OS << Current.substr(0, Pos);
+ Current = Current.substr(Pos);
+
+ if (Current.startswith("\n")) {
+ OS << "\n" << Indent;
+ Current = Current.drop_front(1);
+ continue;
+ }
+
+ if (Current.startswith("\\$") || Current.startswith("\\\\")) {
+ OS << Current[1];
+ Current = Current.drop_front(2);
+ continue;
+ }
+
+ if (Current.startswith("\\")) {
+ Current = Current.drop_front(1);
+ continue;
+ }
+
+ if (Current.startswith("${")) {
+ StringRef StartVar = Current;
+ Current = Current.drop_front(2);
+ StringRef Var;
+ std::tie(Var, Current) = Current.split("}");
+
+ // Warn if we split because no terminator was found.
+ StringRef EndVar = StartVar.drop_front(2 /* ${ */ + Var.size());
+ if (EndVar.empty()) {
+ PrintWarning(Loc, "Unterminated expansion '${" + Var + "'");
+ PrintNote("Code: [{" + Code + "}]");
+ }
+
+ auto ValueI = Expansions.find(Var);
+ if (ValueI == Expansions.end()) {
+ PrintError(Loc,
+ "Attempt to expand an undeclared variable '" + Var + "'");
+ PrintNote("Code: [{" + Code + "}]");
+ }
+ if (ShowExpansions)
+ OS << "/*$" << Var << "{*/";
+ OS << Expansions.lookup(Var);
+ if (ShowExpansions)
+ OS << "/*}*/";
+ continue;
+ }
+
+ PrintWarning(Loc, "Assuming missing escape character: \\$");
+ PrintNote("Code: [{" + Code + "}]");
+ OS << "$";
+ Current = Current.drop_front(1);
+ }
+}
diff --git a/contrib/libs/llvm16/utils/TableGen/GlobalISel/CodeExpander.h b/contrib/libs/llvm16/utils/TableGen/GlobalISel/CodeExpander.h
new file mode 100644
index 0000000000..1291eb1ad9
--- /dev/null
+++ b/contrib/libs/llvm16/utils/TableGen/GlobalISel/CodeExpander.h
@@ -0,0 +1,55 @@
+//===- CodeExpander.h - Expand variables in a string ----------------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+/// \file Expand the variables in a string.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_UTILS_TABLEGEN_CODEEXPANDER_H
+#define LLVM_UTILS_TABLEGEN_CODEEXPANDER_H
+
+#include "llvm/ADT/ArrayRef.h"
+#include "llvm/ADT/StringRef.h"
+
+namespace llvm {
+class CodeExpansions;
+class SMLoc;
+class raw_ostream;
+
+/// Emit the given code with all '${foo}' placeholders expanded to their
+/// replacements.
+///
+/// It's an error to use an undefined expansion and expansion-like output that
+/// needs to be emitted verbatim can be escaped as '\${foo}'
+///
+/// The emitted code can be given a custom indent to enable both indentation by
+/// an arbitrary amount of whitespace and emission of the code as a comment.
+class CodeExpander {
+ StringRef Code;
+ const CodeExpansions &Expansions;
+ const ArrayRef<SMLoc> &Loc;
+ bool ShowExpansions;
+ StringRef Indent;
+
+public:
+ CodeExpander(StringRef Code, const CodeExpansions &Expansions,
+ const ArrayRef<SMLoc> &Loc, bool ShowExpansions,
+ StringRef Indent = " ")
+ : Code(Code), Expansions(Expansions), Loc(Loc),
+ ShowExpansions(ShowExpansions), Indent(Indent) {}
+
+ void emit(raw_ostream &OS) const;
+};
+
+inline raw_ostream &operator<<(raw_ostream &OS, const CodeExpander &Expander) {
+ Expander.emit(OS);
+ return OS;
+}
+} // end namespace llvm
+
+#endif // ifndef LLVM_UTILS_TABLEGEN_CODEEXPANDER_H
diff --git a/contrib/libs/llvm16/utils/TableGen/GlobalISel/CodeExpansions.h b/contrib/libs/llvm16/utils/TableGen/GlobalISel/CodeExpansions.h
new file mode 100644
index 0000000000..f536e801b2
--- /dev/null
+++ b/contrib/libs/llvm16/utils/TableGen/GlobalISel/CodeExpansions.h
@@ -0,0 +1,43 @@
+//===- CodeExpansions.h - Record expansions for CodeExpander --------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+/// \file Record the expansions to use in a CodeExpander.
+//
+//===----------------------------------------------------------------------===//
+
+#include "llvm/ADT/StringMap.h"
+
+#ifndef LLVM_UTILS_TABLEGEN_CODEEXPANSIONS_H
+#define LLVM_UTILS_TABLEGEN_CODEEXPANSIONS_H
+namespace llvm {
+class CodeExpansions {
+public:
+ using const_iterator = StringMap<std::string>::const_iterator;
+
+protected:
+ StringMap<std::string> Expansions;
+
+public:
+ void declare(StringRef Name, StringRef Expansion) {
+ // Duplicates are not inserted. The expansion refers to different
+ // MachineOperands using the same virtual register.
+ Expansions.try_emplace(Name, Expansion);
+ }
+
+ std::string lookup(StringRef Variable) const {
+ return Expansions.lookup(Variable);
+ }
+
+ const_iterator begin() const { return Expansions.begin(); }
+ const_iterator end() const { return Expansions.end(); }
+ const_iterator find(StringRef Variable) const {
+ return Expansions.find(Variable);
+ }
+};
+} // end namespace llvm
+#endif // ifndef LLVM_UTILS_TABLEGEN_CODEEXPANSIONS_H
diff --git a/contrib/libs/llvm16/utils/TableGen/GlobalISel/GIMatchDag.cpp b/contrib/libs/llvm16/utils/TableGen/GlobalISel/GIMatchDag.cpp
new file mode 100644
index 0000000000..8be32d2eff
--- /dev/null
+++ b/contrib/libs/llvm16/utils/TableGen/GlobalISel/GIMatchDag.cpp
@@ -0,0 +1,138 @@
+//===- GIMatchDag.cpp - A DAG representation of a pattern to be matched ---===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+#include "GIMatchDag.h"
+
+#include "llvm/Support/Format.h"
+#include "llvm/TableGen/Record.h"
+#include "../CodeGenInstruction.h"
+
+using namespace llvm;
+
+void GIMatchDag::writeDOTGraph(raw_ostream &OS, StringRef ID) const {
+ const auto writePorts = [&](StringRef Prefix,
+ const GIMatchDagOperandList &Operands) {
+ StringRef Separator = "";
+ OS << "{";
+ for (const auto &Op : enumerate(Operands)) {
+ OS << Separator << "<" << Prefix << format("%d", Op.index()) << ">"
+ << "#" << Op.index() << " $" << Op.value().getName();
+ Separator = "|";
+ }
+ OS << "}";
+ };
+
+ OS << "digraph \"" << ID << "\" {\n"
+ << " rankdir=\"BT\"\n";
+ for (const auto &N : InstrNodes) {
+ OS << " " << format("Node%p", &*N) << " [shape=record,label=\"{";
+ writePorts("s", N->getOperandInfo());
+ OS << "|" << N->getName();
+ if (N->getOpcodeAnnotation())
+ OS << "|" << N->getOpcodeAnnotation()->TheDef->getName();
+ if (N->isMatchRoot())
+ OS << "|Match starts here";
+ OS << "|";
+ SmallVector<std::pair<unsigned, StringRef>, 8> ToPrint;
+ for (const auto &Assignment : N->user_assigned_operand_names())
+ ToPrint.emplace_back(Assignment.first, Assignment.second);
+ llvm::sort(ToPrint);
+ StringRef Separator = "";
+ for (const auto &Assignment : ToPrint) {
+ OS << Separator << "$" << Assignment.second << "=getOperand("
+ << Assignment.first << ")";
+ Separator = ", ";
+ }
+ OS << llvm::format("|%p|", &N);
+ writePorts("d", N->getOperandInfo());
+ OS << "}\"";
+ if (N->isMatchRoot())
+ OS << ",color=red";
+ OS << "]\n";
+ }
+
+ for (const auto &E : Edges) {
+ const char *FromFmt = "Node%p:s%d:n";
+ const char *ToFmt = "Node%p:d%d:s";
+ if (E->getFromMO()->isDef() && !E->getToMO()->isDef())
+ std::swap(FromFmt, ToFmt);
+ auto From = format(FromFmt, E->getFromMI(), E->getFromMO()->getIdx());
+ auto To = format(ToFmt, E->getToMI(), E->getToMO()->getIdx());
+ if (E->getFromMO()->isDef() && !E->getToMO()->isDef())
+ std::swap(From, To);
+
+ OS << " " << From << " -> " << To << " [label=\"$" << E->getName();
+ if (E->getFromMO()->isDef() == E->getToMO()->isDef())
+ OS << " INVALID EDGE!";
+ OS << "\"";
+ if (E->getFromMO()->isDef() == E->getToMO()->isDef())
+ OS << ",color=red";
+ else if (E->getFromMO()->isDef() && !E->getToMO()->isDef())
+ OS << ",dir=back,arrowtail=crow";
+ OS << "]\n";
+ }
+
+ for (const auto &N : PredicateNodes) {
+ OS << " " << format("Pred%p", &*N) << " [shape=record,label=\"{";
+ writePorts("s", N->getOperandInfo());
+ OS << "|" << N->getName() << "|";
+ N->printDescription(OS);
+ OS << llvm::format("|%p|", &N);
+ writePorts("d", N->getOperandInfo());
+ OS << "}\",style=dotted]\n";
+ }
+
+ for (const auto &E : PredicateDependencies) {
+ const char *FromMIFmt = "Node%p:e";
+ const char *FromMOFmt = "Node%p:s%d:n";
+ const char *ToFmt = "Pred%p:d%d:s";
+ auto To = format(ToFmt, E->getPredicate(), E->getPredicateOp()->getIdx());
+ auto Style = "[style=dotted]";
+ if (E->getRequiredMO()) {
+ auto From =
+ format(FromMOFmt, E->getRequiredMI(), E->getRequiredMO()->getIdx());
+ OS << " " << From << " -> " << To << " " << Style << "\n";
+ continue;
+ }
+ auto From = format(FromMIFmt, E->getRequiredMI());
+ OS << " " << From << " -> " << To << " " << Style << "\n";
+ }
+
+ OS << "}\n";
+}
+
+LLVM_DUMP_METHOD void GIMatchDag::print(raw_ostream &OS) const {
+ OS << "matchdag {\n";
+ for (const auto &N : InstrNodes) {
+ OS << " ";
+ N->print(OS);
+ OS << "\n";
+ }
+ for (const auto &E : Edges) {
+ OS << " ";
+ E->print(OS);
+ OS << "\n";
+ }
+
+ for (const auto &P : PredicateNodes) {
+ OS << " ";
+ P->print(OS);
+ OS << "\n";
+ }
+ for (const auto &D : PredicateDependencies) {
+ OS << " ";
+ D->print(OS);
+ OS << "\n";
+ }
+ OS << "}\n";
+}
+
+raw_ostream &llvm::operator<<(raw_ostream &OS, const GIMatchDag &G) {
+ G.print(OS);
+ return OS;
+}
diff --git a/contrib/libs/llvm16/utils/TableGen/GlobalISel/GIMatchDag.h b/contrib/libs/llvm16/utils/TableGen/GlobalISel/GIMatchDag.h
new file mode 100644
index 0000000000..4c3c610aff
--- /dev/null
+++ b/contrib/libs/llvm16/utils/TableGen/GlobalISel/GIMatchDag.h
@@ -0,0 +1,240 @@
+//===- GIMatchDag.h - Represent a DAG to be matched -----------------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_UTILS_TABLEGEN_GIMATCHDAG_H
+#define LLVM_UTILS_TABLEGEN_GIMATCHDAG_H
+
+#include "GIMatchDagEdge.h"
+#include "GIMatchDagInstr.h"
+#include "GIMatchDagOperands.h"
+#include "GIMatchDagPredicate.h"
+#include "GIMatchDagPredicateDependencyEdge.h"
+
+namespace llvm {
+
+/// This class manages lifetimes for data associated with the GIMatchDag object.
+class GIMatchDagContext {
+ GIMatchDagOperandListContext OperandListCtx;
+
+public:
+ const GIMatchDagOperandList &makeEmptyOperandList() {
+ return OperandListCtx.makeEmptyOperandList();
+ }
+
+ const GIMatchDagOperandList &makeOperandList(const CodeGenInstruction &I) {
+ return OperandListCtx.makeOperandList(I);
+ }
+
+ const GIMatchDagOperandList &makeMIPredicateOperandList() {
+ return OperandListCtx.makeMIPredicateOperandList();
+ }
+
+
+ const GIMatchDagOperandList &makeTwoMOPredicateOperandList() {
+ return OperandListCtx.makeTwoMOPredicateOperandList();
+ }
+
+ void print(raw_ostream &OS) const {
+ OperandListCtx.print(OS);
+ }
+
+#if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
+ LLVM_DUMP_METHOD void dump() const { print(errs()); }
+#endif // if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
+};
+
+class GIMatchDag {
+public:
+ using InstrNodesVec = std::vector<std::unique_ptr<GIMatchDagInstr>>;
+ using instr_node_iterator = raw_pointer_iterator<InstrNodesVec::iterator>;
+ using const_instr_node_iterator =
+ raw_pointer_iterator<InstrNodesVec::const_iterator>;
+
+ using EdgesVec = std::vector<std::unique_ptr<GIMatchDagEdge>>;
+ using edge_iterator = raw_pointer_iterator<EdgesVec::iterator>;
+ using const_edge_iterator = raw_pointer_iterator<EdgesVec::const_iterator>;
+
+ using PredicateNodesVec = std::vector<std::unique_ptr<GIMatchDagPredicate>>;
+ using predicate_iterator = raw_pointer_iterator<PredicateNodesVec::iterator>;
+ using const_predicate_iterator =
+ raw_pointer_iterator<PredicateNodesVec::const_iterator>;
+
+ using PredicateDependencyEdgesVec =
+ std::vector<std::unique_ptr<GIMatchDagPredicateDependencyEdge>>;
+ using predicate_edge_iterator =
+ raw_pointer_iterator<PredicateDependencyEdgesVec::iterator>;
+ using const_predicate_edge_iterator =
+ raw_pointer_iterator<PredicateDependencyEdgesVec::const_iterator>;
+
+protected:
+ GIMatchDagContext &Ctx;
+ InstrNodesVec InstrNodes;
+ PredicateNodesVec PredicateNodes;
+ EdgesVec Edges;
+ PredicateDependencyEdgesVec PredicateDependencies;
+ std::vector<GIMatchDagInstr *> MatchRoots;
+ // FIXME: This is a temporary measure while we still accept arbitrary code
+ // blocks to fix up the matcher while it's being developed.
+ bool HasPostMatchPredicate = false;
+
+public:
+ GIMatchDag(GIMatchDagContext &Ctx) : Ctx(Ctx) {}
+ GIMatchDag(const GIMatchDag &) = delete;
+
+ GIMatchDagContext &getContext() const { return Ctx; }
+ edge_iterator edges_begin() {
+ return raw_pointer_iterator<EdgesVec::iterator>(Edges.begin());
+ }
+ edge_iterator edges_end() {
+ return raw_pointer_iterator<EdgesVec::iterator>(Edges.end());
+ }
+ const_edge_iterator edges_begin() const {
+ return raw_pointer_iterator<EdgesVec::const_iterator>(Edges.begin());
+ }
+ const_edge_iterator edges_end() const {
+ return raw_pointer_iterator<EdgesVec::const_iterator>(Edges.end());
+ }
+ iterator_range<edge_iterator> edges() {
+ return make_range(edges_begin(), edges_end());
+ }
+ iterator_range<const_edge_iterator> edges() const {
+ return make_range(edges_begin(), edges_end());
+ }
+ iterator_range<std::vector<GIMatchDagInstr *>::iterator> roots() {
+ return make_range(MatchRoots.begin(), MatchRoots.end());
+ }
+ iterator_range<std::vector<GIMatchDagInstr *>::const_iterator> roots() const {
+ return make_range(MatchRoots.begin(), MatchRoots.end());
+ }
+
+ instr_node_iterator instr_nodes_begin() {
+ return raw_pointer_iterator<InstrNodesVec::iterator>(InstrNodes.begin());
+ }
+ instr_node_iterator instr_nodes_end() {
+ return raw_pointer_iterator<InstrNodesVec::iterator>(InstrNodes.end());
+ }
+ const_instr_node_iterator instr_nodes_begin() const {
+ return raw_pointer_iterator<InstrNodesVec::const_iterator>(
+ InstrNodes.begin());
+ }
+ const_instr_node_iterator instr_nodes_end() const {
+ return raw_pointer_iterator<InstrNodesVec::const_iterator>(
+ InstrNodes.end());
+ }
+ iterator_range<instr_node_iterator> instr_nodes() {
+ return make_range(instr_nodes_begin(), instr_nodes_end());
+ }
+ iterator_range<const_instr_node_iterator> instr_nodes() const {
+ return make_range(instr_nodes_begin(), instr_nodes_end());
+ }
+ predicate_edge_iterator predicate_edges_begin() {
+ return raw_pointer_iterator<PredicateDependencyEdgesVec::iterator>(
+ PredicateDependencies.begin());
+ }
+ predicate_edge_iterator predicate_edges_end() {
+ return raw_pointer_iterator<PredicateDependencyEdgesVec::iterator>(
+ PredicateDependencies.end());
+ }
+ const_predicate_edge_iterator predicate_edges_begin() const {
+ return raw_pointer_iterator<PredicateDependencyEdgesVec::const_iterator>(
+ PredicateDependencies.begin());
+ }
+ const_predicate_edge_iterator predicate_edges_end() const {
+ return raw_pointer_iterator<PredicateDependencyEdgesVec::const_iterator>(
+ PredicateDependencies.end());
+ }
+ iterator_range<predicate_edge_iterator> predicate_edges() {
+ return make_range(predicate_edges_begin(), predicate_edges_end());
+ }
+ iterator_range<const_predicate_edge_iterator> predicate_edges() const {
+ return make_range(predicate_edges_begin(), predicate_edges_end());
+ }
+ predicate_iterator predicates_begin() {
+ return raw_pointer_iterator<PredicateNodesVec::iterator>(
+ PredicateNodes.begin());
+ }
+ predicate_iterator predicates_end() {
+ return raw_pointer_iterator<PredicateNodesVec::iterator>(
+ PredicateNodes.end());
+ }
+ const_predicate_iterator predicates_begin() const {
+ return raw_pointer_iterator<PredicateNodesVec::const_iterator>(
+ PredicateNodes.begin());
+ }
+ const_predicate_iterator predicates_end() const {
+ return raw_pointer_iterator<PredicateNodesVec::const_iterator>(
+ PredicateNodes.end());
+ }
+ iterator_range<predicate_iterator> predicates() {
+ return make_range(predicates_begin(), predicates_end());
+ }
+ iterator_range<const_predicate_iterator> predicates() const {
+ return make_range(predicates_begin(), predicates_end());
+ }
+
+ template <class... Args> GIMatchDagInstr *addInstrNode(Args &&... args) {
+ auto Obj =
+ std::make_unique<GIMatchDagInstr>(*this, std::forward<Args>(args)...);
+ auto ObjRaw = Obj.get();
+ InstrNodes.push_back(std::move(Obj));
+ return ObjRaw;
+ }
+
+ template <class T, class... Args>
+ T *addPredicateNode(Args &&... args) {
+ auto Obj = std::make_unique<T>(getContext(), std::forward<Args>(args)...);
+ auto ObjRaw = Obj.get();
+ PredicateNodes.push_back(std::move(Obj));
+ return ObjRaw;
+ }
+
+ template <class... Args> GIMatchDagEdge *addEdge(Args &&... args) {
+ auto Obj = std::make_unique<GIMatchDagEdge>(std::forward<Args>(args)...);
+ auto ObjRaw = Obj.get();
+ Edges.push_back(std::move(Obj));
+ return ObjRaw;
+ }
+
+ template <class... Args>
+ GIMatchDagPredicateDependencyEdge *addPredicateDependency(Args &&... args) {
+ auto Obj = std::make_unique<GIMatchDagPredicateDependencyEdge>(
+ std::forward<Args>(args)...);
+ auto ObjRaw = Obj.get();
+ PredicateDependencies.push_back(std::move(Obj));
+ return ObjRaw;
+ }
+
+ size_t getInstrNodeIdx(instr_node_iterator I) {
+ return std::distance(instr_nodes_begin(), I);
+ }
+ size_t getInstrNodeIdx(const_instr_node_iterator I) const {
+ return std::distance(instr_nodes_begin(), I);
+ }
+ size_t getNumInstrNodes() const { return InstrNodes.size(); }
+ size_t getNumEdges() const { return Edges.size(); }
+ size_t getNumPredicates() const { return PredicateNodes.size(); }
+
+ void setHasPostMatchPredicate(bool V) { HasPostMatchPredicate = V; }
+ bool hasPostMatchPredicate() const { return HasPostMatchPredicate; }
+
+ void addMatchRoot(GIMatchDagInstr *N) { MatchRoots.push_back(N); }
+
+ LLVM_DUMP_METHOD void print(raw_ostream &OS) const;
+
+#if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
+ LLVM_DUMP_METHOD void dump() const { print(errs()); }
+#endif // if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
+
+ void writeDOTGraph(raw_ostream &OS, StringRef ID) const;
+};
+
+raw_ostream &operator<<(raw_ostream &OS, const GIMatchDag &G);
+
+} // end namespace llvm
+
+#endif // ifndef LLVM_UTILS_TABLEGEN_GIMATCHDAG_H
diff --git a/contrib/libs/llvm16/utils/TableGen/GlobalISel/GIMatchDagEdge.cpp b/contrib/libs/llvm16/utils/TableGen/GlobalISel/GIMatchDagEdge.cpp
new file mode 100644
index 0000000000..796479467d
--- /dev/null
+++ b/contrib/libs/llvm16/utils/TableGen/GlobalISel/GIMatchDagEdge.cpp
@@ -0,0 +1,39 @@
+//===- GIMatchDagEdge.cpp - An edge describing a def/use lookup -----------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+#include "GIMatchDagEdge.h"
+#include "GIMatchDagInstr.h"
+#include "GIMatchDagOperands.h"
+#include "llvm/Support/raw_ostream.h"
+
+using namespace llvm;
+
+LLVM_DUMP_METHOD void GIMatchDagEdge::print(raw_ostream &OS) const {
+ OS << getFromMI()->getName() << "[" << getFromMO()->getName() << "] --["
+ << Name << "]--> " << getToMI()->getName() << "[" << getToMO()->getName()
+ << "]";
+}
+
+bool GIMatchDagEdge::isDefToUse() const {
+ // Def -> Def is invalid so we only need to check FromMO.
+ return FromMO->isDef();
+}
+
+void GIMatchDagEdge::reverse() {
+ std::swap(FromMI, ToMI);
+ std::swap(FromMO, ToMO);
+}
+
+#if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
+LLVM_DUMP_METHOD void GIMatchDagEdge::dump() const { print(errs()); }
+#endif // if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
+
+raw_ostream &llvm::operator<<(raw_ostream &OS, const GIMatchDagEdge &E) {
+ E.print(OS);
+ return OS;
+}
diff --git a/contrib/libs/llvm16/utils/TableGen/GlobalISel/GIMatchDagEdge.h b/contrib/libs/llvm16/utils/TableGen/GlobalISel/GIMatchDagEdge.h
new file mode 100644
index 0000000000..8e845ff0a5
--- /dev/null
+++ b/contrib/libs/llvm16/utils/TableGen/GlobalISel/GIMatchDagEdge.h
@@ -0,0 +1,70 @@
+//===- GIMatchDagEdge.h - Represent a shared operand list for nodes -------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_UTILS_TABLEGEN_GIMATCHDAGEDGE_H
+#define LLVM_UTILS_TABLEGEN_GIMATCHDAGEDGE_H
+
+#include "llvm/ADT/StringRef.h"
+
+namespace llvm {
+class raw_ostream;
+class GIMatchDagInstr;
+class GIMatchDagOperand;
+
+/// Represents an edge that connects two instructions together via a pair of
+/// operands. For example:
+/// %a = FOO ...
+/// %0 = BAR %a
+/// %1 = BAZ %a
+/// would have two edges for %a like so:
+/// BAR:Op#1 --[a]----> Op#0:FOO
+/// ^
+/// BAZ:Op#1 --[a]------/
+/// Ideally, all edges in the DAG are from a use to a def as this is a many
+/// to one edge but edges from defs to uses are supported too.
+class GIMatchDagEdge {
+ /// The name of the edge. For example,
+ /// (FOO $a, $b, $c)
+ /// (BAR $d, $e, $a)
+ /// will create an edge named 'a' to connect FOO to BAR. Although the name
+ /// refers to the edge, the canonical value of 'a' is the operand that defines
+ /// it.
+ StringRef Name;
+ const GIMatchDagInstr *FromMI;
+ const GIMatchDagOperand *FromMO;
+ const GIMatchDagInstr *ToMI;
+ const GIMatchDagOperand *ToMO;
+
+public:
+ GIMatchDagEdge(StringRef Name, const GIMatchDagInstr *FromMI, const GIMatchDagOperand *FromMO,
+ const GIMatchDagInstr *ToMI, const GIMatchDagOperand *ToMO)
+ : Name(Name), FromMI(FromMI), FromMO(FromMO), ToMI(ToMI), ToMO(ToMO) {}
+
+ StringRef getName() const { return Name; }
+ const GIMatchDagInstr *getFromMI() const { return FromMI; }
+ const GIMatchDagOperand *getFromMO() const { return FromMO; }
+ const GIMatchDagInstr *getToMI() const { return ToMI; }
+ const GIMatchDagOperand *getToMO() const { return ToMO; }
+
+ /// Flip the direction of the edge.
+ void reverse();
+
+ /// Does this edge run from a def to (one of many) uses?
+ bool isDefToUse() const;
+
+ LLVM_DUMP_METHOD void print(raw_ostream &OS) const;
+
+#if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
+ LLVM_DUMP_METHOD void dump() const;
+#endif // if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
+};
+
+raw_ostream &operator<<(raw_ostream &OS, const GIMatchDagEdge &E);
+
+} // end namespace llvm
+#endif // ifndef LLVM_UTILS_TABLEGEN_GIMATCHDAGEDGE_H
diff --git a/contrib/libs/llvm16/utils/TableGen/GlobalISel/GIMatchDagInstr.cpp b/contrib/libs/llvm16/utils/TableGen/GlobalISel/GIMatchDagInstr.cpp
new file mode 100644
index 0000000000..ad9fbea8f8
--- /dev/null
+++ b/contrib/libs/llvm16/utils/TableGen/GlobalISel/GIMatchDagInstr.cpp
@@ -0,0 +1,48 @@
+//===- GIMatchDagInstr.cpp - A shared operand list for nodes --------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+#include "GIMatchDagInstr.h"
+#include "../CodeGenInstruction.h"
+#include "GIMatchDag.h"
+#include "llvm/TableGen/Record.h"
+
+using namespace llvm;
+
+void GIMatchDagInstr::print(raw_ostream &OS) const {
+ OS << "(";
+ if (const auto *Annotation = getOpcodeAnnotation())
+ OS << Annotation->TheDef->getName();
+ else
+ OS << "<unknown>";
+ OS << " ";
+ OperandInfo.print(OS);
+ OS << "):$" << Name;
+ if (!UserAssignedNamesForOperands.empty()) {
+ OS << " // ";
+ SmallVector<std::pair<unsigned, StringRef>, 8> ToPrint;
+ for (const auto &Assignment : UserAssignedNamesForOperands)
+ ToPrint.emplace_back(Assignment.first, Assignment.second);
+ llvm::sort(ToPrint);
+ StringRef Separator = "";
+ for (const auto &Assignment : ToPrint) {
+ OS << Separator << "$" << Assignment.second << "=getOperand("
+ << Assignment.first << ")";
+ Separator = ", ";
+ }
+ }
+}
+
+void GIMatchDagInstr::setMatchRoot() {
+ IsMatchRoot = true;
+ Dag.addMatchRoot(this);
+}
+
+raw_ostream &llvm::operator<<(raw_ostream &OS, const GIMatchDagInstr &N) {
+ N.print(OS);
+ return OS;
+}
diff --git a/contrib/libs/llvm16/utils/TableGen/GlobalISel/GIMatchDagInstr.h b/contrib/libs/llvm16/utils/TableGen/GlobalISel/GIMatchDagInstr.h
new file mode 100644
index 0000000000..5e60448b30
--- /dev/null
+++ b/contrib/libs/llvm16/utils/TableGen/GlobalISel/GIMatchDagInstr.h
@@ -0,0 +1,118 @@
+//===- GIMatchDagInstr.h - Represent a instruction to be matched ----------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_UTILS_TABLEGEN_GIMATCHDAGINSTR_H
+#define LLVM_UTILS_TABLEGEN_GIMATCHDAGINSTR_H
+
+#include "llvm/ADT/DenseMap.h"
+#include "llvm/ADT/StringRef.h"
+#include "llvm/Support/raw_ostream.h"
+
+namespace llvm {
+class CodeGenInstruction;
+class GIMatchDag;
+class GIMatchDagOperandList;
+
+/// Represents an instruction in the match DAG. This object knows very little
+/// about the actual instruction to be matched as the bulk of that is in
+/// predicates that are associated with the match DAG. It merely knows the names
+/// and indices of any operands that need to be matched in order to allow edges
+/// to link to them.
+///
+/// Instances of this class are owned by the GIMatchDag and are not
+/// shareable between instances of GIMatchDag. This is because the Name,
+/// IsMatchRoot, and OpcodeAnnotation are likely to differ between GIMatchDag
+/// instances.
+class GIMatchDagInstr {
+public:
+ using const_user_assigned_operand_names_iterator =
+ DenseMap<unsigned, StringRef>::const_iterator;
+
+protected:
+ /// The match DAG this instruction belongs to.
+ GIMatchDag &Dag;
+
+ /// The name of the instruction in the pattern. For example:
+ /// (FOO $a, $b, $c):$name
+ /// will cause name to be assigned to this member. Anonymous instructions will
+ /// have a name assigned for debugging purposes.
+ StringRef Name;
+
+ /// The name of the instruction in the pattern as assigned by the user. For
+ /// example:
+ /// (FOO $a, $b, $c):$name
+ /// will cause name to be assigned to this member. If a name is not provided,
+ /// this will be empty. This name is used to bind variables from rules to the
+ /// matched instruction.
+ StringRef UserAssignedName;
+
+ /// The name of each operand (if any) that was assigned by the user. For
+ /// example:
+ /// (FOO $a, $b, $c):$name
+  /// will cause {0, "a"}, {1, "b"}, {2, "c"} to be inserted into this map.
+ DenseMap<unsigned, StringRef> UserAssignedNamesForOperands;
+
+ /// The operand list for this instruction. This object may be shared with
+ /// other instructions of a similar 'shape'.
+ const GIMatchDagOperandList &OperandInfo;
+
+ /// For debugging purposes, it's helpful to have access to a description of
+ /// the Opcode. However, this object shouldn't use it for more than debugging
+ /// output since predicates are expected to be handled outside the DAG.
+ CodeGenInstruction *OpcodeAnnotation = nullptr;
+
+ /// When true, this instruction will be a starting point for a match attempt.
+ bool IsMatchRoot = false;
+
+public:
+ GIMatchDagInstr(GIMatchDag &Dag, StringRef Name, StringRef UserAssignedName,
+ const GIMatchDagOperandList &OperandInfo)
+ : Dag(Dag), Name(Name), UserAssignedName(UserAssignedName),
+ OperandInfo(OperandInfo) {}
+
+ const GIMatchDagOperandList &getOperandInfo() const { return OperandInfo; }
+ StringRef getName() const { return Name; }
+ StringRef getUserAssignedName() const { return UserAssignedName; }
+ void assignNameToOperand(unsigned Idx, StringRef Name) {
+ assert(UserAssignedNamesForOperands[Idx].empty() && "Cannot assign twice");
+ UserAssignedNamesForOperands[Idx] = Name;
+ }
+
+ const_user_assigned_operand_names_iterator
+ user_assigned_operand_names_begin() const {
+ return UserAssignedNamesForOperands.begin();
+ }
+ const_user_assigned_operand_names_iterator
+ user_assigned_operand_names_end() const {
+ return UserAssignedNamesForOperands.end();
+ }
+ iterator_range<const_user_assigned_operand_names_iterator>
+ user_assigned_operand_names() const {
+ return make_range(user_assigned_operand_names_begin(),
+ user_assigned_operand_names_end());
+ }
+
+ /// Mark this instruction as being a root of the match. This means that the
+ /// matcher will start from this node when attempting to match MIR.
+ void setMatchRoot();
+ bool isMatchRoot() const { return IsMatchRoot; }
+
+ void setOpcodeAnnotation(CodeGenInstruction *I) { OpcodeAnnotation = I; }
+ CodeGenInstruction *getOpcodeAnnotation() const { return OpcodeAnnotation; }
+
+ void print(raw_ostream &OS) const;
+
+#if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
+ LLVM_DUMP_METHOD void dump() const { print(errs()); }
+#endif // if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
+};
+
+raw_ostream &operator<<(raw_ostream &OS, const GIMatchDagInstr &N);
+
+} // end namespace llvm
+#endif // ifndef LLVM_UTILS_TABLEGEN_GIMATCHDAGINSTR_H
diff --git a/contrib/libs/llvm16/utils/TableGen/GlobalISel/GIMatchDagOperands.cpp b/contrib/libs/llvm16/utils/TableGen/GlobalISel/GIMatchDagOperands.cpp
new file mode 100644
index 0000000000..e79e4686b9
--- /dev/null
+++ b/contrib/libs/llvm16/utils/TableGen/GlobalISel/GIMatchDagOperands.cpp
@@ -0,0 +1,153 @@
+//===- GIMatchDagOperands.cpp - A shared operand list for nodes -----------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+#include "GIMatchDagOperands.h"
+
+#include "../CodeGenInstruction.h"
+
+using namespace llvm;
+
+void GIMatchDagOperand::Profile(FoldingSetNodeID &ID) const {
+ Profile(ID, Idx, Name, IsDef);
+}
+
+void GIMatchDagOperand::Profile(FoldingSetNodeID &ID, size_t Idx,
+ StringRef Name, bool IsDef) {
+ ID.AddInteger(Idx);
+ ID.AddString(Name);
+ ID.AddBoolean(IsDef);
+}
+
+void GIMatchDagOperandList::add(StringRef Name, unsigned Idx, bool IsDef) {
+ assert(Idx == Operands.size() && "Operands added in wrong order");
+ Operands.emplace_back(Operands.size(), Name, IsDef);
+ OperandsByName.try_emplace(Operands.back().getName(), Operands.size() - 1);
+}
+
+void GIMatchDagOperandList::Profile(FoldingSetNodeID &ID) const {
+ for (const auto &I : enumerate(Operands))
+ GIMatchDagOperand::Profile(ID, I.index(), I.value().getName(),
+ I.value().isDef());
+}
+
+void GIMatchDagOperandList::print(raw_ostream &OS) const {
+ if (Operands.empty()) {
+ OS << "<empty>";
+ return;
+ }
+ StringRef Separator = "";
+ for (const auto &I : Operands) {
+ OS << Separator << I.getIdx() << ":" << I.getName();
+ if (I.isDef())
+ OS << "<def>";
+ Separator = ", ";
+ }
+}
+
+const GIMatchDagOperandList::value_type &GIMatchDagOperandList::
+operator[](StringRef K) const {
+ const auto &I = OperandsByName.find(K);
+ assert(I != OperandsByName.end() && "Operand not found by name");
+ return Operands[I->second];
+}
+
+const GIMatchDagOperandList &
+GIMatchDagOperandListContext::makeEmptyOperandList() {
+ FoldingSetNodeID ID;
+
+ void *InsertPoint;
+ GIMatchDagOperandList *Value =
+ OperandLists.FindNodeOrInsertPos(ID, InsertPoint);
+ if (Value)
+ return *Value;
+
+ std::unique_ptr<GIMatchDagOperandList> NewValue =
+ std::make_unique<GIMatchDagOperandList>();
+ OperandLists.InsertNode(NewValue.get(), InsertPoint);
+ OperandListsOwner.push_back(std::move(NewValue));
+ return *OperandListsOwner.back().get();
+}
+
+const GIMatchDagOperandList &
+GIMatchDagOperandListContext::makeOperandList(const CodeGenInstruction &I) {
+ FoldingSetNodeID ID;
+ for (unsigned i = 0; i < I.Operands.size(); ++i)
+ GIMatchDagOperand::Profile(ID, i, I.Operands[i].Name,
+ i < I.Operands.NumDefs);
+
+ void *InsertPoint;
+ GIMatchDagOperandList *Value =
+ OperandLists.FindNodeOrInsertPos(ID, InsertPoint);
+ if (Value)
+ return *Value;
+
+ std::unique_ptr<GIMatchDagOperandList> NewValue =
+ std::make_unique<GIMatchDagOperandList>();
+ for (unsigned i = 0; i < I.Operands.size(); ++i)
+ NewValue->add(I.Operands[i].Name, i, i < I.Operands.NumDefs);
+ OperandLists.InsertNode(NewValue.get(), InsertPoint);
+ OperandListsOwner.push_back(std::move(NewValue));
+ return *OperandListsOwner.back().get();
+}
+
+const GIMatchDagOperandList &
+GIMatchDagOperandListContext::makeMIPredicateOperandList() {
+ FoldingSetNodeID ID;
+ GIMatchDagOperand::Profile(ID, 0, "$", true);
+ GIMatchDagOperand::Profile(ID, 1, "mi", false);
+
+ void *InsertPoint;
+ GIMatchDagOperandList *Value =
+ OperandLists.FindNodeOrInsertPos(ID, InsertPoint);
+ if (Value)
+ return *Value;
+
+ std::unique_ptr<GIMatchDagOperandList> NewValue =
+ std::make_unique<GIMatchDagOperandList>();
+ NewValue->add("$", 0, true);
+ NewValue->add("mi", 1, false);
+ OperandLists.InsertNode(NewValue.get(), InsertPoint);
+ OperandListsOwner.push_back(std::move(NewValue));
+ return *OperandListsOwner.back().get();
+}
+
+
+const GIMatchDagOperandList &
+GIMatchDagOperandListContext::makeTwoMOPredicateOperandList() {
+ FoldingSetNodeID ID;
+ GIMatchDagOperand::Profile(ID, 0, "$", true);
+ GIMatchDagOperand::Profile(ID, 1, "mi0", false);
+ GIMatchDagOperand::Profile(ID, 2, "mi1", false);
+
+ void *InsertPoint;
+ GIMatchDagOperandList *Value =
+ OperandLists.FindNodeOrInsertPos(ID, InsertPoint);
+ if (Value)
+ return *Value;
+
+ std::unique_ptr<GIMatchDagOperandList> NewValue =
+ std::make_unique<GIMatchDagOperandList>();
+ NewValue->add("$", 0, true);
+ NewValue->add("mi0", 1, false);
+ NewValue->add("mi1", 2, false);
+ OperandLists.InsertNode(NewValue.get(), InsertPoint);
+ OperandListsOwner.push_back(std::move(NewValue));
+ return *OperandListsOwner.back().get();
+}
+
+void GIMatchDagOperandListContext::print(raw_ostream &OS) const {
+ OS << "GIMatchDagOperandListContext {\n"
+ << " OperandLists {\n";
+ for (const auto &I : OperandListsOwner) {
+ OS << " ";
+ I->print(OS);
+ OS << "\n";
+ }
+ OS << " }\n"
+ << "}\n";
+}
diff --git a/contrib/libs/llvm16/utils/TableGen/GlobalISel/GIMatchDagOperands.h b/contrib/libs/llvm16/utils/TableGen/GlobalISel/GIMatchDagOperands.h
new file mode 100644
index 0000000000..c2d3057423
--- /dev/null
+++ b/contrib/libs/llvm16/utils/TableGen/GlobalISel/GIMatchDagOperands.h
@@ -0,0 +1,133 @@
+//===- GIMatchDagOperands.h - Represent a shared operand list for nodes ---===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_UTILS_TABLEGEN_GIMATCHDAGOPERANDS_H
+#define LLVM_UTILS_TABLEGEN_GIMATCHDAGOPERANDS_H
+
+#include "llvm/ADT/FoldingSet.h"
+#include "llvm/ADT/StringMap.h"
+#include "llvm/ADT/StringRef.h"
+#include "llvm/Support/raw_ostream.h"
+
+#include <vector>
+
+namespace llvm {
+class CodeGenInstruction;
+/// Describes an operand of a MachineInstr w.r.t the DAG Matching. This
+/// information is derived from CodeGenInstruction::Operands but is more
+/// readily available for context-less access as we don't need to know which
+/// instruction it's used with or know how many defs that instruction had.
+///
+/// There may be multiple GIMatchDagOperand's with the same contents. However,
+/// they are uniqued within the set of instructions that have the same overall
+/// operand list. For example, given:
+/// Inst1 operands ($dst:<def>, $src1, $src2)
+/// Inst2 operands ($dst:<def>, $src1, $src2)
+/// Inst3 operands ($dst:<def>, $src)
+/// $src1 will have a single instance of GIMatchDagOperand shared by Inst1 and
+/// Inst2, as will $src2. $dst however, will have two instances one shared
+/// between Inst1 and Inst2 and one unique to Inst3. We could potentially
+/// fully de-dupe the GIMatchDagOperand instances but the saving is not expected
+/// to be worth the overhead.
+///
+/// The result of this is that the address of the object can be relied upon to
+/// trivially identify commonality between two instructions which will be useful
+/// when generating the matcher. When the pointers differ, the contents can be
+/// inspected instead.
+class GIMatchDagOperand {
+ unsigned Idx;
+ StringRef Name;
+ bool IsDef;
+
+public:
+ GIMatchDagOperand(unsigned Idx, StringRef Name, bool IsDef)
+ : Idx(Idx), Name(Name), IsDef(IsDef) {}
+
+ unsigned getIdx() const { return Idx; }
+ StringRef getName() const { return Name; }
+ bool isDef() const { return IsDef; }
+
+ /// This object isn't a FoldingSetNode but it's part of one. See FoldingSet
+ /// for details on the Profile function.
+ void Profile(FoldingSetNodeID &ID) const;
+
+ /// A helper that behaves like Profile() but is also usable without the object.
+ /// We use size_t here to match enumerate<...>::index(). If we don't match
+ /// that the hashes won't be equal.
+ static void Profile(FoldingSetNodeID &ID, size_t Idx, StringRef Name,
+ bool IsDef);
+};
+
+/// A list of GIMatchDagOperands for an instruction without any association with
+/// a particular instruction.
+///
+/// An important detail to be aware of with this class is that they are shared
+/// with other instructions of a similar 'shape'. For example, all the binary
+/// instructions are likely to share a single GIMatchDagOperandList. This is
+/// primarily a memory optimization as it's fairly common to have a large number
+/// of instructions but only a few 'shapes'.
+///
+/// See GIMatchDagOperandList::Profile() for the details on how they are folded.
+class GIMatchDagOperandList : public FoldingSetNode {
+public:
+ using value_type = GIMatchDagOperand;
+
+protected:
+ using vector_type = SmallVector<GIMatchDagOperand, 3>;
+
+public:
+ using iterator = vector_type::iterator;
+ using const_iterator = vector_type::const_iterator;
+
+protected:
+ vector_type Operands;
+ StringMap<unsigned> OperandsByName;
+
+public:
+ void add(StringRef Name, unsigned Idx, bool IsDef);
+
+ /// See FoldingSet for details.
+ void Profile(FoldingSetNodeID &ID) const;
+
+ iterator begin() { return Operands.begin(); }
+ const_iterator begin() const { return Operands.begin(); }
+ iterator end() { return Operands.end(); }
+ const_iterator end() const { return Operands.end(); }
+
+ const value_type &operator[](unsigned I) const { return Operands[I]; }
+ const value_type &operator[](StringRef K) const;
+
+ void print(raw_ostream &OS) const;
+#if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
+ LLVM_DUMP_METHOD void dump() const { print(errs()); }
+#endif // if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
+};
+
+/// This is the portion of GIMatchDagContext that directly relates to
+/// GIMatchDagOperand and GIMatchDagOperandList.
+class GIMatchDagOperandListContext {
+ FoldingSet<GIMatchDagOperandList> OperandLists;
+ std::vector<std::unique_ptr<GIMatchDagOperandList>> OperandListsOwner;
+
+public:
+ const GIMatchDagOperandList &makeEmptyOperandList();
+ const GIMatchDagOperandList &makeOperandList(const CodeGenInstruction &I);
+ const GIMatchDagOperandList &makeMIPredicateOperandList();
+ const GIMatchDagOperandList &makeTwoMOPredicateOperandList();
+
+ void print(raw_ostream &OS) const;
+#if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
+ LLVM_DUMP_METHOD void dump() const { print(errs()); }
+#endif // if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
+};
+
+} // end namespace llvm
+#endif // ifndef LLVM_UTILS_TABLEGEN_GIMATCHDAGOPERANDS_H
diff --git a/contrib/libs/llvm16/utils/TableGen/GlobalISel/GIMatchDagPredicate.cpp b/contrib/libs/llvm16/utils/TableGen/GlobalISel/GIMatchDagPredicate.cpp
new file mode 100644
index 0000000000..6a9e33ac51
--- /dev/null
+++ b/contrib/libs/llvm16/utils/TableGen/GlobalISel/GIMatchDagPredicate.cpp
@@ -0,0 +1,69 @@
+//===- GIMatchDagPredicate.cpp - Represent a predicate to check -----------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+#include "GIMatchDagPredicate.h"
+
+#include "llvm/TableGen/Record.h"
+
+#include "../CodeGenInstruction.h"
+#include "GIMatchDag.h"
+
+using namespace llvm;
+
+void GIMatchDagPredicate::print(raw_ostream &OS) const {
+ OS << "<<";
+ printDescription(OS);
+ OS << ">>:$" << Name;
+}
+
+void GIMatchDagPredicate::printDescription(raw_ostream &OS) const { OS << ""; }
+
+GIMatchDagOpcodePredicate::GIMatchDagOpcodePredicate(
+ GIMatchDagContext &Ctx, StringRef Name, const CodeGenInstruction &Instr)
+ : GIMatchDagPredicate(GIMatchDagPredicateKind_Opcode, Name,
+ Ctx.makeMIPredicateOperandList()),
+ Instr(Instr) {}
+
+void GIMatchDagOpcodePredicate::printDescription(raw_ostream &OS) const {
+ OS << "$mi.getOpcode() == " << Instr.TheDef->getName();
+}
+
+GIMatchDagOneOfOpcodesPredicate::GIMatchDagOneOfOpcodesPredicate(
+ GIMatchDagContext &Ctx, StringRef Name)
+ : GIMatchDagPredicate(GIMatchDagPredicateKind_OneOfOpcodes, Name,
+ Ctx.makeMIPredicateOperandList()) {}
+
+void GIMatchDagOneOfOpcodesPredicate::printDescription(raw_ostream &OS) const {
+ OS << "$mi.getOpcode() == oneof(";
+ StringRef Separator = "";
+ for (const CodeGenInstruction *Instr : Instrs) {
+ OS << Separator << Instr->TheDef->getName();
+ Separator = ",";
+ }
+ OS << ")";
+}
+
+GIMatchDagSameMOPredicate::GIMatchDagSameMOPredicate(GIMatchDagContext &Ctx,
+ StringRef Name)
+ : GIMatchDagPredicate(GIMatchDagPredicateKind_SameMO, Name,
+ Ctx.makeTwoMOPredicateOperandList()) {}
+
+void GIMatchDagSameMOPredicate::printDescription(raw_ostream &OS) const {
+ OS << "$mi0 == $mi1";
+}
+
+raw_ostream &llvm::operator<<(raw_ostream &OS, const GIMatchDagPredicate &N) {
+ N.print(OS);
+ return OS;
+}
+
+raw_ostream &llvm::operator<<(raw_ostream &OS,
+ const GIMatchDagOpcodePredicate &N) {
+ N.print(OS);
+ return OS;
+}
diff --git a/contrib/libs/llvm16/utils/TableGen/GlobalISel/GIMatchDagPredicate.h b/contrib/libs/llvm16/utils/TableGen/GlobalISel/GIMatchDagPredicate.h
new file mode 100644
index 0000000000..96fef21b76
--- /dev/null
+++ b/contrib/libs/llvm16/utils/TableGen/GlobalISel/GIMatchDagPredicate.h
@@ -0,0 +1,145 @@
+//===- GIMatchDagPredicate - Represent a predicate to check ---------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_UTILS_TABLEGEN_GIMATCHDAGPREDICATE_H
+#define LLVM_UTILS_TABLEGEN_GIMATCHDAGPREDICATE_H
+
+#include "llvm/ADT/SmallVector.h"
+#include "llvm/ADT/StringRef.h"
+
+#if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
+#include "llvm/Support/raw_ostream.h"
+#endif
+
+namespace llvm {
+class CodeExpansions;
+class CodeGenInstruction;
+class GIMatchDagOperandList;
+class GIMatchDagContext;
+class raw_ostream;
+
+/// Represents a predicate on the match DAG. This records the details of the
+/// predicate. The dependencies are stored in the GIMatchDag as edges.
+///
+/// Instances of this class are owned by the GIMatchDag and are not
+/// shareable between instances of GIMatchDag.
+class GIMatchDagPredicate {
+public:
+ enum GIMatchDagPredicateKind {
+ GIMatchDagPredicateKind_Opcode,
+ GIMatchDagPredicateKind_OneOfOpcodes,
+ GIMatchDagPredicateKind_SameMO,
+ };
+
+protected:
+ const GIMatchDagPredicateKind Kind;
+
+ /// The name of the predicate. For example:
+ /// (FOO $a:s32, $b, $c)
+ /// will cause 's32' to be assigned to this member for the $a predicate.
+ /// Similarly, the opcode predicate will cause 'FOO' to be assigned to this
+ /// member. Anonymous instructions will have a name assigned for debugging
+ /// purposes.
+ StringRef Name;
+
+ /// The operand list for this predicate. This object may be shared with
+ /// other predicates of a similar 'shape'.
+ const GIMatchDagOperandList &OperandInfo;
+
+public:
+ GIMatchDagPredicate(GIMatchDagPredicateKind Kind, StringRef Name,
+ const GIMatchDagOperandList &OperandInfo)
+ : Kind(Kind), Name(Name), OperandInfo(OperandInfo) {}
+ virtual ~GIMatchDagPredicate() {}
+
+ GIMatchDagPredicateKind getKind() const { return Kind; }
+
+ StringRef getName() const { return Name; }
+ const GIMatchDagOperandList &getOperandInfo() const { return OperandInfo; }
+
+ // Generate C++ code to check this predicate. If a partitioner has already
+ // tested this predicate then this function won't be called. If this function
+ // is called, it must emit code and return true to indicate that it did so. If
+ // it ever returns false, then the caller will abort due to an untested
+ // predicate.
+ virtual bool generateCheckCode(raw_ostream &OS, StringRef Indent,
+ const CodeExpansions &Expansions) const {
+ return false;
+ }
+
+ virtual void print(raw_ostream &OS) const;
+ virtual void printDescription(raw_ostream &OS) const;
+
+#if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
+ virtual LLVM_DUMP_METHOD void dump() const { print(errs()); }
+#endif // if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
+};
+
+class GIMatchDagOpcodePredicate : public GIMatchDagPredicate {
+ const CodeGenInstruction &Instr;
+
+public:
+ GIMatchDagOpcodePredicate(GIMatchDagContext &Ctx, StringRef Name,
+ const CodeGenInstruction &Instr);
+
+ static bool classof(const GIMatchDagPredicate *P) {
+ return P->getKind() == GIMatchDagPredicateKind_Opcode;
+ }
+
+ const CodeGenInstruction *getInstr() const { return &Instr; }
+
+ void printDescription(raw_ostream &OS) const override;
+
+#if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
+ LLVM_DUMP_METHOD void dump() const override { print(errs()); }
+#endif // if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
+};
+
+class GIMatchDagOneOfOpcodesPredicate : public GIMatchDagPredicate {
+ SmallVector<const CodeGenInstruction *, 4> Instrs;
+
+public:
+ GIMatchDagOneOfOpcodesPredicate(GIMatchDagContext &Ctx, StringRef Name);
+
+ void addOpcode(const CodeGenInstruction *Instr) { Instrs.push_back(Instr); }
+
+ static bool classof(const GIMatchDagPredicate *P) {
+ return P->getKind() == GIMatchDagPredicateKind_OneOfOpcodes;
+ }
+
+ const SmallVectorImpl<const CodeGenInstruction *> &getInstrs() const {
+ return Instrs;
+ }
+
+ void printDescription(raw_ostream &OS) const override;
+
+#if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
+ LLVM_DUMP_METHOD void dump() const override { print(errs()); }
+#endif // if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
+};
+
+class GIMatchDagSameMOPredicate : public GIMatchDagPredicate {
+public:
+ GIMatchDagSameMOPredicate(GIMatchDagContext &Ctx, StringRef Name);
+
+ static bool classof(const GIMatchDagPredicate *P) {
+ return P->getKind() == GIMatchDagPredicateKind_SameMO;
+ }
+
+ void printDescription(raw_ostream &OS) const override;
+
+#if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
+ LLVM_DUMP_METHOD void dump() const override { print(errs()); }
+#endif // if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
+};
+
+raw_ostream &operator<<(raw_ostream &OS, const GIMatchDagPredicate &N);
+raw_ostream &operator<<(raw_ostream &OS, const GIMatchDagOpcodePredicate &N);
+
+} // end namespace llvm
+#endif // ifndef LLVM_UTILS_TABLEGEN_GIMATCHDAGPREDICATE_H
diff --git a/contrib/libs/llvm16/utils/TableGen/GlobalISel/GIMatchDagPredicateDependencyEdge.cpp b/contrib/libs/llvm16/utils/TableGen/GlobalISel/GIMatchDagPredicateDependencyEdge.cpp
new file mode 100644
index 0000000000..921cbaf9c4
--- /dev/null
+++ b/contrib/libs/llvm16/utils/TableGen/GlobalISel/GIMatchDagPredicateDependencyEdge.cpp
@@ -0,0 +1,38 @@
+//===- GIMatchDagPredicateDependencyEdge.cpp - Have inputs before check ---===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+#include "GIMatchDagPredicateDependencyEdge.h"
+
+#include "GIMatchDagInstr.h"
+#include "GIMatchDagOperands.h"
+#include "GIMatchDagPredicate.h"
+
+#include "llvm/Support/raw_ostream.h"
+
+using namespace llvm;
+
+LLVM_DUMP_METHOD void
+GIMatchDagPredicateDependencyEdge::print(raw_ostream &OS) const {
+ OS << getRequiredMI()->getName();
+ if (getRequiredMO())
+ OS << "[" << getRequiredMO()->getName() << "]";
+ OS << " ==> " << getPredicate()->getName() << "["
+ << getPredicateOp()->getName() << "]";
+}
+
+#if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
+LLVM_DUMP_METHOD void GIMatchDagPredicateDependencyEdge::dump() const {
+ print(errs());
+}
+#endif // if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
+
+raw_ostream &llvm::operator<<(raw_ostream &OS,
+ const GIMatchDagPredicateDependencyEdge &E) {
+ E.print(OS);
+ return OS;
+}
diff --git a/contrib/libs/llvm16/utils/TableGen/GlobalISel/GIMatchDagPredicateDependencyEdge.h b/contrib/libs/llvm16/utils/TableGen/GlobalISel/GIMatchDagPredicateDependencyEdge.h
new file mode 100644
index 0000000000..af91afc607
--- /dev/null
+++ b/contrib/libs/llvm16/utils/TableGen/GlobalISel/GIMatchDagPredicateDependencyEdge.h
@@ -0,0 +1,61 @@
+//===- GIMatchDagPredicateDependencyEdge - Ensure predicates have inputs --===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_UTILS_TABLEGEN_GIMATCHDAGPREDICATEEDGE_H
+#define LLVM_UTILS_TABLEGEN_GIMATCHDAGPREDICATEEDGE_H
+
+#include "llvm/Support/Compiler.h"
+
+namespace llvm {
+class GIMatchDagInstr;
+class GIMatchDagPredicate;
+class GIMatchDagOperand;
+
+class raw_ostream;
+
+/// Represents a dependency that must be met to evaluate a predicate.
+///
+/// Instances of this class are owned by the GIMatchDag and are not
+/// shareable between instances of GIMatchDag.
+class GIMatchDagPredicateDependencyEdge {
+ /// The MI that must be available in order to test the predicate.
+ const GIMatchDagInstr *RequiredMI;
+ /// The MO that must be available in order to test the predicate. May be
+ /// nullptr when only the MI is required.
+ const GIMatchDagOperand *RequiredMO;
+ /// The Predicate that requires information from RequiredMI/RequiredMO.
+ const GIMatchDagPredicate *Predicate;
+ /// The Predicate operand that requires information from
+ /// RequiredMI/RequiredMO.
+ const GIMatchDagOperand *PredicateOp;
+
+public:
+ GIMatchDagPredicateDependencyEdge(const GIMatchDagInstr *RequiredMI,
+ const GIMatchDagOperand *RequiredMO,
+ const GIMatchDagPredicate *Predicate,
+ const GIMatchDagOperand *PredicateOp)
+ : RequiredMI(RequiredMI), RequiredMO(RequiredMO), Predicate(Predicate),
+ PredicateOp(PredicateOp) {}
+
+ const GIMatchDagInstr *getRequiredMI() const { return RequiredMI; }
+ const GIMatchDagOperand *getRequiredMO() const { return RequiredMO; }
+ const GIMatchDagPredicate *getPredicate() const { return Predicate; }
+ const GIMatchDagOperand *getPredicateOp() const { return PredicateOp; }
+
+ void print(raw_ostream &OS) const;
+
+#if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
+ LLVM_DUMP_METHOD void dump() const;
+#endif // if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
+};
+
+raw_ostream &operator<<(raw_ostream &OS,
+ const GIMatchDagPredicateDependencyEdge &N);
+
+} // end namespace llvm
+#endif // ifndef LLVM_UTILS_TABLEGEN_GIMATCHDAGPREDICATEEDGE_H
diff --git a/contrib/libs/llvm16/utils/TableGen/GlobalISel/GIMatchTree.cpp b/contrib/libs/llvm16/utils/TableGen/GlobalISel/GIMatchTree.cpp
new file mode 100644
index 0000000000..d98884493e
--- /dev/null
+++ b/contrib/libs/llvm16/utils/TableGen/GlobalISel/GIMatchTree.cpp
@@ -0,0 +1,786 @@
+//===- GIMatchTree.cpp - A decision tree to match GIMatchDag's ------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+#include "GIMatchTree.h"
+#include "GIMatchDagPredicate.h"
+
+#include "../CodeGenInstruction.h"
+
+#include "llvm/Support/Debug.h"
+#include "llvm/Support/Format.h"
+#include "llvm/Support/ScopedPrinter.h"
+#include "llvm/Support/raw_ostream.h"
+#include "llvm/TableGen/Error.h"
+#include "llvm/TableGen/Record.h"
+
+#define DEBUG_TYPE "gimatchtree"
+
+using namespace llvm;
+
+// Render the whole decision tree as a GraphViz digraph (debugging aid).
+// Wraps the recursive per-node emission in the digraph header/footer.
+void GIMatchTree::writeDOTGraph(raw_ostream &OS) const {
+  OS << "digraph \"matchtree\" {\n";
+  writeDOTGraphNode(OS);
+  OS << "}\n";
+}
+
+// Emit this node as a DOT record labelled with its partitioner description,
+// partition count, and the names of the rules still possible here, then
+// recurse into the children. Leaf nodes that are not fully traversed/tested,
+// or that still hold more than one rule, are colored red so problems stand
+// out in the rendered graph.
+void GIMatchTree::writeDOTGraphNode(raw_ostream &OS) const {
+  OS << format("  Node%p", this) << " [shape=record,label=\"{";
+  if (Partitioner) {
+    Partitioner->emitDescription(OS);
+    OS << "|" << Partitioner->getNumPartitions() << " partitions|";
+  } else
+    OS << "No partitioner|";
+  // Aggregate traversal/testing status over all rules at this node.
+  bool IsFullyTraversed = true;
+  bool IsFullyTested = true;
+  StringRef Separator = "";
+  for (const auto &Leaf : PossibleLeaves) {
+    OS << Separator << Leaf.getName();
+    Separator = ",";
+    if (!Leaf.isFullyTraversed())
+      IsFullyTraversed = false;
+    if (!Leaf.isFullyTested())
+      IsFullyTested = false;
+  }
+  // Only leaf nodes (no partitioner) report incompleteness.
+  if (!Partitioner && !IsFullyTraversed)
+    OS << "|Not fully traversed";
+  if (!Partitioner && !IsFullyTested) {
+    OS << "|Not fully tested";
+    if (IsFullyTraversed) {
+      // List each insufficiently-tested rule with its untested predicates.
+      for (const GIMatchTreeLeafInfo &Leaf : PossibleLeaves) {
+        if (Leaf.isFullyTested())
+          continue;
+        OS << "\\n" << Leaf.getName() << ": " << &Leaf;
+        for (const GIMatchDagPredicate *P : Leaf.untested_predicates())
+          OS << *P;
+      }
+    }
+  }
+  OS << "}\"";
+  if (!Partitioner &&
+      (!IsFullyTraversed || !IsFullyTested || PossibleLeaves.size() > 1))
+    OS << ",color=red";
+  OS << "]\n";
+  for (const auto &C : Children)
+    C.writeDOTGraphNode(OS);
+  writeDOTGraphEdges(OS);
+}
+
+// Emit one DOT edge from this node to each child, labelled with the child's
+// partition index and the partitioner's name for that partition.
+// NOTE(review): Partitioner is dereferenced unconditionally here; this
+// assumes it is always set when Children is non-empty — confirm upstream.
+void GIMatchTree::writeDOTGraphEdges(raw_ostream &OS) const {
+  for (const auto &Child : enumerate(Children)) {
+    OS << format("  Node%p", this) << " -> " << format("Node%p", &Child.value())
+       << " [label=\"#" << Child.index() << " ";
+    Partitioner->emitPartitionName(OS, Child.index());
+    OS << "\"]\n";
+  }
+}
+
+// Per-matchable builder state: numbers the DAG's predicates and predicate
+// dependency edges, and initializes the bitvectors tracking which instr
+// nodes / edges / predicates remain to be handled while the tree is built.
+// All Remaining* vectors start full; Traversable/Testable start empty and
+// are populated as instructions and operands are declared.
+GIMatchTreeBuilderLeafInfo::GIMatchTreeBuilderLeafInfo(
+    GIMatchTreeBuilder &Builder, StringRef Name, unsigned RootIdx,
+    const GIMatchDag &MatchDag, void *Data)
+    : Builder(Builder), Info(Name, RootIdx, Data), MatchDag(MatchDag),
+      RemainingInstrNodes(BitVector(MatchDag.getNumInstrNodes(), true)),
+      RemainingEdges(BitVector(MatchDag.getNumEdges(), true)),
+      RemainingPredicates(BitVector(MatchDag.getNumPredicates(), true)),
+      TraversableEdges(MatchDag.getNumEdges()),
+      TestablePredicates(MatchDag.getNumPredicates()) {
+  // Number all the predicates in this DAG
+  for (auto &P : enumerate(MatchDag.predicates())) {
+    PredicateIDs.insert(std::make_pair(P.value(), P.index()));
+  }
+
+  // Number all the predicate dependencies in this DAG and set up a bitvector
+  // for each predicate indicating the unsatisfied dependencies.
+  for (auto &Dep : enumerate(MatchDag.predicate_edges())) {
+    PredicateDepIDs.insert(std::make_pair(Dep.value(), Dep.index()));
+  }
+  UnsatisfiedPredDepsForPred.resize(MatchDag.getNumPredicates(),
+                                    BitVector(PredicateDepIDs.size()));
+  for (auto &Dep : enumerate(MatchDag.predicate_edges())) {
+    unsigned ID = PredicateIDs.lookup(Dep.value()->getPredicate());
+    UnsatisfiedPredDepsForPred[ID].set(Dep.index());
+  }
+}
+
+// Record that DAG node Instr has been bound to matched-instruction slot ID
+// for this leaf: bind any user-assigned names, mark the instr node visited,
+// and enable any predicates whose MI-only dependencies are now satisfied.
+// Note: a nullptr Instr still gets an entry in both ID maps before the
+// early return below.
+void GIMatchTreeBuilderLeafInfo::declareInstr(const GIMatchDagInstr *Instr, unsigned ID) {
+  // Record the assignment of this instr to the given ID.
+  auto InfoI = InstrNodeToInfo.insert(std::make_pair(
+      Instr, GIMatchTreeInstrInfo(ID, Instr)));
+  InstrIDToInfo.insert(std::make_pair(ID, &InfoI.first->second));
+
+  if (Instr == nullptr)
+    return;
+
+  if (!Instr->getUserAssignedName().empty())
+    Info.bindInstrVariable(Instr->getUserAssignedName(), ID);
+  for (const auto &VarBinding : Instr->user_assigned_operand_names())
+    Info.bindOperandVariable(VarBinding.second, ID, VarBinding.first);
+
+  // Clear the bit indicating we haven't visited this instr.
+  const auto &NodeI = find(MatchDag.instr_nodes(), Instr);
+  assert(NodeI != MatchDag.instr_nodes_end() && "Instr isn't in this DAG");
+  unsigned InstrIdx = MatchDag.getInstrNodeIdx(NodeI);
+  RemainingInstrNodes.reset(InstrIdx);
+
+  // When we declare an instruction, we don't expose any traversable edges just
+  // yet. A partitioner has to check they exist and are registers before they
+  // are traversable.
+
+  // When we declare an instruction, we potentially activate some predicates.
+  // Mark the dependencies that are now satisfied as a result of this
+  // instruction and mark any predicates whose dependencies are fully
+  // satisfied.
+  for (auto &Dep : enumerate(MatchDag.predicate_edges())) {
+    if (Dep.value()->getRequiredMI() == Instr &&
+        Dep.value()->getRequiredMO() == nullptr) {
+      for (auto &DepsFor : enumerate(UnsatisfiedPredDepsForPred)) {
+        DepsFor.value().reset(Dep.index());
+        if (DepsFor.value().none())
+          TestablePredicates.set(DepsFor.index());
+      }
+    }
+  }
+}
+
+// Record that operand OpIdx of matched instruction InstrID is now known to
+// exist: this unlocks any outgoing edges from that operand (traversals) and
+// any predicates whose remaining dependency was this operand.
+void GIMatchTreeBuilderLeafInfo::declareOperand(unsigned InstrID,
+                                                unsigned OpIdx) {
+  const GIMatchDagInstr *Instr = InstrIDToInfo.lookup(InstrID)->getInstrNode();
+
+  OperandIDToInfo.insert(std::make_pair(
+      std::make_pair(InstrID, OpIdx),
+      GIMatchTreeOperandInfo(Instr, OpIdx)));
+
+  // When an operand becomes reachable, we potentially activate some traversals.
+  // Record the edges that can now be followed as a result of this
+  // instruction.
+  for (auto &E : enumerate(MatchDag.edges())) {
+    if (E.value()->getFromMI() == Instr &&
+        E.value()->getFromMO()->getIdx() == OpIdx) {
+      TraversableEdges.set(E.index());
+    }
+  }
+
+  // When an operand becomes reachable, we potentially activate some predicates.
+  // Clear the dependencies that are now satisfied as a result of this
+  // operand and activate any predicates whose dependencies are fully
+  // satisfied.
+  for (auto &Dep : enumerate(MatchDag.predicate_edges())) {
+    if (Dep.value()->getRequiredMI() == Instr && Dep.value()->getRequiredMO() &&
+        Dep.value()->getRequiredMO()->getIdx() == OpIdx) {
+      for (auto &DepsFor : enumerate(UnsatisfiedPredDepsForPred)) {
+        DepsFor.value().reset(Dep.index());
+        if (DepsFor.value().none())
+          TestablePredicates.set(DepsFor.index());
+      }
+    }
+  }
+}
+
+// Register the partitioners that become applicable once instruction
+// MIs[InstrIdx] is known to exist. Currently only the opcode check.
+void GIMatchTreeBuilder::addPartitionersForInstr(unsigned InstrIdx) {
+  // Find the partitioners that can be used now that this node is
+  // uncovered. Our choices are:
+  // - Test the opcode
+  addPartitioner(std::make_unique<GIMatchTreeOpcodePartitioner>(InstrIdx));
+}
+
+// Register the partitioners that become applicable once operand OpIdx of
+// MIs[InstrID] is known to exist. Currently only the vreg-def traversal.
+void GIMatchTreeBuilder::addPartitionersForOperand(unsigned InstrID,
+                                                   unsigned OpIdx) {
+  LLVM_DEBUG(dbgs() << "Add partitioners for Instrs[" << InstrID
+                    << "].getOperand(" << OpIdx << ")\n");
+  addPartitioner(
+      std::make_unique<GIMatchTreeVRegDefPartitioner>(InstrID, OpIdx));
+}
+
+// Placeholder: intended to drop partitioners whose outcome is already implied
+// by facts established earlier in the tree. Currently a no-op.
+void GIMatchTreeBuilder::filterRedundantPartitioners() {
+  // TODO: Filter partitioners for facts that are already known
+  // - If we know the opcode, we can elide the num operand check so long as
+  //   the instruction has a fixed number of operands.
+  // - If we know an exact number of operands then we can elide further number
+  //   of operand checks.
+  // - If the current min number of operands exceeds the one we want to check
+  //   then we can elide it.
+}
+
+// Ask every candidate partitioner to compute the partitioning it would
+// produce for the current leaves; runStep() then picks the best of them.
+void GIMatchTreeBuilder::evaluatePartitioners() {
+  // Determine the partitioning the partitioner would produce
+  for (auto &Partitioner : Partitioners) {
+    LLVM_DEBUG(dbgs() << "    Weighing up ";
+               Partitioner->emitDescription(dbgs()); dbgs() << "\n");
+    Partitioner->repartition(Leaves);
+    LLVM_DEBUG(Partitioner->emitPartitionResults(dbgs()));
+  }
+}
+
+// Build the subtree rooted at TreeNode for the current leaf set:
+// - record every still-possible leaf on the node,
+// - diagnose rules made unreachable by an earlier fully-tested rule,
+// - if no partitioners remain, finalize the node (drop leaves after the
+//   first fully-tested one and export any untested predicates), else
+// - pick the partitioner minimizing average leaves per partition, split
+//   the leaves, and recurse into one sub-builder per partition.
+void GIMatchTreeBuilder::runStep() {
+  LLVM_DEBUG(dbgs() << "Building match tree node for " << TreeNode << "\n");
+  LLVM_DEBUG(dbgs() << "  Rules reachable at this node:\n");
+  for (const auto &Leaf : Leaves) {
+    LLVM_DEBUG(dbgs() << "    " << Leaf.getName() << " (" << &Leaf.getInfo() << "\n");
+    TreeNode->addPossibleLeaf(Leaf.getInfo(), Leaf.isFullyTraversed(),
+                              Leaf.isFullyTested());
+  }
+
+  LLVM_DEBUG(dbgs() << "  Partitioners available at this node:\n");
+#ifndef NDEBUG
+  for (const auto &Partitioner : Partitioners)
+    LLVM_DEBUG(dbgs() << "    "; Partitioner->emitDescription(dbgs());
+               dbgs() << "\n");
+#endif // ifndef NDEBUG
+
+  // Check for unreachable rules. Rules are unreachable if they are preceded by
+  // a fully tested rule.
+  // Note: This is only true for the current algorithm, if we allow the
+  //       algorithm to compare equally valid rules then they will become
+  //       reachable.
+  {
+    auto FullyTestedLeafI = Leaves.end();
+    for (auto LeafI = Leaves.begin(), LeafE = Leaves.end();
+         LeafI != LeafE; ++LeafI) {
+      if (LeafI->isFullyTraversed() && LeafI->isFullyTested())
+        FullyTestedLeafI = LeafI;
+      else if (FullyTestedLeafI != Leaves.end()) {
+        PrintError("Leaf " + LeafI->getName() + " is unreachable");
+        PrintNote("Leaf " + FullyTestedLeafI->getName() +
+                  " will have already matched");
+      }
+    }
+  }
+
+  LLVM_DEBUG(dbgs() << "  Eliminating redundant partitioners:\n");
+  filterRedundantPartitioners();
+  LLVM_DEBUG(dbgs() << "  Partitioners remaining:\n");
+#ifndef NDEBUG
+  for (const auto &Partitioner : Partitioners)
+    LLVM_DEBUG(dbgs() << "    "; Partitioner->emitDescription(dbgs());
+               dbgs() << "\n");
+#endif // ifndef NDEBUG
+
+  if (Partitioners.empty()) {
+    // Nothing left to do but check we really did identify a single rule.
+    if (Leaves.size() > 1) {
+      LLVM_DEBUG(dbgs() << "Leaf contains multiple rules, drop after the first "
+                           "fully tested rule\n");
+      // Rules with post-match predicates may still fail at runtime, so the
+      // cut point is the first fully-tested rule WITHOUT one.
+      auto FirstFullyTested =
+          llvm::find_if(Leaves, [](const GIMatchTreeBuilderLeafInfo &X) {
+            return X.isFullyTraversed() && X.isFullyTested() &&
+                   !X.getMatchDag().hasPostMatchPredicate();
+          });
+      if (FirstFullyTested != Leaves.end())
+        FirstFullyTested++;
+
+#ifndef NDEBUG
+      for (auto &Leaf : make_range(Leaves.begin(), FirstFullyTested))
+        LLVM_DEBUG(dbgs() << "  Kept " << Leaf.getName() << "\n");
+      for (const auto &Leaf : make_range(FirstFullyTested, Leaves.end()))
+        LLVM_DEBUG(dbgs() << "  Dropped " << Leaf.getName() << "\n");
+#endif // ifndef NDEBUG
+      TreeNode->dropLeavesAfter(
+          std::distance(Leaves.begin(), FirstFullyTested));
+    }
+    // A leaf that is not fully traversed at this point indicates a bug in
+    // the partitioner set, not in the user's rules.
+    for (const auto &Leaf : Leaves) {
+      if (!Leaf.isFullyTraversed()) {
+        PrintError("Leaf " + Leaf.getName() + " is not fully traversed");
+        PrintNote("This indicates a missing partitioner within tblgen");
+        Leaf.dump(errs());
+        for (unsigned InstrIdx : Leaf.untested_instrs())
+          PrintNote("Instr " + llvm::to_string(*Leaf.getInstr(InstrIdx)));
+        for (unsigned EdgeIdx : Leaf.untested_edges())
+          PrintNote("Edge " + llvm::to_string(*Leaf.getEdge(EdgeIdx)));
+      }
+    }
+
+    // Copy out information about untested predicates so the user of the tree
+    // can deal with them.
+    for (auto LeafPair : zip(Leaves, TreeNode->possible_leaves())) {
+      const GIMatchTreeBuilderLeafInfo &BuilderLeaf = std::get<0>(LeafPair);
+      GIMatchTreeLeafInfo &TreeLeaf = std::get<1>(LeafPair);
+      if (!BuilderLeaf.isFullyTested())
+        for (unsigned PredicateIdx : BuilderLeaf.untested_predicates())
+          TreeLeaf.addUntestedPredicate(BuilderLeaf.getPredicate(PredicateIdx));
+    }
+    return;
+  }
+
+  LLVM_DEBUG(dbgs() << "  Weighing up partitioners:\n");
+  evaluatePartitioners();
+
+  // Select the best partitioner by its ability to partition
+  // - Prefer partitioners that don't distinguish between partitions. This
+  //   is to fail early on decisions that must go a single way.
+  auto PartitionerI = std::max_element(
+      Partitioners.begin(), Partitioners.end(),
+      [](const std::unique_ptr<GIMatchTreePartitioner> &A,
+         const std::unique_ptr<GIMatchTreePartitioner> &B) {
+        // We generally want partitioners that subdivide the
+        // ruleset as much as possible since these take fewer
+        // checks to converge on a particular rule. However,
+        // it's important to note that one leaf can end up in
+        // multiple partitions if the check isn't mutually
+        // exclusive (e.g. getVRegDef() vs isReg()).
+        // We therefore minimize average leaves per partition.
+        return (double)A->getNumLeavesWithDupes() / A->getNumPartitions() >
+               (double)B->getNumLeavesWithDupes() / B->getNumPartitions();
+      });
+
+  // Select a partitioner and partition the ruleset
+  // Note that it's possible for a single rule to end up in multiple
+  // partitions. For example, an opcode test on a rule without an opcode
+  // predicate will result in it being passed to all partitions.
+  std::unique_ptr<GIMatchTreePartitioner> Partitioner = std::move(*PartitionerI);
+  Partitioners.erase(PartitionerI);
+  LLVM_DEBUG(dbgs() << "  Selected partitioner: ";
+             Partitioner->emitDescription(dbgs()); dbgs() << "\n");
+
+  assert(Partitioner->getNumPartitions() > 0 &&
+         "Must always partition into at least one partition");
+
+  TreeNode->setNumChildren(Partitioner->getNumPartitions());
+  for (auto &C : enumerate(TreeNode->children())) {
+    SubtreeBuilders.emplace_back(&C.value(), NextInstrID);
+    Partitioner->applyForPartition(C.index(), *this, SubtreeBuilders.back());
+  }
+
+  TreeNode->setPartitioner(std::move(Partitioner));
+
+  // Recurse into the subtree builders. Each one must get a copy of the
+  // remaining partitioners as each path has to check everything.
+  for (auto &SubtreeBuilder : SubtreeBuilders) {
+    for (const auto &Partitioner : Partitioners)
+      SubtreeBuilder.addPartitioner(Partitioner->clone());
+    SubtreeBuilder.runStep();
+  }
+}
+
+// Entry point: declare each rule's chosen root instruction as matched-instr
+// slot #0, seed the initial (opcode) partitioner, and recursively build the
+// decision tree via runStep(). Returns ownership of the tree root.
+std::unique_ptr<GIMatchTree> GIMatchTreeBuilder::run() {
+  unsigned NewInstrID = allocInstrID();
+  // Start by recording the root instruction as instr #0 and set up the initial
+  // partitioners.
+  for (auto &Leaf : Leaves) {
+    LLVM_DEBUG(Leaf.getMatchDag().writeDOTGraph(dbgs(), Leaf.getName()));
+    GIMatchDagInstr *Root =
+        *(Leaf.getMatchDag().roots().begin() + Leaf.getRootIdx());
+    Leaf.declareInstr(Root, NewInstrID);
+  }
+
+  addPartitionersForInstr(NewInstrID);
+
+  std::unique_ptr<GIMatchTree> TreeRoot = std::make_unique<GIMatchTree>();
+  TreeNode = TreeRoot.get();
+  runStep();
+
+  return TreeRoot;
+}
+
+// Print a human-readable name for partition Idx: the qualified opcode it
+// selects, or a wildcard for the default (nullptr) partition.
+void GIMatchTreeOpcodePartitioner::emitPartitionName(raw_ostream &OS, unsigned Idx) const {
+  if (PartitionToInstr[Idx] == nullptr) {
+    OS << "* or nullptr";
+    return;
+  }
+  OS << PartitionToInstr[Idx]->Namespace
+     << "::" << PartitionToInstr[Idx]->TheDef->getName();
+}
+
+// Partition the leaves by the opcode they require for MIs[InstrID]. Each
+// distinct expected opcode gets a numbered partition; leaves that never test
+// the opcode (or don't reference the instruction at all) go into every
+// partition, via a nullptr "default" partition. Also records, per leaf,
+// which predicates this partitioner would consume (TestedPredicates).
+void GIMatchTreeOpcodePartitioner::repartition(
+    GIMatchTreeBuilder::LeafVec &Leaves) {
+  Partitions.clear();
+  InstrToPartition.clear();
+  PartitionToInstr.clear();
+  TestedPredicates.clear();
+
+  for (const auto &Leaf : enumerate(Leaves)) {
+    bool AllOpcodes = true;
+    GIMatchTreeInstrInfo *InstrInfo = Leaf.value().getInstrInfo(InstrID);
+    BitVector TestedPredicatesForLeaf(
+        Leaf.value().getMatchDag().getNumPredicates());
+
+    // If the instruction isn't declared then we don't care about it. Ignore
+    // it for now and add it to all partitions later once we know what
+    // partitions we have.
+    if (!InstrInfo) {
+      LLVM_DEBUG(dbgs() << "    " << Leaf.value().getName()
+                        << " doesn't care about Instr[" << InstrID << "]\n");
+      assert(TestedPredicatesForLeaf.size() == Leaf.value().getMatchDag().getNumPredicates());
+      TestedPredicates.push_back(TestedPredicatesForLeaf);
+      continue;
+    }
+
+    // If the opcode is available to test then any opcode predicates will have
+    // been enabled too.
+    for (unsigned PIdx : Leaf.value().TestablePredicates.set_bits()) {
+      const auto &P = Leaf.value().getPredicate(PIdx);
+      SmallVector<const CodeGenInstruction *, 1> OpcodesForThisPredicate;
+      if (const auto *OpcodeP = dyn_cast<const GIMatchDagOpcodePredicate>(P)) {
+        // We've found _an_ opcode predicate, but we don't know if it's
+        // checking this instruction yet.
+        bool IsThisPredicate = false;
+        for (const auto &PDep : Leaf.value().getMatchDag().predicate_edges()) {
+          if (PDep->getRequiredMI() == InstrInfo->getInstrNode() &&
+              PDep->getRequiredMO() == nullptr && PDep->getPredicate() == P) {
+            IsThisPredicate = true;
+            break;
+          }
+        }
+        if (!IsThisPredicate)
+          continue;
+
+        // If we get here twice then we've somehow ended up with two opcode
+        // predicates for one instruction in the same DAG. That should be
+        // impossible.
+        assert(AllOpcodes && "Conflicting opcode predicates");
+        const CodeGenInstruction *Expected = OpcodeP->getInstr();
+        OpcodesForThisPredicate.push_back(Expected);
+      }
+
+      // Same dependency check as above, but for oneof(opcodes) predicates
+      // which contribute several candidate opcodes at once.
+      if (const auto *OpcodeP =
+              dyn_cast<const GIMatchDagOneOfOpcodesPredicate>(P)) {
+        // We've found _an_ oneof(opcodes) predicate, but we don't know if it's
+        // checking this instruction yet.
+        bool IsThisPredicate = false;
+        for (const auto &PDep : Leaf.value().getMatchDag().predicate_edges()) {
+          if (PDep->getRequiredMI() == InstrInfo->getInstrNode() &&
+              PDep->getRequiredMO() == nullptr && PDep->getPredicate() == P) {
+            IsThisPredicate = true;
+            break;
+          }
+        }
+        if (!IsThisPredicate)
+          continue;
+
+        // If we get here twice then we've somehow ended up with two opcode
+        // predicates for one instruction in the same DAG. That should be
+        // impossible.
+        assert(AllOpcodes && "Conflicting opcode predicates");
+        append_range(OpcodesForThisPredicate, OpcodeP->getInstrs());
+      }
+
+      for (const CodeGenInstruction *Expected : OpcodesForThisPredicate) {
+        // Mark this predicate as one we're testing.
+        TestedPredicatesForLeaf.set(PIdx);
+
+        // Partitions must be numbered 0, 1, .., N but instructions don't meet
+        // that requirement. Assign a partition number to each opcode if we
+        // lack one ...
+        auto Partition = InstrToPartition.find(Expected);
+        if (Partition == InstrToPartition.end()) {
+          BitVector Contents(Leaves.size());
+          Partition = InstrToPartition
+                          .insert(std::make_pair(Expected, Partitions.size()))
+                          .first;
+          PartitionToInstr.push_back(Expected);
+          Partitions.insert(std::make_pair(Partitions.size(), Contents));
+        }
+        // ... and mark this leaf as being in that partition.
+        Partitions.find(Partition->second)->second.set(Leaf.index());
+        AllOpcodes = false;
+        LLVM_DEBUG(dbgs() << "    " << Leaf.value().getName()
+                          << " is in partition " << Partition->second << "\n");
+      }
+
+      // TODO: This is where we would handle multiple choices of opcode
+      //       the end result will be that this leaf ends up in multiple
+      //       partitions similarly to AllOpcodes.
+    }
+
+    // If we never check the opcode, add it to every partition.
+    if (AllOpcodes) {
+      // Add a partition for the default case if we don't already have one.
+      if (InstrToPartition.insert(std::make_pair(nullptr, 0)).second) {
+        PartitionToInstr.push_back(nullptr);
+        BitVector Contents(Leaves.size());
+        Partitions.insert(std::make_pair(Partitions.size(), Contents));
+      }
+      LLVM_DEBUG(dbgs() << "    " << Leaf.value().getName()
+                        << " is in all partitions (opcode not checked)\n");
+      for (auto &Partition : Partitions)
+        Partition.second.set(Leaf.index());
+    }
+
+    assert(TestedPredicatesForLeaf.size() == Leaf.value().getMatchDag().getNumPredicates());
+    TestedPredicates.push_back(TestedPredicatesForLeaf);
+  }
+
+  // NOTE(review): this "ensure a default partition exists" idiom appears
+  // three times in this function; a private helper would remove the
+  // duplication.
+  if (Partitions.size() == 0) {
+    // Add a partition for the default case if we don't already have one.
+    if (InstrToPartition.insert(std::make_pair(nullptr, 0)).second) {
+      PartitionToInstr.push_back(nullptr);
+      BitVector Contents(Leaves.size());
+      Partitions.insert(std::make_pair(Partitions.size(), Contents));
+    }
+  }
+
+  // Add any leaves that don't care about this instruction to all partitions.
+  for (const auto &Leaf : enumerate(Leaves)) {
+    GIMatchTreeInstrInfo *InstrInfo = Leaf.value().getInstrInfo(InstrID);
+    if (!InstrInfo) {
+      // Add a partition for the default case if we don't already have one.
+      if (InstrToPartition.insert(std::make_pair(nullptr, 0)).second) {
+        PartitionToInstr.push_back(nullptr);
+        BitVector Contents(Leaves.size());
+        Partitions.insert(std::make_pair(Partitions.size(), Contents));
+      }
+      for (auto &Partition : Partitions)
+        Partition.second.set(Leaf.index());
+    }
+  }
+
+}
+
+// Populate SubBuilder with the leaves belonging to partition PartitionIdx,
+// consuming the opcode predicates this partitioner tested. If the partition
+// pins a concrete opcode (CGI != nullptr), additionally declare every operand
+// that is both referenced by some leaf and guaranteed to exist by the
+// opcode's operand count, and register per-operand partitioners for them.
+void GIMatchTreeOpcodePartitioner::applyForPartition(
+    unsigned PartitionIdx, GIMatchTreeBuilder &Builder, GIMatchTreeBuilder &SubBuilder) {
+  LLVM_DEBUG(dbgs() << "  Making partition " << PartitionIdx << "\n");
+  const CodeGenInstruction *CGI = PartitionToInstr[PartitionIdx];
+
+  BitVector PossibleLeaves = getPossibleLeavesForPartition(PartitionIdx);
+  // Consume any predicates we handled.
+  for (auto &EnumeratedLeaf : enumerate(Builder.getPossibleLeaves())) {
+    if (!PossibleLeaves[EnumeratedLeaf.index()])
+      continue;
+
+    auto &Leaf = EnumeratedLeaf.value();
+    const auto &TestedPredicatesForLeaf =
+        TestedPredicates[EnumeratedLeaf.index()];
+
+    for (unsigned PredIdx : TestedPredicatesForLeaf.set_bits()) {
+      LLVM_DEBUG(dbgs() << "    " << Leaf.getName() << " tested predicate #"
+                        << PredIdx << " of " << TestedPredicatesForLeaf.size()
+                        << " " << *Leaf.getPredicate(PredIdx) << "\n");
+      Leaf.RemainingPredicates.reset(PredIdx);
+      Leaf.TestablePredicates.reset(PredIdx);
+    }
+    SubBuilder.addLeaf(Leaf);
+  }
+
+  // Nothing to do, we don't know anything about this instruction as a result
+  // of this partitioner.
+  if (CGI == nullptr)
+    return;
+
+  GIMatchTreeBuilder::LeafVec &NewLeaves = SubBuilder.getPossibleLeaves();
+  // Find all the operands we know to exist and are referenced. This will
+  // usually be all the referenced operands but there are some cases where
+  // instructions are variadic. Such operands must be handled by partitioners
+  // that check the number of operands.
+  BitVector ReferencedOperands(1);
+  for (auto &Leaf : NewLeaves) {
+    GIMatchTreeInstrInfo *InstrInfo = Leaf.getInstrInfo(InstrID);
+    // Skip any leaves that don't care about this instruction.
+    if (!InstrInfo)
+      continue;
+    const GIMatchDagInstr *Instr = InstrInfo->getInstrNode();
+    for (auto &E : enumerate(Leaf.getMatchDag().edges())) {
+      if (E.value()->getFromMI() == Instr &&
+          E.value()->getFromMO()->getIdx() < CGI->Operands.size()) {
+        ReferencedOperands.resize(E.value()->getFromMO()->getIdx() + 1);
+        ReferencedOperands.set(E.value()->getFromMO()->getIdx());
+      }
+    }
+  }
+  for (auto &Leaf : NewLeaves) {
+    // Skip any leaves that don't care about this instruction.
+    if (!Leaf.getInstrInfo(InstrID))
+      continue;
+
+    for (unsigned OpIdx : ReferencedOperands.set_bits()) {
+      Leaf.declareOperand(InstrID, OpIdx);
+    }
+  }
+  for (unsigned OpIdx : ReferencedOperands.set_bits()) {
+    SubBuilder.addPartitionersForOperand(InstrID, OpIdx);
+  }
+}
+
+// Debug dump: for each opcode (or "Default"), list the indices of the leaves
+// that would land in its partition.
+void GIMatchTreeOpcodePartitioner::emitPartitionResults(
+    raw_ostream &OS) const {
+  OS << "Partitioning by opcode would produce " << Partitions.size()
+     << " partitions\n";
+  for (const auto &Partition : InstrToPartition) {
+    if (Partition.first == nullptr)
+      OS << "Default: ";
+    else
+      OS << Partition.first->TheDef->getName() << ": ";
+    StringRef Separator = "";
+    for (unsigned I : Partitions.find(Partition.second)->second.set_bits()) {
+      OS << Separator << I;
+      Separator = ", ";
+    }
+    OS << "\n";
+  }
+}
+
+// Emit C++ that assigns the runtime variable `Partition` by switching on
+// MIs[InstrID]'s opcode. A lone default partition degenerates to a constant
+// assignment; any unmatched opcode bails out with `return false`.
+void GIMatchTreeOpcodePartitioner::generatePartitionSelectorCode(
+    raw_ostream &OS, StringRef Indent) const {
+  // Make sure not to emit empty switch or switch with just default
+  if (PartitionToInstr.size() == 1 && PartitionToInstr[0] == nullptr) {
+    OS << Indent << "Partition = 0;\n";
+  } else if (PartitionToInstr.size()) {
+    OS << Indent << "Partition = -1;\n"
+       << Indent << "switch (MIs[" << InstrID << "]->getOpcode()) {\n";
+    for (const auto &EnumInstr : enumerate(PartitionToInstr)) {
+      if (EnumInstr.value() == nullptr)
+        OS << Indent << "default:";
+      else
+        OS << Indent << "case " << EnumInstr.value()->Namespace
+           << "::" << EnumInstr.value()->TheDef->getName() << ":";
+      OS << " Partition = " << EnumInstr.index() << "; break;\n";
+    }
+    OS << Indent << "}\n";
+  }
+  OS << Indent
+     << "// Default case but without conflicting with potential default case "
+        "in selection.\n"
+     << Indent << "if (Partition == -1) return false;\n";
+}
+
+// Place leaf LeafIdx in the partition keyed by Result (true = operand is a
+// vreg def, false = it is not), creating the partition number and its
+// bitvector on first use and growing the bitvector as needed.
+void GIMatchTreeVRegDefPartitioner::addToPartition(bool Result,
+                                                   unsigned LeafIdx) {
+  auto I = ResultToPartition.find(Result);
+  if (I == ResultToPartition.end()) {
+    ResultToPartition.insert(std::make_pair(Result, PartitionToResult.size()));
+    PartitionToResult.push_back(Result);
+  }
+  // NOTE(review): the map is looked up a second time here; the iterator
+  // returned by insert() above could be reused instead.
+  I = ResultToPartition.find(Result);
+  auto P = Partitions.find(I->second);
+  if (P == Partitions.end())
+    P = Partitions.insert(std::make_pair(I->second, BitVector())).first;
+  P->second.resize(LeafIdx + 1);
+  P->second.set(LeafIdx);
+}
+
+// Partition the leaves by whether operand OpIdx of MIs[InstrID] is expected
+// to be a vreg def. A leaf with a traversable use->def edge from that operand
+// goes in the "true" partition; leaves with no such edge (or that don't
+// reference the instruction) go in both partitions. Records, per leaf, the
+// edges this partitioner would consume (TraversedEdges).
+void GIMatchTreeVRegDefPartitioner::repartition(
+    GIMatchTreeBuilder::LeafVec &Leaves) {
+  Partitions.clear();
+
+  for (const auto &Leaf : enumerate(Leaves)) {
+    GIMatchTreeInstrInfo *InstrInfo = Leaf.value().getInstrInfo(InstrID);
+    BitVector TraversedEdgesForLeaf(Leaf.value().getMatchDag().getNumEdges());
+
+    // If the instruction isn't declared then we don't care about it. Ignore
+    // it for now and add it to all partitions later once we know what
+    // partitions we have.
+    if (!InstrInfo) {
+      TraversedEdges.push_back(TraversedEdgesForLeaf);
+      continue;
+    }
+
+    // If this node has an use -> def edge from this operand then this
+    // instruction must be in partition 1 (isVRegDef()).
+    bool WantsEdge = false;
+    for (unsigned EIdx : Leaf.value().TraversableEdges.set_bits()) {
+      const auto &E = Leaf.value().getEdge(EIdx);
+      if (E->getFromMI() != InstrInfo->getInstrNode() ||
+          E->getFromMO()->getIdx() != OpIdx || E->isDefToUse())
+        continue;
+
+      // We're looking at the right edge. This leaf wants a vreg def so we'll
+      // put it in partition 1.
+      addToPartition(true, Leaf.index());
+      TraversedEdgesForLeaf.set(EIdx);
+      WantsEdge = true;
+    }
+
+    // NOTE(review): isNotReg is never set to anything but false, so the
+    // first branch below is dead code; only the else-if ever runs.
+    bool isNotReg = false;
+    if (!WantsEdge && isNotReg) {
+      // If this leaf doesn't have an edge and we _don't_ want a register,
+      // then add it to partition 0.
+      addToPartition(false, Leaf.index());
+    } else if (!WantsEdge) {
+      // If this leaf doesn't have an edge and we don't know what we want,
+      // then add it to partition 0 and 1.
+      addToPartition(false, Leaf.index());
+      addToPartition(true, Leaf.index());
+    }
+
+    TraversedEdges.push_back(TraversedEdgesForLeaf);
+  }
+
+  // Add any leaves that don't care about this instruction to all partitions.
+  for (const auto &Leaf : enumerate(Leaves)) {
+    GIMatchTreeInstrInfo *InstrInfo = Leaf.value().getInstrInfo(InstrID);
+    if (!InstrInfo)
+      for (auto &Partition : Partitions) {
+        Partition.second.resize(Leaf.index() + 1);
+        Partition.second.set(Leaf.index());
+      }
+  }
+}
+
+// Populate SubBuilder with the leaves of partition PartitionIdx, consuming
+// the use->def edges this partitioner traversed. For the "is a vreg def"
+// partition, allocate a new matched-instruction slot for the defining
+// instruction, declare it on every affected leaf, and register the
+// partitioners that apply to the newly declared instruction.
+void GIMatchTreeVRegDefPartitioner::applyForPartition(
+    unsigned PartitionIdx, GIMatchTreeBuilder &Builder,
+    GIMatchTreeBuilder &SubBuilder) {
+  BitVector PossibleLeaves = getPossibleLeavesForPartition(PartitionIdx);
+
+  std::vector<BitVector> TraversedEdgesByNewLeaves;
+  // Consume any edges we handled.
+  for (auto &EnumeratedLeaf : enumerate(Builder.getPossibleLeaves())) {
+    if (!PossibleLeaves[EnumeratedLeaf.index()])
+      continue;
+
+    auto &Leaf = EnumeratedLeaf.value();
+    const auto &TraversedEdgesForLeaf = TraversedEdges[EnumeratedLeaf.index()];
+    TraversedEdgesByNewLeaves.push_back(TraversedEdgesForLeaf);
+    Leaf.RemainingEdges.reset(TraversedEdgesForLeaf);
+    Leaf.TraversableEdges.reset(TraversedEdgesForLeaf);
+    SubBuilder.addLeaf(Leaf);
+  }
+
+  // Nothing to do. The only thing we know is that it isn't a vreg-def.
+  if (PartitionToResult[PartitionIdx] == false)
+    return;
+
+  NewInstrID = SubBuilder.allocInstrID();
+
+  GIMatchTreeBuilder::LeafVec &NewLeaves = SubBuilder.getPossibleLeaves();
+  for (const auto I : zip(NewLeaves, TraversedEdgesByNewLeaves)) {
+    auto &Leaf = std::get<0>(I);
+    auto &TraversedEdgesForLeaf = std::get<1>(I);
+    GIMatchTreeInstrInfo *InstrInfo = Leaf.getInstrInfo(InstrID);
+    // Skip any leaves that don't care about this instruction.
+    if (!InstrInfo)
+      continue;
+    for (unsigned EIdx : TraversedEdgesForLeaf.set_bits()) {
+      const GIMatchDagEdge *E = Leaf.getEdge(EIdx);
+      Leaf.declareInstr(E->getToMI(), NewInstrID);
+    }
+  }
+  SubBuilder.addPartitionersForInstr(NewInstrID);
+}
+
+// Debug dump: for each partition, print its index and name followed by the
+// indices of the leaves it contains.
+void GIMatchTreeVRegDefPartitioner::emitPartitionResults(
+    raw_ostream &OS) const {
+  OS << "Partitioning by vreg-def would produce " << Partitions.size()
+     << " partitions\n";
+  for (const auto &Partition : Partitions) {
+    OS << Partition.first << " (";
+    emitPartitionName(OS, Partition.first);
+    OS << "): ";
+    StringRef Separator = "";
+    for (unsigned I : Partition.second.set_bits()) {
+      OS << Separator << I;
+      Separator = ", ";
+    }
+    OS << "\n";
+  }
+}
+
+// Emit C++ that grows the MIs vector to hold slot NewInstrID, fills it with
+// the vreg-defining instruction of MIs[InstrID]'s operand OpIdx when that
+// operand is a register (nullptr otherwise), then maps the null/non-null
+// outcome to a partition number, bailing out if no partition matches.
+void GIMatchTreeVRegDefPartitioner::generatePartitionSelectorCode(
+    raw_ostream &OS, StringRef Indent) const {
+  OS << Indent << "Partition = -1;\n"
+     << Indent << "if (MIs.size() <= " << NewInstrID << ") MIs.resize("
+     << (NewInstrID + 1) << ");\n"
+     << Indent << "MIs[" << NewInstrID << "] = nullptr;\n"
+     << Indent << "if (MIs[" << InstrID << "]->getOperand(" << OpIdx
+     << ").isReg())\n"
+     << Indent << "  MIs[" << NewInstrID << "] = MRI.getVRegDef(MIs[" << InstrID
+     << "]->getOperand(" << OpIdx << ").getReg());\n";
+
+  for (const auto &Pair : ResultToPartition)
+    OS << Indent << "if (MIs[" << NewInstrID << "] "
+       << (Pair.first ? "!=" : "==")
+       << " nullptr) Partition = " << Pair.second << ";\n";
+
+  OS << Indent << "if (Partition == -1) return false;\n";
+}
diff --git a/contrib/libs/llvm16/utils/TableGen/GlobalISel/GIMatchTree.h b/contrib/libs/llvm16/utils/TableGen/GlobalISel/GIMatchTree.h
new file mode 100644
index 0000000000..0ce4060fe7
--- /dev/null
+++ b/contrib/libs/llvm16/utils/TableGen/GlobalISel/GIMatchTree.h
@@ -0,0 +1,626 @@
+//===- GIMatchTree.h - A decision tree to match GIMatchDag's --------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_UTILS_TABLEGEN_GIMATCHTREE_H
+#define LLVM_UTILS_TABLEGEN_GIMATCHTREE_H
+
+#include "GIMatchDag.h"
+#include "llvm/ADT/BitVector.h"
+
+namespace llvm {
+class raw_ostream;
+
+class GIMatchTreeBuilder;
+class GIMatchTreePartitioner;
+
+/// Describes the binding of a variable to the matched MIR
+class GIMatchTreeVariableBinding {
+ /// The name of the variable described by this binding.
+ StringRef Name;
+ // The matched instruction it is bound to.
+ unsigned InstrID;
+ // The matched operand (if appropriate) it is bound to.
+ std::optional<unsigned> OpIdx;
+
+public:
+ GIMatchTreeVariableBinding(StringRef Name, unsigned InstrID,
+ std::optional<unsigned> OpIdx = std::nullopt)
+ : Name(Name), InstrID(InstrID), OpIdx(OpIdx) {}
+
+ bool isInstr() const { return !OpIdx; }
+ StringRef getName() const { return Name; }
+ unsigned getInstrID() const { return InstrID; }
+ unsigned getOpIdx() const {
+ assert(OpIdx && "Is not an operand binding");
+ return *OpIdx;
+ }
+};
+
+/// Associates a matchable with a leaf of the decision tree.
+class GIMatchTreeLeafInfo {
+public:
+ using const_var_binding_iterator =
+ std::vector<GIMatchTreeVariableBinding>::const_iterator;
+ using UntestedPredicatesTy = SmallVector<const GIMatchDagPredicate *, 1>;
+ using const_untested_predicates_iterator = UntestedPredicatesTy::const_iterator;
+
+protected:
+ /// A name for the matchable. This is primarily for debugging.
+ StringRef Name;
+ /// Where rules have multiple roots, this is which root we're starting from.
+ unsigned RootIdx;
+ /// Opaque data the caller of the tree building code understands.
+ void *Data;
+ /// Has the decision tree covered every edge traversal? If it hasn't then this
+ /// is an unrecoverable error indicating there's something wrong with the
+ /// partitioners.
+ bool IsFullyTraversed;
+ /// Has the decision tree covered every predicate test? If it has, then
+ /// subsequent matchables on the same leaf are unreachable. If it hasn't, the
+ /// code that requested the GIMatchTree is responsible for finishing off any
+ /// remaining predicates.
+ bool IsFullyTested;
+ /// The variable bindings associated with this leaf so far.
+ std::vector<GIMatchTreeVariableBinding> VarBindings;
+ /// Any predicates left untested by the time we reach this leaf.
+ UntestedPredicatesTy UntestedPredicates;
+
+public:
+ GIMatchTreeLeafInfo() { llvm_unreachable("Cannot default-construct"); }
+ GIMatchTreeLeafInfo(StringRef Name, unsigned RootIdx, void *Data)
+ : Name(Name), RootIdx(RootIdx), Data(Data), IsFullyTraversed(false),
+ IsFullyTested(false) {}
+
+ StringRef getName() const { return Name; }
+ unsigned getRootIdx() const { return RootIdx; }
+ template <class Ty> Ty *getTargetData() const {
+ return static_cast<Ty *>(Data);
+ }
+ bool isFullyTraversed() const { return IsFullyTraversed; }
+ void setIsFullyTraversed(bool V) { IsFullyTraversed = V; }
+ bool isFullyTested() const { return IsFullyTested; }
+ void setIsFullyTested(bool V) { IsFullyTested = V; }
+
+ void bindInstrVariable(StringRef Name, unsigned InstrID) {
+ VarBindings.emplace_back(Name, InstrID);
+ }
+ void bindOperandVariable(StringRef Name, unsigned InstrID, unsigned OpIdx) {
+ VarBindings.emplace_back(Name, InstrID, OpIdx);
+ }
+
+ const_var_binding_iterator var_bindings_begin() const {
+ return VarBindings.begin();
+ }
+ const_var_binding_iterator var_bindings_end() const {
+ return VarBindings.end();
+ }
+ iterator_range<const_var_binding_iterator> var_bindings() const {
+ return make_range(VarBindings.begin(), VarBindings.end());
+ }
+ iterator_range<const_untested_predicates_iterator> untested_predicates() const {
+ return make_range(UntestedPredicates.begin(), UntestedPredicates.end());
+ }
+ void addUntestedPredicate(const GIMatchDagPredicate *P) {
+ UntestedPredicates.push_back(P);
+ }
+};
+
+/// The nodes of a decision tree used to perform the match.
+/// This will be used to generate the C++ code or state machine equivalent.
+///
+/// It should be noted that some nodes of this tree (most notably nodes handling
+/// def -> use edges) will need to iterate over several possible matches. As
+/// such, code generated from this will sometimes need to support backtracking.
+class GIMatchTree {
+ using LeafVector = std::vector<GIMatchTreeLeafInfo>;
+
+ /// The partitioner that has been chosen for this node. This may be nullptr if
+ /// a partitioner hasn't been chosen yet or if the node is a leaf.
+ std::unique_ptr<GIMatchTreePartitioner> Partitioner;
+ /// All the leaves that are possible for this node of the tree.
+ /// Note: This should be emptied after the tree is built when there are
+ /// children but this currently isn't done to aid debuggability of the DOT
+ /// graph for the decision tree.
+ LeafVector PossibleLeaves;
+ /// The children of this node. The index into this array must match the index
+ /// chosen by the partitioner.
+ std::vector<GIMatchTree> Children;
+
+ void writeDOTGraphNode(raw_ostream &OS) const;
+ void writeDOTGraphEdges(raw_ostream &OS) const;
+
+public:
+ void writeDOTGraph(raw_ostream &OS) const;
+
+ void setNumChildren(unsigned Num) { Children.resize(Num); }
+ void addPossibleLeaf(const GIMatchTreeLeafInfo &V, bool IsFullyTraversed,
+ bool IsFullyTested) {
+ PossibleLeaves.push_back(V);
+ PossibleLeaves.back().setIsFullyTraversed(IsFullyTraversed);
+ PossibleLeaves.back().setIsFullyTested(IsFullyTested);
+ }
+ void dropLeavesAfter(size_t Length) {
+ if (PossibleLeaves.size() > Length)
+ PossibleLeaves.resize(Length);
+ }
+ void setPartitioner(std::unique_ptr<GIMatchTreePartitioner> &&V) {
+ Partitioner = std::move(V);
+ }
+ GIMatchTreePartitioner *getPartitioner() const { return Partitioner.get(); }
+
+ std::vector<GIMatchTree>::iterator children_begin() {
+ return Children.begin();
+ }
+ std::vector<GIMatchTree>::iterator children_end() { return Children.end(); }
+ iterator_range<std::vector<GIMatchTree>::iterator> children() {
+ return make_range(children_begin(), children_end());
+ }
+ std::vector<GIMatchTree>::const_iterator children_begin() const {
+ return Children.begin();
+ }
+ std::vector<GIMatchTree>::const_iterator children_end() const {
+ return Children.end();
+ }
+ iterator_range<std::vector<GIMatchTree>::const_iterator> children() const {
+ return make_range(children_begin(), children_end());
+ }
+
+ LeafVector::const_iterator possible_leaves_begin() const {
+ return PossibleLeaves.begin();
+ }
+ LeafVector::const_iterator possible_leaves_end() const {
+ return PossibleLeaves.end();
+ }
+ iterator_range<LeafVector::const_iterator>
+ possible_leaves() const {
+ return make_range(possible_leaves_begin(), possible_leaves_end());
+ }
+ LeafVector::iterator possible_leaves_begin() {
+ return PossibleLeaves.begin();
+ }
+ LeafVector::iterator possible_leaves_end() {
+ return PossibleLeaves.end();
+ }
+ iterator_range<LeafVector::iterator> possible_leaves() {
+ return make_range(possible_leaves_begin(), possible_leaves_end());
+ }
+};
+
+/// Record information that is known about the instruction bound to this ID and
+/// GIMatchDagInstrNode. Every rule gets its own set of
+/// GIMatchTreeInstrInfo to bind the shared IDs to an instr node in its
+/// DAG.
+///
+/// For example, if we know that there are 3 operands. We can record it here to
+/// elide duplicate checks.
+class GIMatchTreeInstrInfo {
+ /// The instruction ID for the matched instruction.
+ unsigned ID;
+ /// The corresponding instruction node in the MatchDAG.
+ const GIMatchDagInstr *InstrNode;
+
+public:
+ GIMatchTreeInstrInfo(unsigned ID, const GIMatchDagInstr *InstrNode)
+ : ID(ID), InstrNode(InstrNode) {}
+
+ unsigned getID() const { return ID; }
+ const GIMatchDagInstr *getInstrNode() const { return InstrNode; }
+};
+
+/// Record information that is known about the operand bound to this ID, OpIdx,
+/// and GIMatchDagInstrNode. Every rule gets its own set of
+/// GIMatchTreeOperandInfo to bind the shared IDs to an operand of an
+/// instr node from its DAG.
+///
+/// For example, if we know that there the operand is a register. We can record
+/// it here to elide duplicate checks.
+class GIMatchTreeOperandInfo {
+ /// The corresponding instruction node in the MatchDAG that the operand
+ /// belongs to.
+ const GIMatchDagInstr *InstrNode;
+ unsigned OpIdx;
+
+public:
+ GIMatchTreeOperandInfo(const GIMatchDagInstr *InstrNode, unsigned OpIdx)
+ : InstrNode(InstrNode), OpIdx(OpIdx) {}
+
+ const GIMatchDagInstr *getInstrNode() const { return InstrNode; }
+ unsigned getOpIdx() const { return OpIdx; }
+};
+
+/// Represent a leaf of the match tree and any working data we need to build the
+/// tree.
+///
+/// It's important to note that each rule can have multiple
+/// GIMatchTreeBuilderLeafInfo's since the partitioners do not always partition
+/// into mutually-exclusive partitions. For example:
+/// R1: (FOO ..., ...)
+/// R2: (oneof(FOO, BAR) ..., ...)
+/// will partition by opcode into two partitions FOO=>[R1, R2], and BAR=>[R2]
+///
+/// As an optimization, all instructions, edges, and predicates in the DAGs are
+/// numbered and tracked in BitVectors. As such, the GIMatchDAG must not be
+/// modified once construction of the tree has begun.
+class GIMatchTreeBuilderLeafInfo {
+protected:
+ GIMatchTreeBuilder &Builder;
+ GIMatchTreeLeafInfo Info;
+ const GIMatchDag &MatchDag;
+ /// The association between GIMatchDagInstr* and GIMatchTreeInstrInfo.
+ /// The primary reason for this members existence is to allow the use of
+ /// InstrIDToInfo.lookup() since that requires that the value is
+ /// default-constructible.
+ DenseMap<const GIMatchDagInstr *, GIMatchTreeInstrInfo> InstrNodeToInfo;
+ /// The instruction information for a given ID in the context of this
+ /// particular leaf.
+ DenseMap<unsigned, GIMatchTreeInstrInfo *> InstrIDToInfo;
+ /// The operand information for a given ID and OpIdx in the context of this
+ /// particular leaf.
+ DenseMap<std::pair<unsigned, unsigned>, GIMatchTreeOperandInfo>
+ OperandIDToInfo;
+
+public:
+ /// The remaining instrs/edges/predicates to visit
+ BitVector RemainingInstrNodes;
+ BitVector RemainingEdges;
+ BitVector RemainingPredicates;
+
+ // The remaining predicate dependencies for each predicate
+ std::vector<BitVector> UnsatisfiedPredDepsForPred;
+
+ /// The edges/predicates we can visit as a result of the declare*() calls we
+ /// have already made. We don't need an instrs version since edges imply the
+ /// instr.
+ BitVector TraversableEdges;
+ BitVector TestablePredicates;
+
+ /// Map predicates from the DAG to their position in the DAG predicate
+ /// iterators.
+ DenseMap<GIMatchDagPredicate *, unsigned> PredicateIDs;
+ /// Map predicate dependency edges from the DAG to their position in the DAG
+ /// predicate dependency iterators.
+ DenseMap<GIMatchDagPredicateDependencyEdge *, unsigned> PredicateDepIDs;
+
+public:
+ GIMatchTreeBuilderLeafInfo(GIMatchTreeBuilder &Builder, StringRef Name,
+ unsigned RootIdx, const GIMatchDag &MatchDag,
+ void *Data);
+
+ StringRef getName() const { return Info.getName(); }
+ GIMatchTreeLeafInfo &getInfo() { return Info; }
+ const GIMatchTreeLeafInfo &getInfo() const { return Info; }
+ const GIMatchDag &getMatchDag() const { return MatchDag; }
+ unsigned getRootIdx() const { return Info.getRootIdx(); }
+
+ /// Has this DAG been fully traversed. This must be true by the time the tree
+ /// builder finishes.
+ bool isFullyTraversed() const {
+ // We don't need UnsatisfiedPredDepsForPred because RemainingPredicates
+ // can't be all-zero without satisfying all the dependencies. The same is
+ // almost true for Edges and Instrs but it's possible to have Instrs without
+ // Edges.
+ return RemainingInstrNodes.none() && RemainingEdges.none();
+ }
+
+ /// Has this DAG been fully tested. This hould be true by the time the tree
+ /// builder finishes but clients can finish any untested predicates left over
+ /// if it's not true.
+ bool isFullyTested() const {
+ // We don't need UnsatisfiedPredDepsForPred because RemainingPredicates
+ // can't be all-zero without satisfying all the dependencies. The same is
+ // almost true for Edges and Instrs but it's possible to have Instrs without
+ // Edges.
+ return RemainingInstrNodes.none() && RemainingEdges.none() &&
+ RemainingPredicates.none();
+ }
+
+ const GIMatchDagInstr *getInstr(unsigned Idx) const {
+ return *(MatchDag.instr_nodes_begin() + Idx);
+ }
+ const GIMatchDagEdge *getEdge(unsigned Idx) const {
+ return *(MatchDag.edges_begin() + Idx);
+ }
+ GIMatchDagEdge *getEdge(unsigned Idx) {
+ return *(MatchDag.edges_begin() + Idx);
+ }
+ const GIMatchDagPredicate *getPredicate(unsigned Idx) const {
+ return *(MatchDag.predicates_begin() + Idx);
+ }
+ iterator_range<llvm::BitVector::const_set_bits_iterator>
+ untested_instrs() const {
+ return RemainingInstrNodes.set_bits();
+ }
+ iterator_range<llvm::BitVector::const_set_bits_iterator>
+ untested_edges() const {
+ return RemainingEdges.set_bits();
+ }
+ iterator_range<llvm::BitVector::const_set_bits_iterator>
+ untested_predicates() const {
+ return RemainingPredicates.set_bits();
+ }
+
+ /// Bind an instr node to the given ID and clear any blocking dependencies
+ /// that were waiting for it.
+ void declareInstr(const GIMatchDagInstr *Instr, unsigned ID);
+
+ /// Bind an operand to the given ID and OpIdx and clear any blocking
+ /// dependencies that were waiting for it.
+ void declareOperand(unsigned InstrID, unsigned OpIdx);
+
+ GIMatchTreeInstrInfo *getInstrInfo(unsigned ID) const {
+ return InstrIDToInfo.lookup(ID);
+ }
+
+ void dump(raw_ostream &OS) const {
+ OS << "Leaf " << getName() << " for root #" << getRootIdx() << "\n";
+ MatchDag.print(OS);
+ for (const auto &I : InstrIDToInfo)
+ OS << "Declared Instr #" << I.first << "\n";
+ for (const auto &I : OperandIDToInfo)
+ OS << "Declared Instr #" << I.first.first << ", Op #" << I.first.second
+ << "\n";
+ OS << RemainingInstrNodes.count() << " untested instrs of "
+ << RemainingInstrNodes.size() << "\n";
+ OS << RemainingEdges.count() << " untested edges of "
+ << RemainingEdges.size() << "\n";
+ OS << RemainingPredicates.count() << " untested predicates of "
+ << RemainingPredicates.size() << "\n";
+
+ OS << TraversableEdges.count() << " edges could be traversed\n";
+ OS << TestablePredicates.count() << " predicates could be tested\n";
+ }
+};
+
+/// The tree builder has a fairly tough job. It's purpose is to merge all the
+/// DAGs from the ruleset into a decision tree that walks all of them
+/// simultaneously and identifies the rule that was matched. In addition to
+/// that, it also needs to find the most efficient order to make decisions
+/// without violating any dependencies and ensure that every DAG covers every
+/// instr/edge/predicate.
+class GIMatchTreeBuilder {
+public:
+ using LeafVec = std::vector<GIMatchTreeBuilderLeafInfo>;
+
+protected:
+ /// The leaves that the resulting decision tree will distinguish.
+ LeafVec Leaves;
+ /// The tree node being constructed.
+ GIMatchTree *TreeNode;
+ /// The builders for each subtree resulting from the current decision.
+ std::vector<GIMatchTreeBuilder> SubtreeBuilders;
+ /// The possible partitioners we could apply right now.
+ std::vector<std::unique_ptr<GIMatchTreePartitioner>> Partitioners;
+ /// The next instruction ID to allocate when requested by the chosen
+ /// Partitioner.
+ unsigned NextInstrID;
+
+ /// Use any context we have stored to cull partitioners that only test things
+ /// we already know. At the time of writing, there's no need to do anything
+ /// here but it will become important once, for example, there is a
+ /// num-operands and an opcode partitioner. This is because applying an opcode
+ /// partitioner (usually) makes the number of operands known which makes
+ /// additional checking pointless.
+ void filterRedundantPartitioners();
+
+ /// Evaluate the available partioners and select the best one at the moment.
+ void evaluatePartitioners();
+
+ /// Construct the current tree node.
+ void runStep();
+
+public:
+ GIMatchTreeBuilder(unsigned NextInstrID) : NextInstrID(NextInstrID) {}
+ GIMatchTreeBuilder(GIMatchTree *TreeNode, unsigned NextInstrID)
+ : TreeNode(TreeNode), NextInstrID(NextInstrID) {}
+
+ void addLeaf(StringRef Name, unsigned RootIdx, const GIMatchDag &MatchDag,
+ void *Data) {
+ Leaves.emplace_back(*this, Name, RootIdx, MatchDag, Data);
+ }
+ void addLeaf(const GIMatchTreeBuilderLeafInfo &L) { Leaves.push_back(L); }
+ void addPartitioner(std::unique_ptr<GIMatchTreePartitioner> P) {
+ Partitioners.push_back(std::move(P));
+ }
+ void addPartitionersForInstr(unsigned InstrIdx);
+ void addPartitionersForOperand(unsigned InstrID, unsigned OpIdx);
+
+ LeafVec &getPossibleLeaves() { return Leaves; }
+
+ unsigned allocInstrID() { return NextInstrID++; }
+
+ /// Construct the decision tree.
+ std::unique_ptr<GIMatchTree> run();
+};
+
+/// Partitioners are the core of the tree builder and are unfortunately rather
+/// tricky to write.
+class GIMatchTreePartitioner {
+protected:
+ /// The partitions resulting from applying the partitioner to the possible
+ /// leaves. The keys must be consecutive integers starting from 0. This can
+ /// lead to some unfortunate situations where partitioners test a predicate
+ /// and use 0 for success and 1 for failure if the ruleset encounters a
+ /// success case first but is necessary to assign the partition to one of the
+ /// tree nodes children. As a result, you usually need some kind of
+ /// indirection to map the natural keys (e.g. ptrs/bools) to this linear
+ /// sequence. The values are a bitvector indicating which leaves belong to
+ /// this partition.
+ DenseMap<unsigned, BitVector> Partitions;
+
+public:
+ virtual ~GIMatchTreePartitioner() {}
+ virtual std::unique_ptr<GIMatchTreePartitioner> clone() const = 0;
+
+ /// Determines which partitions the given leaves belong to. A leaf may belong
+ /// to multiple partitions in which case it will be duplicated during
+ /// applyForPartition().
+ ///
+ /// This function can be rather complicated. A few particular things to be
+ /// aware of include:
+ /// * One leaf can be assigned to multiple partitions when there's some
+ /// ambiguity.
+ /// * Not all DAG's for the leaves may be able to perform the test. For
+ /// example, the opcode partitiioner must account for one DAG being a
+ /// superset of another such as [(ADD ..., ..., ...)], and [(MUL t, ...,
+ /// ...), (ADD ..., t, ...)]
+ /// * Attaching meaning to a particular partition index will generally not
+ /// work due to the '0, 1, ..., n' requirement. You might encounter cases
+ /// where only partition 1 is seen, leaving a missing 0.
+ /// * Finding a specific predicate such as the opcode predicate for a specific
+ /// instruction is non-trivial. It's often O(NumPredicates), leading to
+ /// O(NumPredicates*NumRules) when applied to the whole ruleset. The good
+ /// news there is that n is typically small thanks to predicate dependencies
+ /// limiting how many are testable at once. Also, with opcode and type
+ /// predicates being so frequent the value of m drops very fast too. It
+ /// wouldn't be terribly surprising to see a 10k ruleset drop down to an
+ /// average of 100 leaves per partition after a single opcode partitioner.
+ /// * The same goes for finding specific edges. The need to traverse them in
+ /// dependency order dramatically limits the search space at any given
+ /// moment.
+ /// * If you need to add a leaf to all partitions, make sure you don't forget
+ /// them when adding partitions later.
+ virtual void repartition(GIMatchTreeBuilder::LeafVec &Leaves) = 0;
+
+ /// Delegate the leaves for a given partition to the corresponding subbuilder,
+ /// update any recorded context for this partition (e.g. allocate instr id's
+ /// for instrs recorder by the current node), and clear any blocking
+ /// dependencies this partitioner resolved.
+ virtual void applyForPartition(unsigned PartitionIdx,
+ GIMatchTreeBuilder &Builder,
+ GIMatchTreeBuilder &SubBuilder) = 0;
+
+ /// Return a BitVector indicating which leaves should be transferred to the
+ /// specified partition. Note that the same leaf can be indicated for multiple
+ /// partitions.
+ BitVector getPossibleLeavesForPartition(unsigned Idx) {
+ const auto &I = Partitions.find(Idx);
+ assert(I != Partitions.end() && "Requested non-existant partition");
+ return I->second;
+ }
+
+ size_t getNumPartitions() const { return Partitions.size(); }
+ size_t getNumLeavesWithDupes() const {
+ size_t S = 0;
+ for (const auto &P : Partitions)
+ S += P.second.size();
+ return S;
+ }
+
+ /// Emit a brief description of the partitioner suitable for debug printing or
+ /// use in a DOT graph.
+ virtual void emitDescription(raw_ostream &OS) const = 0;
+ /// Emit a label for the given partition suitable for debug printing or use in
+ /// a DOT graph.
+ virtual void emitPartitionName(raw_ostream &OS, unsigned Idx) const = 0;
+
+ /// Emit a long description of how the partitioner partitions the leaves.
+ virtual void emitPartitionResults(raw_ostream &OS) const = 0;
+
+ /// Generate code to select between partitions based on the MIR being matched.
+ /// This is typically a switch statement that picks a partition index.
+ virtual void generatePartitionSelectorCode(raw_ostream &OS,
+ StringRef Indent) const = 0;
+};
+
+/// Partition according to the opcode of the instruction.
+///
+/// Numbers CodeGenInstr ptrs for use as partition ID's. One special partition,
+/// nullptr, represents the case where the instruction isn't known.
+///
+/// * If the opcode can be tested and is a single opcode, create the partition
+/// for that opcode and assign the leaf to it. This partition no longer needs
+/// to test the opcode, and many details about the instruction will usually
+/// become known (e.g. number of operands for non-variadic instrs) via the
+/// CodeGenInstr ptr.
+/// * (not implemented yet) If the opcode can be tested and is a choice of
+/// opcodes, then the leaf can be treated like the single-opcode case but must
+/// be added to all relevant partitions and not quite as much becomes known as
+/// a result. That said, multiple-choice opcodes are likely similar enough
+/// (because if they aren't then handling them together makes little sense)
+/// that plenty still becomes known. The main implementation issue with this
+/// is having a description to represent the commonality between instructions.
+/// * If the opcode is not tested, the leaf must be added to all partitions
+/// including the wildcard nullptr partition. What becomes known as a result
+/// varies between partitions.
+/// * If the instruction to be tested is not declared then add the leaf to all
+/// partitions. This occurs when we encounter one rule that is a superset of
+/// the other and we are still matching the remainder of the superset. The
+/// result is that the cases that don't match the superset will match the
+/// subset rule, while the ones that do match the superset will match either
+/// (which one is algorithm dependent but will usually be the superset).
+class GIMatchTreeOpcodePartitioner : public GIMatchTreePartitioner {
+ unsigned InstrID;
+ DenseMap<const CodeGenInstruction *, unsigned> InstrToPartition;
+ std::vector<const CodeGenInstruction *> PartitionToInstr;
+ std::vector<BitVector> TestedPredicates;
+
+public:
+ GIMatchTreeOpcodePartitioner(unsigned InstrID) : InstrID(InstrID) {}
+
+ std::unique_ptr<GIMatchTreePartitioner> clone() const override {
+ return std::make_unique<GIMatchTreeOpcodePartitioner>(*this);
+ }
+
+ void emitDescription(raw_ostream &OS) const override {
+ OS << "MI[" << InstrID << "].getOpcode()";
+ }
+
+ void emitPartitionName(raw_ostream &OS, unsigned Idx) const override;
+
+ void repartition(GIMatchTreeBuilder::LeafVec &Leaves) override;
+ void applyForPartition(unsigned Idx, GIMatchTreeBuilder &SubBuilder,
+ GIMatchTreeBuilder &Builder) override;
+
+ void emitPartitionResults(raw_ostream &OS) const override;
+
+ void generatePartitionSelectorCode(raw_ostream &OS,
+ StringRef Indent) const override;
+};
+
+class GIMatchTreeVRegDefPartitioner : public GIMatchTreePartitioner {
+ unsigned NewInstrID = -1;
+ unsigned InstrID;
+ unsigned OpIdx;
+ std::vector<BitVector> TraversedEdges;
+ DenseMap<unsigned, unsigned> ResultToPartition;
+ BitVector PartitionToResult;
+
+ void addToPartition(bool Result, unsigned LeafIdx);
+
+public:
+ GIMatchTreeVRegDefPartitioner(unsigned InstrID, unsigned OpIdx)
+ : InstrID(InstrID), OpIdx(OpIdx) {}
+
+ std::unique_ptr<GIMatchTreePartitioner> clone() const override {
+ return std::make_unique<GIMatchTreeVRegDefPartitioner>(*this);
+ }
+
+ void emitDescription(raw_ostream &OS) const override {
+ OS << "MI[" << NewInstrID << "] = getVRegDef(MI[" << InstrID
+ << "].getOperand(" << OpIdx << "))";
+ }
+
+ void emitPartitionName(raw_ostream &OS, unsigned Idx) const override {
+ bool Result = PartitionToResult[Idx];
+ if (Result)
+ OS << "true";
+ else
+ OS << "false";
+ }
+
+ void repartition(GIMatchTreeBuilder::LeafVec &Leaves) override;
+ void applyForPartition(unsigned PartitionIdx, GIMatchTreeBuilder &Builder,
+ GIMatchTreeBuilder &SubBuilder) override;
+ void emitPartitionResults(raw_ostream &OS) const override;
+
+ void generatePartitionSelectorCode(raw_ostream &OS,
+ StringRef Indent) const override;
+};
+
+} // end namespace llvm
+#endif // ifndef LLVM_UTILS_TABLEGEN_GIMATCHTREE_H
diff --git a/contrib/libs/llvm16/utils/TableGen/GlobalISel/ya.make b/contrib/libs/llvm16/utils/TableGen/GlobalISel/ya.make
new file mode 100644
index 0000000000..ba338c7cc0
--- /dev/null
+++ b/contrib/libs/llvm16/utils/TableGen/GlobalISel/ya.make
@@ -0,0 +1,34 @@
+# Generated by devtools/yamaker.
+
+LIBRARY()
+
+LICENSE(Apache-2.0 WITH LLVM-exception)
+
+LICENSE_TEXTS(.yandex_meta/licenses.list.txt)
+
+PEERDIR(
+ contrib/libs/llvm16
+ contrib/libs/llvm16/lib/Support
+ contrib/libs/llvm16/lib/TableGen
+)
+
+ADDINCL(
+ contrib/libs/llvm16/utils/TableGen/GlobalISel
+)
+
+NO_COMPILER_WARNINGS()
+
+NO_UTIL()
+
+SRCS(
+ CodeExpander.cpp
+ GIMatchDag.cpp
+ GIMatchDagEdge.cpp
+ GIMatchDagInstr.cpp
+ GIMatchDagOperands.cpp
+ GIMatchDagPredicate.cpp
+ GIMatchDagPredicateDependencyEdge.cpp
+ GIMatchTree.cpp
+)
+
+END()
diff --git a/contrib/libs/llvm16/utils/TableGen/GlobalISelEmitter.cpp b/contrib/libs/llvm16/utils/TableGen/GlobalISelEmitter.cpp
new file mode 100644
index 0000000000..c79c79948a
--- /dev/null
+++ b/contrib/libs/llvm16/utils/TableGen/GlobalISelEmitter.cpp
@@ -0,0 +1,6314 @@
+//===- GlobalISelEmitter.cpp - Generate an instruction selector -----------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+/// \file
+/// This tablegen backend emits code for use by the GlobalISel instruction
+/// selector. See include/llvm/CodeGen/TargetGlobalISel.td.
+///
+/// This file analyzes the patterns recognized by the SelectionDAGISel tablegen
+/// backend, filters out the ones that are unsupported, maps
+/// SelectionDAG-specific constructs to their GlobalISel counterpart
+/// (when applicable: MVT to LLT; SDNode to generic Instruction).
+///
+/// Not all patterns are supported: pass the tablegen invocation
+/// "-warn-on-skipped-patterns" to emit a warning when a pattern is skipped,
+/// as well as why.
+///
+/// The generated file defines a single method:
+/// bool <Target>InstructionSelector::selectImpl(MachineInstr &I) const;
+/// intended to be used in InstructionSelector::select as the first-step
+/// selector for the patterns that don't require complex C++.
+///
+/// FIXME: We'll probably want to eventually define a base
+/// "TargetGenInstructionSelector" class.
+///
+//===----------------------------------------------------------------------===//
+
+#include "CodeGenDAGPatterns.h"
+#include "CodeGenInstruction.h"
+#include "SubtargetFeatureInfo.h"
+#include "llvm/ADT/Statistic.h"
+#include "llvm/Support/CodeGenCoverage.h"
+#include "llvm/Support/CommandLine.h"
+#include "llvm/Support/Error.h"
+#include "llvm/Support/LowLevelTypeImpl.h"
+#include "llvm/Support/MachineValueType.h"
+#include "llvm/Support/ScopedPrinter.h"
+#include "llvm/TableGen/Error.h"
+#include "llvm/TableGen/Record.h"
+#include "llvm/TableGen/TableGenBackend.h"
+#include <numeric>
+#include <string>
+using namespace llvm;
+
+#define DEBUG_TYPE "gisel-emitter"
+
+STATISTIC(NumPatternTotal, "Total number of patterns");
+STATISTIC(NumPatternImported, "Number of patterns imported from SelectionDAG");
+STATISTIC(NumPatternImportsSkipped, "Number of SelectionDAG imports skipped");
+STATISTIC(NumPatternsTested, "Number of patterns executed according to coverage information");
+STATISTIC(NumPatternEmitted, "Number of patterns emitted");
+
+cl::OptionCategory GlobalISelEmitterCat("Options for -gen-global-isel");
+
+static cl::opt<bool> WarnOnSkippedPatterns(
+ "warn-on-skipped-patterns",
+ cl::desc("Explain why a pattern was skipped for inclusion "
+ "in the GlobalISel selector"),
+ cl::init(false), cl::cat(GlobalISelEmitterCat));
+
+static cl::opt<bool> GenerateCoverage(
+ "instrument-gisel-coverage",
+ cl::desc("Generate coverage instrumentation for GlobalISel"),
+ cl::init(false), cl::cat(GlobalISelEmitterCat));
+
+static cl::opt<std::string> UseCoverageFile(
+ "gisel-coverage-file", cl::init(""),
+ cl::desc("Specify file to retrieve coverage information from"),
+ cl::cat(GlobalISelEmitterCat));
+
+static cl::opt<bool> OptimizeMatchTable(
+ "optimize-match-table",
+ cl::desc("Generate an optimized version of the match table"),
+ cl::init(true), cl::cat(GlobalISelEmitterCat));
+
+namespace {
+//===- Helper functions ---------------------------------------------------===//
+
+/// Get the name of the enum value used to number the predicate function.
+/// Predicates with custom GISel C++ (hasGISelPredicateCode) get the
+/// GIPFP_MI_ prefix; immediate predicates embed their immediate-type
+/// identifier instead (GIPFP_<Type>_<Fn>).
+std::string getEnumNameForPredicate(const TreePredicateFn &Predicate) {
+  if (Predicate.hasGISelPredicateCode())
+    return "GIPFP_MI_" + Predicate.getFnName();
+  return "GIPFP_" + Predicate.getImmTypeIdentifier().str() + "_" +
+         Predicate.getFnName();
+}
+
+/// Get the opcode used to check this predicate. The result has the shape
+/// GIM_Check<Type>ImmPredicate, where <Type> is the predicate's immediate
+/// type identifier.
+std::string getMatchOpcodeForImmPredicate(const TreePredicateFn &Predicate) {
+  return "GIM_Check" + Predicate.getImmTypeIdentifier().str() + "ImmPredicate";
+}
+
+/// This class stands in for LLT wherever we want to tablegen-erate an
+/// equivalent at compiler run-time.
+class LLTCodeGen {
+private:
+  LLT Ty;
+
+public:
+  LLTCodeGen() = default;
+  LLTCodeGen(const LLT &Ty) : Ty(Ty) {}
+
+  /// Return the GILLT_* enum token for Ty as a string (see
+  /// emitCxxEnumValue for the naming scheme).
+  std::string getCxxEnumValue() const {
+    std::string Str;
+    raw_string_ostream OS(Str);
+
+    emitCxxEnumValue(OS);
+    return Str;
+  }
+
+  /// Print the GILLT_* enum token for Ty: GILLT_s<bits> for scalars,
+  /// GILLT_v/GILLT_nxv<count>s<bits> for fixed/scalable vectors, and
+  /// GILLT_p<addrspace>[s<bits>] for pointers.
+  void emitCxxEnumValue(raw_ostream &OS) const {
+    if (Ty.isScalar()) {
+      OS << "GILLT_s" << Ty.getSizeInBits();
+      return;
+    }
+    if (Ty.isVector()) {
+      OS << (Ty.isScalable() ? "GILLT_nxv" : "GILLT_v")
+         << Ty.getElementCount().getKnownMinValue() << "s"
+         << Ty.getScalarSizeInBits();
+      return;
+    }
+    if (Ty.isPointer()) {
+      OS << "GILLT_p" << Ty.getAddressSpace();
+      if (Ty.getSizeInBits() > 0)
+        OS << "s" << Ty.getSizeInBits();
+      return;
+    }
+    llvm_unreachable("Unhandled LLT");
+  }
+
+  /// Print a C++ expression that reconstructs Ty at run time
+  /// (LLT::scalar / LLT::vector / LLT::pointer).
+  void emitCxxConstructorCall(raw_ostream &OS) const {
+    if (Ty.isScalar()) {
+      OS << "LLT::scalar(" << Ty.getSizeInBits() << ")";
+      return;
+    }
+    if (Ty.isVector()) {
+      OS << "LLT::vector("
+         << (Ty.isScalable() ? "ElementCount::getScalable("
+                             : "ElementCount::getFixed(")
+         << Ty.getElementCount().getKnownMinValue() << "), "
+         << Ty.getScalarSizeInBits() << ")";
+      return;
+    }
+    if (Ty.isPointer() && Ty.getSizeInBits() > 0) {
+      OS << "LLT::pointer(" << Ty.getAddressSpace() << ", "
+         << Ty.getSizeInBits() << ")";
+      return;
+    }
+    llvm_unreachable("Unhandled LLT");
+  }
+
+  const LLT &get() const { return Ty; }
+
+  /// This ordering is used for std::unique() and llvm::sort(). There's no
+  /// particular logic behind the order but either A < B or B < A must be
+  /// true if A != B.
+  /// Comparison keys, in order: validity, vector-ness, scalar-ness,
+  /// pointer-ness, address space, element count, then size in bits.
+  bool operator<(const LLTCodeGen &Other) const {
+    if (Ty.isValid() != Other.Ty.isValid())
+      return Ty.isValid() < Other.Ty.isValid();
+    if (!Ty.isValid())
+      return false;
+
+    if (Ty.isVector() != Other.Ty.isVector())
+      return Ty.isVector() < Other.Ty.isVector();
+    if (Ty.isScalar() != Other.Ty.isScalar())
+      return Ty.isScalar() < Other.Ty.isScalar();
+    if (Ty.isPointer() != Other.Ty.isPointer())
+      return Ty.isPointer() < Other.Ty.isPointer();
+
+    if (Ty.isPointer() && Ty.getAddressSpace() != Other.Ty.getAddressSpace())
+      return Ty.getAddressSpace() < Other.Ty.getAddressSpace();
+
+    if (Ty.isVector() && Ty.getElementCount() != Other.Ty.getElementCount())
+      return std::make_tuple(Ty.isScalable(),
+                             Ty.getElementCount().getKnownMinValue()) <
+             std::make_tuple(Other.Ty.isScalable(),
+                             Other.Ty.getElementCount().getKnownMinValue());
+
+    assert((!Ty.isVector() || Ty.isScalable() == Other.Ty.isScalable()) &&
+           "Unexpected mismatch of scalable property");
+    return Ty.isVector()
+               ? std::make_tuple(Ty.isScalable(),
+                                 Ty.getSizeInBits().getKnownMinValue()) <
+                     std::make_tuple(
+                         Other.Ty.isScalable(),
+                         Other.Ty.getSizeInBits().getKnownMinValue())
+               : Ty.getSizeInBits().getFixedValue() <
+                     Other.Ty.getSizeInBits().getFixedValue();
+  }
+
+  bool operator==(const LLTCodeGen &B) const { return Ty == B.Ty; }
+};
+
+// Track all types that are used so we can emit the corresponding enum.
+// Anonymous-namespace global; std::set keeps the entries ordered (via
+// LLTCodeGen::operator<) so the emitted GILLT_* enum is deterministic.
+std::set<LLTCodeGen> KnownTypes;
+
+class InstructionMatcher;
+/// Convert an MVT to an equivalent LLT if possible, or the invalid LLT() for
+/// MVTs that don't map cleanly to an LLT (e.g., iPTR, *any, ...).
+static std::optional<LLTCodeGen> MVTToLLT(MVT::SimpleValueType SVT) {
+  MVT VT(SVT);
+
+  // Note: single-element vectors fall through and are mapped as scalars.
+  if (VT.isVector() && !VT.getVectorElementCount().isScalar())
+    return LLTCodeGen(
+        LLT::vector(VT.getVectorElementCount(), VT.getScalarSizeInBits()));
+
+  if (VT.isInteger() || VT.isFloatingPoint())
+    return LLTCodeGen(LLT::scalar(VT.getSizeInBits()));
+
+  return std::nullopt;
+}
+
+/// Render every predicate attached to pattern node N as one comma-separated,
+/// human-readable string. Used to build the skip-reason diagnostics emitted
+/// by isTrivialOperatorNode()/failedImport().
+static std::string explainPredicates(const TreePatternNode *N) {
+  std::string Explanation;
+  StringRef Separator = "";
+  for (const TreePredicateCall &Call : N->getPredicateCalls()) {
+    const TreePredicateFn &P = Call.Fn;
+    // Start each entry with the originating PatFrag record's name, then
+    // append a short tag for every property the predicate carries.
+    Explanation +=
+        (Separator + P.getOrigPatFragRecord()->getRecord()->getName()).str();
+    Separator = ", ";
+
+    if (P.isAlwaysTrue())
+      Explanation += " always-true";
+    if (P.isImmediatePattern())
+      Explanation += " immediate";
+
+    if (P.isUnindexed())
+      Explanation += " unindexed";
+
+    if (P.isNonExtLoad())
+      Explanation += " non-extload";
+    if (P.isAnyExtLoad())
+      Explanation += " extload";
+    if (P.isSignExtLoad())
+      Explanation += " sextload";
+    if (P.isZeroExtLoad())
+      Explanation += " zextload";
+
+    if (P.isNonTruncStore())
+      Explanation += " non-truncstore";
+    if (P.isTruncStore())
+      Explanation += " truncstore";
+
+    if (Record *VT = P.getMemoryVT())
+      Explanation += (" MemVT=" + VT->getName()).str();
+    if (Record *VT = P.getScalarMemoryVT())
+      Explanation += (" ScalarVT(MemVT)=" + VT->getName()).str();
+
+    if (ListInit *AddrSpaces = P.getAddressSpaces()) {
+      raw_string_ostream OS(Explanation);
+      OS << " AddressSpaces=[";
+
+      // Non-integer entries in the list are silently skipped.
+      StringRef AddrSpaceSeparator;
+      for (Init *Val : AddrSpaces->getValues()) {
+        IntInit *IntVal = dyn_cast<IntInit>(Val);
+        if (!IntVal)
+          continue;
+
+        OS << AddrSpaceSeparator << IntVal->getValue();
+        AddrSpaceSeparator = ", ";
+      }
+
+      OS << ']';
+    }
+
+    int64_t MinAlign = P.getMinAlignment();
+    if (MinAlign > 0)
+      Explanation += " MinAlign=" + utostr(MinAlign);
+
+    if (P.isAtomicOrderingMonotonic())
+      Explanation += " monotonic";
+    if (P.isAtomicOrderingAcquire())
+      Explanation += " acquire";
+    if (P.isAtomicOrderingRelease())
+      Explanation += " release";
+    if (P.isAtomicOrderingAcquireRelease())
+      Explanation += " acq_rel";
+    if (P.isAtomicOrderingSequentiallyConsistent())
+      Explanation += " seq_cst";
+    if (P.isAtomicOrderingAcquireOrStronger())
+      Explanation += " >=acquire";
+    if (P.isAtomicOrderingWeakerThanAcquire())
+      Explanation += " <acquire";
+    if (P.isAtomicOrderingReleaseOrStronger())
+      Explanation += " >=release";
+    if (P.isAtomicOrderingWeakerThanRelease())
+      Explanation += " <release";
+  }
+  return Explanation;
+}
+
+/// Describe a pattern operator record for skip-reason diagnostics, keyed on
+/// which TableGen class the operator derives from.
+std::string explainOperator(Record *Operator) {
+  if (Operator->isSubClassOf("SDNode"))
+    return (" (" + Operator->getValueAsString("Opcode") + ")").str();
+
+  if (Operator->isSubClassOf("Intrinsic"))
+    return (" (Operator is an Intrinsic, " + Operator->getName() + ")").str();
+
+  if (Operator->isSubClassOf("ComplexPattern"))
+    return (" (Operator is an unmapped ComplexPattern, " + Operator->getName() +
+            ")")
+        .str();
+
+  if (Operator->isSubClassOf("SDNodeXForm"))
+    return (" (Operator is an unmapped SDNodeXForm, " + Operator->getName() +
+            ")")
+        .str();
+
+  return (" (Operator " + Operator->getName() + " not understood)").str();
+}
+
+/// Helper function to let the emitter report skip reason error messages.
+/// Wraps Reason in a StringError; callers typically surface it when the
+/// -warn-on-skipped-patterns option is enabled.
+static Error failedImport(const Twine &Reason) {
+  return make_error<StringError>(Reason, inconvertibleErrorCode());
+}
+
+/// Check that every predicate attached to pattern node N is one the
+/// GlobalISel importer knows how to translate; each `continue` below accepts
+/// one supported predicate kind. Returns Error::success() if N is
+/// importable, otherwise a failedImport error naming the first unsupported
+/// predicate (see explainPredicates).
+static Error isTrivialOperatorNode(const TreePatternNode *N) {
+  std::string Explanation;
+  std::string Separator;
+
+  bool HasUnsupportedPredicate = false;
+  for (const TreePredicateCall &Call : N->getPredicateCalls()) {
+    const TreePredicateFn &Predicate = Call.Fn;
+
+    if (Predicate.isAlwaysTrue())
+      continue;
+
+    if (Predicate.isImmediatePattern())
+      continue;
+
+    if (Predicate.hasNoUse())
+      continue;
+
+    if (Predicate.isNonExtLoad() || Predicate.isAnyExtLoad() ||
+        Predicate.isSignExtLoad() || Predicate.isZeroExtLoad())
+      continue;
+
+    if (Predicate.isNonTruncStore() || Predicate.isTruncStore())
+      continue;
+
+    if (Predicate.isLoad() && Predicate.getMemoryVT())
+      continue;
+
+    if (Predicate.isLoad() || Predicate.isStore()) {
+      if (Predicate.isUnindexed())
+        continue;
+    }
+
+    if (Predicate.isLoad() || Predicate.isStore() || Predicate.isAtomic()) {
+      const ListInit *AddrSpaces = Predicate.getAddressSpaces();
+      if (AddrSpaces && !AddrSpaces->empty())
+        continue;
+
+      if (Predicate.getMinAlignment() > 0)
+        continue;
+    }
+
+    if (Predicate.isAtomic() && Predicate.getMemoryVT())
+      continue;
+
+    if (Predicate.isAtomic() &&
+        (Predicate.isAtomicOrderingMonotonic() ||
+         Predicate.isAtomicOrderingAcquire() ||
+         Predicate.isAtomicOrderingRelease() ||
+         Predicate.isAtomicOrderingAcquireRelease() ||
+         Predicate.isAtomicOrderingSequentiallyConsistent() ||
+         Predicate.isAtomicOrderingAcquireOrStronger() ||
+         Predicate.isAtomicOrderingWeakerThanAcquire() ||
+         Predicate.isAtomicOrderingReleaseOrStronger() ||
+         Predicate.isAtomicOrderingWeakerThanRelease()))
+      continue;
+
+    if (Predicate.hasGISelPredicateCode())
+      continue;
+
+    // None of the accepted cases matched: record the failure and stop at the
+    // first unsupported predicate.
+    HasUnsupportedPredicate = true;
+    Explanation = Separator + "Has a predicate (" + explainPredicates(N) + ")";
+    Separator = ", ";
+    Explanation += (Separator + "first-failing:" +
+                    Predicate.getOrigPatFragRecord()->getRecord()->getName())
+                       .str();
+    break;
+  }
+
+  if (!HasUnsupportedPredicate)
+    return Error::success();
+
+  return failedImport(Explanation);
+}
+
+/// If V is a RegisterOperand or RegisterClass def, return the underlying
+/// register-class record (RegisterOperand indirects through its RegClass
+/// field); otherwise return nullptr.
+static Record *getInitValueAsRegClass(Init *V) {
+  if (DefInit *VDefInit = dyn_cast<DefInit>(V)) {
+    if (VDefInit->getDef()->isSubClassOf("RegisterOperand"))
+      return VDefInit->getDef()->getValueAsDef("RegClass");
+    if (VDefInit->getDef()->isSubClassOf("RegisterClass"))
+      return VDefInit->getDef();
+  }
+  return nullptr;
+}
+
+/// Mangle a set of subtarget-feature records into a single GIFBS_* (GlobalISel
+/// Feature BitSet) identifier by appending each feature's record name.
+std::string
+getNameForFeatureBitset(const std::vector<Record *> &FeatureBitset) {
+  std::string Name = "GIFBS";
+  for (const auto &Feature : FeatureBitset)
+    Name += ("_" + Feature->getName()).str();
+  return Name;
+}
+
+/// Build a scope-qualified name of the form "pred:<Scope>:<Name>" so that
+/// names bound in different predicate scopes cannot collide.
+static std::string getScopedName(unsigned Scope, const std::string &Name) {
+  return ("pred:" + Twine(Scope) + ":" + Name).str();
+}
+
+//===- MatchTable Helpers -------------------------------------------------===//
+
+class MatchTable;
+
+/// A record to be stored in a MatchTable.
+///
+/// This class represents any and all output that may be required to emit the
+/// MatchTable. Instances are most often configured to represent an opcode or
+/// value that will be emitted to the table with some formatting but it can also
+/// represent commas, comments, and other formatting instructions.
+struct MatchTableRecord {
+  enum RecordFlagsBits {
+    MTRF_None = 0x0,
+    /// Causes EmitStr to be formatted as comment when emitted.
+    MTRF_Comment = 0x1,
+    /// Causes the record value to be followed by a comma when emitted.
+    MTRF_CommaFollows = 0x2,
+    /// Causes the record value to be followed by a line break when emitted.
+    MTRF_LineBreakFollows = 0x4,
+    /// Indicates that the record defines a label and causes an additional
+    /// comment to be emitted containing the index of the label.
+    MTRF_Label = 0x8,
+    /// Causes the record to be emitted as the index of the label specified by
+    /// LabelID along with a comment indicating where that label is.
+    MTRF_JumpTarget = 0x10,
+    /// Causes the formatter to add a level of indentation before emitting the
+    /// record.
+    MTRF_Indent = 0x20,
+    /// Causes the formatter to remove a level of indentation after emitting the
+    /// record.
+    MTRF_Outdent = 0x40,
+  };
+
+  /// When MTRF_Label or MTRF_JumpTarget is used, indicates a label id to
+  /// reference or define.
+  unsigned LabelID;
+  /// The string to emit. Depending on the MTRF_* flags it may be a comment, a
+  /// value, a label name.
+  std::string EmitStr;
+
+private:
+  /// The number of MatchTable elements described by this record. Comments are 0
+  /// while values are typically 1. Values >1 may occur when we need to emit
+  /// values that exceed the size of a MatchTable element.
+  unsigned NumElements;
+
+public:
+  /// A bitfield of RecordFlagsBits flags.
+  unsigned Flags;
+
+  /// The actual run-time value, if known
+  int64_t RawValue;
+
+  /// \note ~0u is the reserved "no label" sentinel (asserted below), and
+  /// RawValue defaults to INT64_MIN meaning "run-time value unknown".
+  MatchTableRecord(std::optional<unsigned> LabelID_, StringRef EmitStr,
+                   unsigned NumElements, unsigned Flags,
+                   int64_t RawValue = std::numeric_limits<int64_t>::min())
+      : LabelID(LabelID_.value_or(~0u)), EmitStr(EmitStr),
+        NumElements(NumElements), Flags(Flags), RawValue(RawValue) {
+    assert((!LabelID_ || LabelID != ~0u) &&
+           "This value is reserved for non-labels");
+  }
+  MatchTableRecord(const MatchTableRecord &Other) = default;
+  MatchTableRecord(MatchTableRecord &&Other) = default;
+
+  /// Useful if a Match Table Record gets optimized out
+  void turnIntoComment() {
+    Flags |= MTRF_Comment;
+    Flags &= ~MTRF_CommaFollows;
+    NumElements = 0;
+  }
+
+  /// For Jump Table generation purposes
+  bool operator<(const MatchTableRecord &Other) const {
+    return RawValue < Other.RawValue;
+  }
+  int64_t getRawValue() const { return RawValue; }
+
+  void emit(raw_ostream &OS, bool LineBreakNextAfterThis,
+            const MatchTable &Table) const;
+  unsigned size() const { return NumElements; }
+};
+
+class Matcher;
+
+/// Holds the contents of a generated MatchTable to enable formatting and the
+/// necessary index tracking needed to support GIM_Try.
+class MatchTable {
+  /// An unique identifier for the table. The generated table will be named
+  /// MatchTable${ID}.
+  unsigned ID;
+  /// The records that make up the table. Also includes comments describing the
+  /// values being emitted and line breaks to format it.
+  std::vector<MatchTableRecord> Contents;
+  /// The currently defined labels.
+  DenseMap<unsigned, unsigned> LabelMap;
+  /// Tracks the sum of MatchTableRecord::NumElements as the table is built.
+  unsigned CurrentSize = 0;
+  /// A unique identifier for a MatchTable label.
+  unsigned CurrentLabelID = 0;
+  /// Determines if the table should be instrumented for rule coverage tracking.
+  bool IsWithCoverage;
+
+public:
+  // --- Factory helpers producing the record kinds a table can contain. ---
+  static MatchTableRecord LineBreak;
+  static MatchTableRecord Comment(StringRef Comment) {
+    return MatchTableRecord(std::nullopt, Comment, 0,
+                            MatchTableRecord::MTRF_Comment);
+  }
+  static MatchTableRecord Opcode(StringRef Opcode, int IndentAdjust = 0) {
+    unsigned ExtraFlags = 0;
+    if (IndentAdjust > 0)
+      ExtraFlags |= MatchTableRecord::MTRF_Indent;
+    if (IndentAdjust < 0)
+      ExtraFlags |= MatchTableRecord::MTRF_Outdent;
+
+    return MatchTableRecord(std::nullopt, Opcode, 1,
+                            MatchTableRecord::MTRF_CommaFollows | ExtraFlags);
+  }
+  static MatchTableRecord NamedValue(StringRef NamedValue) {
+    return MatchTableRecord(std::nullopt, NamedValue, 1,
+                            MatchTableRecord::MTRF_CommaFollows);
+  }
+  static MatchTableRecord NamedValue(StringRef NamedValue, int64_t RawValue) {
+    return MatchTableRecord(std::nullopt, NamedValue, 1,
+                            MatchTableRecord::MTRF_CommaFollows, RawValue);
+  }
+  static MatchTableRecord NamedValue(StringRef Namespace,
+                                     StringRef NamedValue) {
+    return MatchTableRecord(std::nullopt, (Namespace + "::" + NamedValue).str(),
+                            1, MatchTableRecord::MTRF_CommaFollows);
+  }
+  static MatchTableRecord NamedValue(StringRef Namespace, StringRef NamedValue,
+                                     int64_t RawValue) {
+    return MatchTableRecord(std::nullopt, (Namespace + "::" + NamedValue).str(),
+                            1, MatchTableRecord::MTRF_CommaFollows, RawValue);
+  }
+  static MatchTableRecord IntValue(int64_t IntValue) {
+    return MatchTableRecord(std::nullopt, llvm::to_string(IntValue), 1,
+                            MatchTableRecord::MTRF_CommaFollows);
+  }
+  static MatchTableRecord Label(unsigned LabelID) {
+    return MatchTableRecord(LabelID, "Label " + llvm::to_string(LabelID), 0,
+                            MatchTableRecord::MTRF_Label |
+                                MatchTableRecord::MTRF_Comment |
+                                MatchTableRecord::MTRF_LineBreakFollows);
+  }
+  static MatchTableRecord JumpTarget(unsigned LabelID) {
+    return MatchTableRecord(LabelID, "Label " + llvm::to_string(LabelID), 1,
+                            MatchTableRecord::MTRF_JumpTarget |
+                                MatchTableRecord::MTRF_Comment |
+                                MatchTableRecord::MTRF_CommaFollows);
+  }
+
+  static MatchTable buildTable(ArrayRef<Matcher *> Rules, bool WithCoverage);
+
+  MatchTable(bool WithCoverage, unsigned ID = 0)
+      : ID(ID), IsWithCoverage(WithCoverage) {}
+
+  bool isWithCoverage() const { return IsWithCoverage; }
+
+  /// Append a record; label records register their position eagerly so later
+  /// JumpTarget records can resolve to an element index.
+  void push_back(const MatchTableRecord &Value) {
+    if (Value.Flags & MatchTableRecord::MTRF_Label)
+      defineLabel(Value.LabelID);
+    Contents.push_back(Value);
+    CurrentSize += Value.size();
+  }
+
+  unsigned allocateLabelID() { return CurrentLabelID++; }
+
+  /// Bind LabelID to the current element index (the table position the next
+  /// value record will occupy).
+  void defineLabel(unsigned LabelID) {
+    LabelMap.insert(std::make_pair(LabelID, CurrentSize));
+  }
+
+  unsigned getLabelIndex(unsigned LabelID) const {
+    const auto I = LabelMap.find(LabelID);
+    assert(I != LabelMap.end() && "Use of undeclared label");
+    return I->second;
+  }
+
+  void emitUse(raw_ostream &OS) const { OS << "MatchTable" << ID; }
+
+  /// Emit the C++ array definition for this table, honoring the records'
+  /// indent/outdent/line-break formatting flags.
+  void emitDeclaration(raw_ostream &OS) const {
+    unsigned Indentation = 4;
+    OS << "  constexpr static int64_t MatchTable" << ID << "[] = {";
+    LineBreak.emit(OS, true, *this);
+    OS << std::string(Indentation, ' ');
+
+    for (auto I = Contents.begin(), E = Contents.end(); I != E;
+         ++I) {
+      bool LineBreakIsNext = false;
+      const auto &NextI = std::next(I);
+
+      // Look ahead for a pure line-break record so a trailing comment can be
+      // emitted in "//" form (see MatchTableRecord::emit).
+      if (NextI != E) {
+        if (NextI->EmitStr == "" &&
+            NextI->Flags == MatchTableRecord::MTRF_LineBreakFollows)
+          LineBreakIsNext = true;
+      }
+
+      if (I->Flags & MatchTableRecord::MTRF_Indent)
+        Indentation += 2;
+
+      I->emit(OS, LineBreakIsNext, *this);
+      if (I->Flags & MatchTableRecord::MTRF_LineBreakFollows)
+        OS << std::string(Indentation, ' ');
+
+      if (I->Flags & MatchTableRecord::MTRF_Outdent)
+        Indentation -= 2;
+    }
+    OS << "};\n";
+  }
+};
+
+// Shared sentinel record: occupies no table elements and emits nothing but a
+// line break.
+MatchTableRecord MatchTable::LineBreak = {
+    std::nullopt, "" /* Emit String */, 0 /* Elements */,
+    MatchTableRecord::MTRF_LineBreakFollows};
+
+/// Emit this record to OS. Comment records use the "//" form when a line
+/// break follows (nothing else may share the line), and the "/* */" form
+/// otherwise; label and jump-target records additionally print the resolved
+/// element index from Table's label map.
+void MatchTableRecord::emit(raw_ostream &OS, bool LineBreakIsNextAfterThis,
+                            const MatchTable &Table) const {
+  bool UseLineComment =
+      LineBreakIsNextAfterThis || (Flags & MTRF_LineBreakFollows);
+  // A value or jump target still follows on this line, so an inline comment
+  // is required.
+  if (Flags & (MTRF_JumpTarget | MTRF_CommaFollows))
+    UseLineComment = false;
+
+  if (Flags & MTRF_Comment)
+    OS << (UseLineComment ? "// " : "/*");
+
+  OS << EmitStr;
+  if (Flags & MTRF_Label)
+    OS << ": @" << Table.getLabelIndex(LabelID);
+
+  if ((Flags & MTRF_Comment) && !UseLineComment)
+    OS << "*/";
+
+  if (Flags & MTRF_JumpTarget) {
+    if (Flags & MTRF_Comment)
+      OS << " ";
+    OS << Table.getLabelIndex(LabelID);
+  }
+
+  if (Flags & MTRF_CommaFollows) {
+    OS << ",";
+    if (!LineBreakIsNextAfterThis && !(Flags & MTRF_LineBreakFollows))
+      OS << " ";
+  }
+
+  if (Flags & MTRF_LineBreakFollows)
+    OS << "\n";
+}
+
+MatchTable &operator<<(MatchTable &Table, const MatchTableRecord &Value) {
+ Table.push_back(Value);
+ return Table;
+}
+
+//===- Matchers -----------------------------------------------------------===//
+
+class OperandMatcher;
+class MatchAction;
+class PredicateMatcher;
+
+/// Abstract interface for anything that can emit itself into a MatchTable
+/// and expose/relinquish its first condition for optimization (implemented
+/// below by GroupMatcher, SwitchMatcher and RuleMatcher).
+class Matcher {
+public:
+  virtual ~Matcher() = default;
+  // Default is a no-op so leaf matchers without an optimization step need not
+  // override it.
+  virtual void optimize() {}
+  virtual void emit(MatchTable &Table) = 0;
+
+  virtual bool hasFirstCondition() const = 0;
+  virtual const PredicateMatcher &getFirstCondition() const = 0;
+  virtual std::unique_ptr<PredicateMatcher> popFirstCondition() = 0;
+};
+
+/// Emit every rule into a fresh table, terminated by a final GIM_Reject
+/// opcode.
+MatchTable MatchTable::buildTable(ArrayRef<Matcher *> Rules,
+                                  bool WithCoverage) {
+  MatchTable Table(WithCoverage);
+  for (Matcher *Rule : Rules)
+    Rule->emit(Table);
+
+  return Table << MatchTable::Opcode("GIM_Reject") << MatchTable::LineBreak;
+}
+
+/// A set of nested matchers sharing a common prefix of conditions (see the
+/// Conditions member); the prefix is hoisted out of the individual matchers.
+class GroupMatcher final : public Matcher {
+  /// Conditions that form a common prefix of all the matchers contained.
+  SmallVector<std::unique_ptr<PredicateMatcher>, 1> Conditions;
+
+  /// All the nested matchers, sharing a common prefix.
+  std::vector<Matcher *> Matchers;
+
+  /// An owning collection for any auxiliary matchers created while optimizing
+  /// nested matchers contained.
+  std::vector<std::unique_ptr<Matcher>> MatcherStorage;
+
+public:
+  /// Add a matcher to the collection of nested matchers if it meets the
+  /// requirements, and return true. If it doesn't, do nothing and return false.
+  ///
+  /// Expected to preserve its argument, so it could be moved out later on.
+  bool addMatcher(Matcher &Candidate);
+
+  /// Mark the matcher as fully-built and ensure any invariants expected by both
+  /// optimize() and emit(...) methods. Generally, both sequences of calls
+  /// are expected to lead to a sensible result:
+  ///
+  /// addMatcher(...)*; finalize(); optimize(); emit(...); and
+  /// addMatcher(...)*; finalize(); emit(...);
+  ///
+  /// or generally
+  ///
+  /// addMatcher(...)*; finalize(); { optimize()*; emit(...); }*
+  ///
+  /// Multiple calls to optimize() are expected to be handled gracefully, though
+  /// optimize() is not expected to be idempotent. Multiple calls to finalize()
+  /// aren't generally supported. emit(...) is expected to be non-mutating and
+  /// producing the exact same results upon repeated calls.
+  ///
+  /// addMatcher() calls after the finalize() call are not supported.
+  ///
+  /// finalize() and optimize() are both allowed to mutate the contained
+  /// matchers, so moving them out after finalize() is not supported.
+  void finalize();
+  void optimize() override;
+  void emit(MatchTable &Table) override;
+
+  /// Could be used to move out the matchers added previously, unless finalize()
+  /// has been already called. If any of the matchers are moved out, the group
+  /// becomes safe to destroy, but not safe to re-use for anything else.
+  iterator_range<std::vector<Matcher *>::iterator> matchers() {
+    return make_range(Matchers.begin(), Matchers.end());
+  }
+  size_t size() const { return Matchers.size(); }
+  bool empty() const { return Matchers.empty(); }
+
+  /// Remove and return the first shared condition; asserts if none remain.
+  std::unique_ptr<PredicateMatcher> popFirstCondition() override {
+    assert(!Conditions.empty() &&
+           "Trying to pop a condition from a condition-less group");
+    std::unique_ptr<PredicateMatcher> P = std::move(Conditions.front());
+    Conditions.erase(Conditions.begin());
+    return P;
+  }
+  const PredicateMatcher &getFirstCondition() const override {
+    assert(!Conditions.empty() &&
+           "Trying to get a condition from a condition-less group");
+    return *Conditions.front();
+  }
+  bool hasFirstCondition() const override { return !Conditions.empty(); }
+
+private:
+  /// See if a candidate matcher could be added to this group solely by
+  /// analyzing its first condition.
+  bool candidateConditionMatches(const PredicateMatcher &Predicate) const;
+};
+
+/// A switch-like matcher: every contained matcher checks the same kind of
+/// value (same type and path, i.e. isIdenticalDownToValue) but against a
+/// distinct case value.
+class SwitchMatcher : public Matcher {
+  /// All the nested matchers, representing distinct switch-cases. The first
+  /// conditions (as Matcher::getFirstCondition() reports) of all the nested
+  /// matchers must share the same type and path to a value they check, in other
+  /// words, be isIdenticalDownToValue, but have different values they check
+  /// against.
+  std::vector<Matcher *> Matchers;
+
+  /// The representative condition, with a type and a path (InsnVarID and OpIdx
+  /// in most cases) shared by all the matchers contained.
+  std::unique_ptr<PredicateMatcher> Condition = nullptr;
+
+  /// Temporary set used to check that the case values don't repeat within the
+  /// same switch.
+  std::set<MatchTableRecord> Values;
+
+  /// An owning collection for any auxiliary matchers created while optimizing
+  /// nested matchers contained.
+  std::vector<std::unique_ptr<Matcher>> MatcherStorage;
+
+public:
+  /// Add Candidate as a new case if its first condition is compatible with
+  /// this switch (see candidateConditionMatches); returns false otherwise.
+  bool addMatcher(Matcher &Candidate);
+
+  void finalize();
+  void emit(MatchTable &Table) override;
+
+  iterator_range<std::vector<Matcher *>::iterator> matchers() {
+    return make_range(Matchers.begin(), Matchers.end());
+  }
+  size_t size() const { return Matchers.size(); }
+  bool empty() const { return Matchers.empty(); }
+
+  std::unique_ptr<PredicateMatcher> popFirstCondition() override {
+    // SwitchMatcher doesn't have a common first condition for its cases, as all
+    // the cases only share a kind of a value (a type and a path to it) they
+    // match, but deliberately differ in the actual value they match.
+    llvm_unreachable("Trying to pop a condition from a condition-less group")
;
+  }
+  const PredicateMatcher &getFirstCondition() const override {
+    llvm_unreachable("Trying to pop a condition from a condition-less group");
+  }
+  bool hasFirstCondition() const override { return false; }
+
+private:
+  /// See if the predicate type has a Switch-implementation for it.
+  static bool isSupportedPredicateType(const PredicateMatcher &Predicate);
+
+  bool candidateConditionMatches(const PredicateMatcher &Predicate) const;
+
+  /// emit()-helper
+  static void emitPredicateSpecificOpcodes(const PredicateMatcher &P,
+                                           MatchTable &Table);
+};
+
+/// Generates code to check that a match rule matches.
+class RuleMatcher : public Matcher {
+public:
+ using ActionList = std::list<std::unique_ptr<MatchAction>>;
+ using action_iterator = ActionList::iterator;
+
+protected:
+ /// A list of matchers that all need to succeed for the current rule to match.
+ /// FIXME: This currently supports a single match position but could be
+ /// extended to support multiple positions to support div/rem fusion or
+ /// load-multiple instructions.
+ using MatchersTy = std::vector<std::unique_ptr<InstructionMatcher>> ;
+ MatchersTy Matchers;
+
+ /// A list of actions that need to be taken when all predicates in this rule
+ /// have succeeded.
+ ActionList Actions;
+
+ using DefinedInsnVariablesMap = std::map<InstructionMatcher *, unsigned>;
+
+ /// A map of instruction matchers to the local variables
+ DefinedInsnVariablesMap InsnVariableIDs;
+
+ using MutatableInsnSet = SmallPtrSet<InstructionMatcher *, 4>;
+
+ // The set of instruction matchers that have not yet been claimed for mutation
+ // by a BuildMI.
+ MutatableInsnSet MutatableInsns;
+
+ /// A map of named operands defined by the matchers that may be referenced by
+ /// the renderers.
+ StringMap<OperandMatcher *> DefinedOperands;
+
+ /// A map of anonymous physical register operands defined by the matchers that
+ /// may be referenced by the renderers.
+ DenseMap<Record *, OperandMatcher *> PhysRegOperands;
+
+ /// ID for the next instruction variable defined with implicitlyDefineInsnVar()
+ unsigned NextInsnVarID;
+
+ /// ID for the next output instruction allocated with allocateOutputInsnID()
+ unsigned NextOutputInsnID;
+
+ /// ID for the next temporary register ID allocated with allocateTempRegID()
+ unsigned NextTempRegID;
+
+ std::vector<Record *> RequiredFeatures;
+ std::vector<std::unique_ptr<PredicateMatcher>> EpilogueMatchers;
+
+ ArrayRef<SMLoc> SrcLoc;
+
+ typedef std::tuple<Record *, unsigned, unsigned>
+ DefinedComplexPatternSubOperand;
+ typedef StringMap<DefinedComplexPatternSubOperand>
+ DefinedComplexPatternSubOperandMap;
+ /// A map of Symbolic Names to ComplexPattern sub-operands.
+ DefinedComplexPatternSubOperandMap ComplexSubOperands;
+ /// A map used to for multiple referenced error check of ComplexSubOperand.
+ /// ComplexSubOperand can't be referenced multiple from different operands,
+ /// however multiple references from same operand are allowed since that is
+ /// how 'same operand checks' are generated.
+ StringMap<std::string> ComplexSubOperandsParentName;
+
+ uint64_t RuleID;
+ static uint64_t NextRuleID;
+
+public:
+ RuleMatcher(ArrayRef<SMLoc> SrcLoc)
+ : NextInsnVarID(0), NextOutputInsnID(0), NextTempRegID(0), SrcLoc(SrcLoc),
+ RuleID(NextRuleID++) {}
+ RuleMatcher(RuleMatcher &&Other) = default;
+ RuleMatcher &operator=(RuleMatcher &&Other) = default;
+
+ uint64_t getRuleID() const { return RuleID; }
+
+ InstructionMatcher &addInstructionMatcher(StringRef SymbolicName);
+ void addRequiredFeature(Record *Feature);
+ const std::vector<Record *> &getRequiredFeatures() const;
+
+ template <class Kind, class... Args> Kind &addAction(Args &&... args);
+ template <class Kind, class... Args>
+ action_iterator insertAction(action_iterator InsertPt, Args &&... args);
+
+ /// Define an instruction without emitting any code to do so.
+ unsigned implicitlyDefineInsnVar(InstructionMatcher &Matcher);
+
+ unsigned getInsnVarID(InstructionMatcher &InsnMatcher) const;
+ DefinedInsnVariablesMap::const_iterator defined_insn_vars_begin() const {
+ return InsnVariableIDs.begin();
+ }
+ DefinedInsnVariablesMap::const_iterator defined_insn_vars_end() const {
+ return InsnVariableIDs.end();
+ }
+ iterator_range<typename DefinedInsnVariablesMap::const_iterator>
+ defined_insn_vars() const {
+ return make_range(defined_insn_vars_begin(), defined_insn_vars_end());
+ }
+
+ MutatableInsnSet::const_iterator mutatable_insns_begin() const {
+ return MutatableInsns.begin();
+ }
+ MutatableInsnSet::const_iterator mutatable_insns_end() const {
+ return MutatableInsns.end();
+ }
+ iterator_range<typename MutatableInsnSet::const_iterator>
+ mutatable_insns() const {
+ return make_range(mutatable_insns_begin(), mutatable_insns_end());
+ }
+  /// Remove \p InsnMatcher from the set of instructions that are still
+  /// available for in-place mutation, so it cannot be reserved twice.
+  void reserveInsnMatcherForMutation(InstructionMatcher *InsnMatcher) {
+    bool R = MutatableInsns.erase(InsnMatcher);
+    assert(R && "Reserving a mutatable insn that isn't available");
+    (void)R; // R is only read by the assert; avoids a warning in NDEBUG builds.
+  }
+
+ action_iterator actions_begin() { return Actions.begin(); }
+ action_iterator actions_end() { return Actions.end(); }
+ iterator_range<action_iterator> actions() {
+ return make_range(actions_begin(), actions_end());
+ }
+
+ void defineOperand(StringRef SymbolicName, OperandMatcher &OM);
+
+ void definePhysRegOperand(Record *Reg, OperandMatcher &OM);
+
+  /// Record that the complex suboperand named \p SymbolicName is produced by
+  /// renderer \p RendererID at suboperand index \p SubOperandID, on behalf of
+  /// the operand named \p ParentSymbolicName.
+  ///
+  /// Returns an error if \p SymbolicName was already recorded for a different
+  /// parent operand; a repeat reference from the same parent is accepted
+  /// silently (it becomes a 'same operand check', see below).
+  Error defineComplexSubOperand(StringRef SymbolicName, Record *ComplexPattern,
+                                unsigned RendererID, unsigned SubOperandID,
+                                StringRef ParentSymbolicName) {
+    std::string ParentName(ParentSymbolicName);
+    if (ComplexSubOperands.count(SymbolicName)) {
+      const std::string &RecordedParentName =
+          ComplexSubOperandsParentName[SymbolicName];
+      if (RecordedParentName != ParentName)
+        return failedImport("Error: Complex suboperand " + SymbolicName +
+                            " referenced by different operands: " +
+                            RecordedParentName + " and " + ParentName + ".");
+      // A complex suboperand referenced more than once from the same operand
+      // is used to generate a 'same operand check'. Emitting of
+      // GIR_ComplexSubOperandRenderer for them is already handled.
+      return Error::success();
+    }
+
+    ComplexSubOperands[SymbolicName] =
+        std::make_tuple(ComplexPattern, RendererID, SubOperandID);
+    ComplexSubOperandsParentName[SymbolicName] = ParentName;
+
+    return Error::success();
+  }
+
+  /// Look up the (ComplexPattern, RendererID, SubOperandID) tuple previously
+  /// recorded by defineComplexSubOperand() for \p SymbolicName, or
+  /// std::nullopt if nothing was recorded under that name.
+  std::optional<DefinedComplexPatternSubOperand>
+  getComplexSubOperand(StringRef SymbolicName) const {
+    const auto &I = ComplexSubOperands.find(SymbolicName);
+    if (I == ComplexSubOperands.end())
+      return std::nullopt;
+    return I->second;
+  }
+
+ InstructionMatcher &getInstructionMatcher(StringRef SymbolicName) const;
+ const OperandMatcher &getOperandMatcher(StringRef Name) const;
+ const OperandMatcher &getPhysRegOperandMatcher(Record *) const;
+
+ void optimize() override;
+ void emit(MatchTable &Table) override;
+
+ /// Compare the priority of this object and B.
+ ///
+ /// Returns true if this object is more important than B.
+ bool isHigherPriorityThan(const RuleMatcher &B) const;
+
+ /// Report the maximum number of temporary operands needed by the rule
+ /// matcher.
+ unsigned countRendererFns() const;
+
+ std::unique_ptr<PredicateMatcher> popFirstCondition() override;
+ const PredicateMatcher &getFirstCondition() const override;
+ LLTCodeGen getFirstConditionAsRootType();
+ bool hasFirstCondition() const override;
+ unsigned getNumOperands() const;
+ StringRef getOpcode() const;
+
+ // FIXME: Remove this as soon as possible
+ InstructionMatcher &insnmatchers_front() const { return *Matchers.front(); }
+
+ unsigned allocateOutputInsnID() { return NextOutputInsnID++; }
+ unsigned allocateTempRegID() { return NextTempRegID++; }
+
+ iterator_range<MatchersTy::iterator> insnmatchers() {
+ return make_range(Matchers.begin(), Matchers.end());
+ }
+ bool insnmatchers_empty() const { return Matchers.empty(); }
+ void insnmatchers_pop_front() { Matchers.erase(Matchers.begin()); }
+};
+
+// Storage for the monotonically increasing ID assigned to each RuleMatcher
+// by its constructor.
+uint64_t RuleMatcher::NextRuleID = 0;
+
+using action_iterator = RuleMatcher::action_iterator;
+
+/// Mixin holding an ordered, owned list of predicates of type PredicateTy and
+/// the machinery to emit MatchTable opcodes for them.
+template <class PredicateTy> class PredicateListMatcher {
+private:
+  /// Template instantiations should specialize this to return a string to use
+  /// for the comment emitted when there are no predicates.
+  std::string getNoPredicateComment() const;
+
+protected:
+  using PredicatesTy = std::deque<std::unique_ptr<PredicateTy>>;
+  PredicatesTy Predicates;
+
+  /// Track if the list of predicates was manipulated by one of the optimization
+  /// methods.
+  bool Optimized = false;
+
+public:
+  typename PredicatesTy::iterator predicates_begin() {
+    return Predicates.begin();
+  }
+  typename PredicatesTy::iterator predicates_end() {
+    return Predicates.end();
+  }
+  iterator_range<typename PredicatesTy::iterator> predicates() {
+    return make_range(predicates_begin(), predicates_end());
+  }
+  typename PredicatesTy::size_type predicates_size() const {
+    return Predicates.size();
+  }
+  bool predicates_empty() const { return Predicates.empty(); }
+
+  /// Detach and return the first predicate. Sets Optimized so that a list
+  /// emptied this way is not later mistaken for one that never had predicates.
+  std::unique_ptr<PredicateTy> predicates_pop_front() {
+    std::unique_ptr<PredicateTy> Front = std::move(Predicates.front());
+    Predicates.pop_front();
+    Optimized = true;
+    return Front;
+  }
+
+  void prependPredicate(std::unique_ptr<PredicateTy> &&Predicate) {
+    Predicates.push_front(std::move(Predicate));
+  }
+
+  /// Drop all null entries from the list (stable_partition with logical_not
+  /// moves the nulls to the front, preserving the order of the survivors).
+  void eraseNullPredicates() {
+    const auto NewEnd =
+        std::stable_partition(Predicates.begin(), Predicates.end(),
+                              std::logical_not<std::unique_ptr<PredicateTy>>());
+    if (NewEnd != Predicates.begin()) {
+      Predicates.erase(Predicates.begin(), NewEnd);
+      Optimized = true;
+    }
+  }
+
+  /// Emit MatchTable opcodes that tests whether all the predicates are met.
+  template <class... Args>
+  void emitPredicateListOpcodes(MatchTable &Table, Args &&... args) {
+    // Only emit the "no predicates" comment for lists that were empty from
+    // the start; optimized-away lists stay silent.
+    if (Predicates.empty() && !Optimized) {
+      Table << MatchTable::Comment(getNoPredicateComment())
+            << MatchTable::LineBreak;
+      return;
+    }
+
+    for (const auto &Predicate : predicates())
+      Predicate->emitPredicateOpcodes(Table, std::forward<Args>(args)...);
+  }
+
+  /// Provide a function to avoid emitting certain predicates. This is used to
+  /// defer some predicate checks until after others
+  using PredicateFilterFunc = std::function<bool(const PredicateTy&)>;
+
+  /// Emit MatchTable opcodes for predicates which satisfy \p
+  /// ShouldEmitPredicate. This should be called multiple times to ensure all
+  /// predicates are eventually added to the match table.
+  template <class... Args>
+  void emitFilteredPredicateListOpcodes(PredicateFilterFunc ShouldEmitPredicate,
+                                        MatchTable &Table, Args &&... args) {
+    if (Predicates.empty() && !Optimized) {
+      Table << MatchTable::Comment(getNoPredicateComment())
+            << MatchTable::LineBreak;
+      return;
+    }
+
+    for (const auto &Predicate : predicates()) {
+      if (ShouldEmitPredicate(*Predicate))
+        Predicate->emitPredicateOpcodes(Table, std::forward<Args>(args)...);
+    }
+  }
+};
+
+/// Abstract base for all instruction- and operand-level predicates.
+class PredicateMatcher {
+public:
+  /// This enum is used for RTTI and also defines the priority that is given to
+  /// the predicate when generating the matcher code. Kinds with higher priority
+  /// must be tested first.
+  ///
+  /// The relative priority of OPM_LLT, OPM_RegBank, and OPM_MBB do not matter
+  /// but OPM_Int must have priority over OPM_RegBank since constant integers
+  /// are represented by a virtual register defined by a G_CONSTANT instruction.
+  ///
+  /// Note: The relative priority between IPM_ and OPM_ does not matter, they
+  /// are currently not compared between each other.
+  enum PredicateKind {
+    IPM_Opcode,
+    IPM_NumOperands,
+    IPM_ImmPredicate,
+    IPM_Imm,
+    IPM_AtomicOrderingMMO,
+    IPM_MemoryLLTSize,
+    IPM_MemoryVsLLTSize,
+    IPM_MemoryAddressSpace,
+    IPM_MemoryAlignment,
+    IPM_VectorSplatImm,
+    IPM_NoUse,
+    IPM_GenericPredicate,
+    OPM_SameOperand,
+    OPM_ComplexPattern,
+    OPM_IntrinsicID,
+    OPM_CmpPredicate,
+    OPM_Instruction,
+    OPM_Int,
+    OPM_LiteralInt,
+    OPM_LLT,
+    OPM_PointerToAny,
+    OPM_RegBank,
+    OPM_MBB,
+    OPM_RecordNamedOperand,
+  };
+
+protected:
+  PredicateKind Kind;
+  // Index of the matched instruction variable this predicate applies to.
+  unsigned InsnVarID;
+  // Operand index within that instruction; ~0 for instruction predicates.
+  unsigned OpIdx;
+
+public:
+  PredicateMatcher(PredicateKind Kind, unsigned InsnVarID, unsigned OpIdx = ~0)
+      : Kind(Kind), InsnVarID(InsnVarID), OpIdx(OpIdx) {}
+
+  unsigned getInsnVarID() const { return InsnVarID; }
+  unsigned getOpIdx() const { return OpIdx; }
+
+  virtual ~PredicateMatcher() = default;
+  /// Emit MatchTable opcodes that check the predicate for the given operand.
+  virtual void emitPredicateOpcodes(MatchTable &Table,
+                                    RuleMatcher &Rule) const = 0;
+
+  PredicateKind getKind() const { return Kind; }
+
+  bool dependsOnOperands() const {
+    // Custom predicates really depend on the context pattern of the
+    // instruction, not just the individual instruction. This therefore
+    // implicitly depends on all other pattern constraints.
+    return Kind == IPM_GenericPredicate;
+  }
+
+  virtual bool isIdentical(const PredicateMatcher &B) const {
+    return B.getKind() == getKind() && InsnVarID == B.InsnVarID &&
+           OpIdx == B.OpIdx;
+  }
+
+  /// Like isIdentical() but ignores the predicate's payload; only meaningful
+  /// for predicates that expose that payload via getValue()/hasValue().
+  virtual bool isIdenticalDownToValue(const PredicateMatcher &B) const {
+    return hasValue() && PredicateMatcher::isIdentical(B);
+  }
+
+  /// Subclasses returning true from hasValue() must override this to expose
+  /// the value compared on; the base implementation is unreachable.
+  virtual MatchTableRecord getValue() const {
+    assert(hasValue() && "Can not get a value of a value-less predicate!");
+    llvm_unreachable("Not implemented yet");
+  }
+  virtual bool hasValue() const { return false; }
+
+  /// Report the maximum number of temporary operands needed by the predicate
+  /// matcher.
+  virtual unsigned countRendererFns() const { return 0; }
+};
+
+/// Generates code to check a predicate of an operand.
+///
+/// Typical predicates include:
+/// * Operand is a particular register.
+/// * Operand is assigned a particular register bank.
+/// * Operand is an MBB.
+class OperandPredicateMatcher : public PredicateMatcher {
+public:
+  OperandPredicateMatcher(PredicateKind Kind, unsigned InsnVarID,
+                          unsigned OpIdx)
+      : PredicateMatcher(Kind, InsnVarID, OpIdx) {}
+  virtual ~OperandPredicateMatcher() {}
+
+  /// Compare the priority of this object and B.
+  ///
+  /// Returns true if this object is more important than B.
+  /// (Defined out-of-line; ordering affects emitted-check order.)
+  virtual bool isHigherPriorityThan(const OperandPredicateMatcher &B) const;
+};
+
+// Comment emitted into the MatchTable when an operand predicate list is empty.
+template <>
+std::string
+PredicateListMatcher<OperandPredicateMatcher>::getNoPredicateComment() const {
+  return "No operand predicates";
+}
+
+/// Generates code to check that a register operand is defined by the same exact
+/// one as another.
+class SameOperandMatcher : public OperandPredicateMatcher {
+  /// Symbolic name of the operand this one must be identical to.
+  std::string MatchingName;
+  /// Operand index of the original occurrence being compared against.
+  unsigned OrigOpIdx;
+
+public:
+  SameOperandMatcher(unsigned InsnVarID, unsigned OpIdx, StringRef MatchingName,
+                     unsigned OrigOpIdx)
+      : OperandPredicateMatcher(OPM_SameOperand, InsnVarID, OpIdx),
+        MatchingName(MatchingName), OrigOpIdx(OrigOpIdx) {}
+
+  static bool classof(const PredicateMatcher *P) {
+    return P->getKind() == OPM_SameOperand;
+  }
+
+  void emitPredicateOpcodes(MatchTable &Table,
+                            RuleMatcher &Rule) const override;
+
+  bool isIdentical(const PredicateMatcher &B) const override {
+    return OperandPredicateMatcher::isIdentical(B) &&
+           OrigOpIdx == cast<SameOperandMatcher>(&B)->OrigOpIdx &&
+           MatchingName == cast<SameOperandMatcher>(&B)->MatchingName;
+  }
+};
+
+/// Generates code to check that an operand is a particular LLT.
+class LLTOperandMatcher : public OperandPredicateMatcher {
+protected:
+  LLTCodeGen Ty;
+
+public:
+  /// Maps every type in KnownTypes to a stable small integer used as the
+  /// MatchTable value; rebuilt lazily by hasValue() when KnownTypes grows.
+  static std::map<LLTCodeGen, unsigned> TypeIDValues;
+
+  static void initTypeIDValuesMap() {
+    TypeIDValues.clear();
+
+    unsigned ID = 0;
+    for (const LLTCodeGen &LLTy : KnownTypes)
+      TypeIDValues[LLTy] = ID++;
+  }
+
+  LLTOperandMatcher(unsigned InsnVarID, unsigned OpIdx, const LLTCodeGen &Ty)
+      : OperandPredicateMatcher(OPM_LLT, InsnVarID, OpIdx), Ty(Ty) {
+    // Register the type so it gets an ID in the next initTypeIDValuesMap().
+    KnownTypes.insert(Ty);
+  }
+
+  static bool classof(const PredicateMatcher *P) {
+    return P->getKind() == OPM_LLT;
+  }
+  bool isIdentical(const PredicateMatcher &B) const override {
+    return OperandPredicateMatcher::isIdentical(B) &&
+           Ty == cast<LLTOperandMatcher>(&B)->Ty;
+  }
+  MatchTableRecord getValue() const override {
+    const auto VI = TypeIDValues.find(Ty);
+    if (VI == TypeIDValues.end())
+      return MatchTable::NamedValue(getTy().getCxxEnumValue());
+    return MatchTable::NamedValue(getTy().getCxxEnumValue(), VI->second);
+  }
+  bool hasValue() const override {
+    // A size mismatch means types were added since the map was last built.
+    if (TypeIDValues.size() != KnownTypes.size())
+      initTypeIDValuesMap();
+    return TypeIDValues.count(Ty);
+  }
+
+  LLTCodeGen getTy() const { return Ty; }
+
+  void emitPredicateOpcodes(MatchTable &Table,
+                            RuleMatcher &Rule) const override {
+    Table << MatchTable::Opcode("GIM_CheckType") << MatchTable::Comment("MI")
+          << MatchTable::IntValue(InsnVarID) << MatchTable::Comment("Op")
+          << MatchTable::IntValue(OpIdx) << MatchTable::Comment("Type")
+          << getValue() << MatchTable::LineBreak;
+  }
+};
+
+std::map<LLTCodeGen, unsigned> LLTOperandMatcher::TypeIDValues;
+
+/// Generates code to check that an operand is a pointer to any address space.
+///
+/// In SelectionDAG, the types did not describe pointers or address spaces. As a
+/// result, iN is used to describe a pointer of N bits to any address space and
+/// PatFrag predicates are typically used to constrain the address space. There's
+/// no reliable means to derive the missing type information from the pattern so
+/// imported rules must test the components of a pointer separately.
+///
+/// If SizeInBits is zero, then the pointer size will be obtained from the
+/// subtarget.
+class PointerToAnyOperandMatcher : public OperandPredicateMatcher {
+protected:
+  // Pointer width to check; 0 defers to the subtarget's pointer size.
+  unsigned SizeInBits;
+
+public:
+  PointerToAnyOperandMatcher(unsigned InsnVarID, unsigned OpIdx,
+                             unsigned SizeInBits)
+      : OperandPredicateMatcher(OPM_PointerToAny, InsnVarID, OpIdx),
+        SizeInBits(SizeInBits) {}
+
+  static bool classof(const PredicateMatcher *P) {
+    return P->getKind() == OPM_PointerToAny;
+  }
+
+  bool isIdentical(const PredicateMatcher &B) const override {
+    return OperandPredicateMatcher::isIdentical(B) &&
+           SizeInBits == cast<PointerToAnyOperandMatcher>(&B)->SizeInBits;
+  }
+
+  void emitPredicateOpcodes(MatchTable &Table,
+                            RuleMatcher &Rule) const override {
+    Table << MatchTable::Opcode("GIM_CheckPointerToAny")
+          << MatchTable::Comment("MI") << MatchTable::IntValue(InsnVarID)
+          << MatchTable::Comment("Op") << MatchTable::IntValue(OpIdx)
+          << MatchTable::Comment("SizeInBits")
+          << MatchTable::IntValue(SizeInBits) << MatchTable::LineBreak;
+  }
+};
+
+/// Generates code to record named operand in RecordedOperands list at StoreIdx.
+/// Predicates with 'let PredicateCodeUsesOperands = 1' get RecordedOperands as
+/// an argument to predicate's c++ code once all operands have been matched.
+class RecordNamedOperandMatcher : public OperandPredicateMatcher {
+protected:
+  // Slot in the RecordedOperands list where the operand is stored.
+  unsigned StoreIdx;
+  // Name used only in the emitted comment for readability.
+  std::string Name;
+
+public:
+  RecordNamedOperandMatcher(unsigned InsnVarID, unsigned OpIdx,
+                            unsigned StoreIdx, StringRef Name)
+      : OperandPredicateMatcher(OPM_RecordNamedOperand, InsnVarID, OpIdx),
+        StoreIdx(StoreIdx), Name(Name) {}
+
+  static bool classof(const PredicateMatcher *P) {
+    return P->getKind() == OPM_RecordNamedOperand;
+  }
+
+  bool isIdentical(const PredicateMatcher &B) const override {
+    return OperandPredicateMatcher::isIdentical(B) &&
+           StoreIdx == cast<RecordNamedOperandMatcher>(&B)->StoreIdx &&
+           Name == cast<RecordNamedOperandMatcher>(&B)->Name;
+  }
+
+  void emitPredicateOpcodes(MatchTable &Table,
+                            RuleMatcher &Rule) const override {
+    Table << MatchTable::Opcode("GIM_RecordNamedOperand")
+          << MatchTable::Comment("MI") << MatchTable::IntValue(InsnVarID)
+          << MatchTable::Comment("Op") << MatchTable::IntValue(OpIdx)
+          << MatchTable::Comment("StoreIdx") << MatchTable::IntValue(StoreIdx)
+          << MatchTable::Comment("Name : " + Name) << MatchTable::LineBreak;
+  }
+};
+
+/// Generates code to check that an operand is a particular target constant.
+class ComplexPatternOperandMatcher : public OperandPredicateMatcher {
+protected:
+  const OperandMatcher &Operand;
+  // TableGen ComplexPattern record; its name forms the GICP_ enum value.
+  const Record &TheDef;
+
+  // Defined out-of-line after OperandMatcher is complete.
+  unsigned getAllocatedTemporariesBaseID() const;
+
+public:
+  // Never deduplicated: two complex-pattern checks are never considered equal.
+  bool isIdentical(const PredicateMatcher &B) const override { return false; }
+
+  ComplexPatternOperandMatcher(unsigned InsnVarID, unsigned OpIdx,
+                               const OperandMatcher &Operand,
+                               const Record &TheDef)
+      : OperandPredicateMatcher(OPM_ComplexPattern, InsnVarID, OpIdx),
+        Operand(Operand), TheDef(TheDef) {}
+
+  static bool classof(const PredicateMatcher *P) {
+    return P->getKind() == OPM_ComplexPattern;
+  }
+
+  void emitPredicateOpcodes(MatchTable &Table,
+                            RuleMatcher &Rule) const override {
+    unsigned ID = getAllocatedTemporariesBaseID();
+    Table << MatchTable::Opcode("GIM_CheckComplexPattern")
+          << MatchTable::Comment("MI") << MatchTable::IntValue(InsnVarID)
+          << MatchTable::Comment("Op") << MatchTable::IntValue(OpIdx)
+          << MatchTable::Comment("Renderer") << MatchTable::IntValue(ID)
+          << MatchTable::NamedValue(("GICP_" + TheDef.getName()).str())
+          << MatchTable::LineBreak;
+  }
+
+  /// A complex pattern consumes exactly one renderer function slot.
+  unsigned countRendererFns() const override {
+    return 1;
+  }
+};
+
+/// Generates code to check that an operand is in a particular register bank.
+class RegisterBankOperandMatcher : public OperandPredicateMatcher {
+protected:
+  const CodeGenRegisterClass &RC;
+
+public:
+  RegisterBankOperandMatcher(unsigned InsnVarID, unsigned OpIdx,
+                             const CodeGenRegisterClass &RC)
+      : OperandPredicateMatcher(OPM_RegBank, InsnVarID, OpIdx), RC(RC) {}
+
+  bool isIdentical(const PredicateMatcher &B) const override {
+    // Compare the underlying TableGen records, not the wrapper objects.
+    return OperandPredicateMatcher::isIdentical(B) &&
+           RC.getDef() == cast<RegisterBankOperandMatcher>(&B)->RC.getDef();
+  }
+
+  static bool classof(const PredicateMatcher *P) {
+    return P->getKind() == OPM_RegBank;
+  }
+
+  void emitPredicateOpcodes(MatchTable &Table,
+                            RuleMatcher &Rule) const override {
+    Table << MatchTable::Opcode("GIM_CheckRegBankForClass")
+          << MatchTable::Comment("MI") << MatchTable::IntValue(InsnVarID)
+          << MatchTable::Comment("Op") << MatchTable::IntValue(OpIdx)
+          << MatchTable::Comment("RC")
+          << MatchTable::NamedValue(RC.getQualifiedName() + "RegClassID")
+          << MatchTable::LineBreak;
+  }
+};
+
+/// Generates code to check that an operand is a basic block.
+class MBBOperandMatcher : public OperandPredicateMatcher {
+public:
+  MBBOperandMatcher(unsigned InsnVarID, unsigned OpIdx)
+      : OperandPredicateMatcher(OPM_MBB, InsnVarID, OpIdx) {}
+
+  static bool classof(const PredicateMatcher *P) {
+    return P->getKind() == OPM_MBB;
+  }
+
+  // Stateless check; the base-class isIdentical() suffices.
+  void emitPredicateOpcodes(MatchTable &Table,
+                            RuleMatcher &Rule) const override {
+    Table << MatchTable::Opcode("GIM_CheckIsMBB") << MatchTable::Comment("MI")
+          << MatchTable::IntValue(InsnVarID) << MatchTable::Comment("Op")
+          << MatchTable::IntValue(OpIdx) << MatchTable::LineBreak;
+  }
+};
+
+/// Generates code to check that an operand is an immediate.
+/// Note: this operand matcher deliberately uses the IPM_Imm kind (an IPM_
+/// value despite being an OperandPredicateMatcher); classof keys on IPM_Imm.
+class ImmOperandMatcher : public OperandPredicateMatcher {
+public:
+  ImmOperandMatcher(unsigned InsnVarID, unsigned OpIdx)
+      : OperandPredicateMatcher(IPM_Imm, InsnVarID, OpIdx) {}
+
+  static bool classof(const PredicateMatcher *P) {
+    return P->getKind() == IPM_Imm;
+  }
+
+  void emitPredicateOpcodes(MatchTable &Table,
+                            RuleMatcher &Rule) const override {
+    Table << MatchTable::Opcode("GIM_CheckIsImm") << MatchTable::Comment("MI")
+          << MatchTable::IntValue(InsnVarID) << MatchTable::Comment("Op")
+          << MatchTable::IntValue(OpIdx) << MatchTable::LineBreak;
+  }
+};
+
+/// Generates code to check that an operand is a G_CONSTANT with a particular
+/// int.
+class ConstantIntOperandMatcher : public OperandPredicateMatcher {
+protected:
+  int64_t Value;
+
+public:
+  ConstantIntOperandMatcher(unsigned InsnVarID, unsigned OpIdx, int64_t Value)
+      : OperandPredicateMatcher(OPM_Int, InsnVarID, OpIdx), Value(Value) {}
+
+  bool isIdentical(const PredicateMatcher &B) const override {
+    return OperandPredicateMatcher::isIdentical(B) &&
+           Value == cast<ConstantIntOperandMatcher>(&B)->Value;
+  }
+
+  static bool classof(const PredicateMatcher *P) {
+    return P->getKind() == OPM_Int;
+  }
+
+  void emitPredicateOpcodes(MatchTable &Table,
+                            RuleMatcher &Rule) const override {
+    Table << MatchTable::Opcode("GIM_CheckConstantInt")
+          << MatchTable::Comment("MI") << MatchTable::IntValue(InsnVarID)
+          << MatchTable::Comment("Op") << MatchTable::IntValue(OpIdx)
+          << MatchTable::IntValue(Value) << MatchTable::LineBreak;
+  }
+};
+
+/// Generates code to check that an operand is a raw int (where MO.isImm() or
+/// MO.isCImm() is true).
+class LiteralIntOperandMatcher : public OperandPredicateMatcher {
+protected:
+  int64_t Value;
+
+public:
+  LiteralIntOperandMatcher(unsigned InsnVarID, unsigned OpIdx, int64_t Value)
+      : OperandPredicateMatcher(OPM_LiteralInt, InsnVarID, OpIdx),
+        Value(Value) {}
+
+  bool isIdentical(const PredicateMatcher &B) const override {
+    return OperandPredicateMatcher::isIdentical(B) &&
+           Value == cast<LiteralIntOperandMatcher>(&B)->Value;
+  }
+
+  static bool classof(const PredicateMatcher *P) {
+    return P->getKind() == OPM_LiteralInt;
+  }
+
+  // Unlike ConstantIntOperandMatcher (G_CONSTANT), this checks the raw
+  // immediate on the operand itself via GIM_CheckLiteralInt.
+  void emitPredicateOpcodes(MatchTable &Table,
+                            RuleMatcher &Rule) const override {
+    Table << MatchTable::Opcode("GIM_CheckLiteralInt")
+          << MatchTable::Comment("MI") << MatchTable::IntValue(InsnVarID)
+          << MatchTable::Comment("Op") << MatchTable::IntValue(OpIdx)
+          << MatchTable::IntValue(Value) << MatchTable::LineBreak;
+  }
+};
+
+/// Generates code to check that an operand is an CmpInst predicate
+class CmpPredicateOperandMatcher : public OperandPredicateMatcher {
+protected:
+  // Name of a CmpInst predicate enumerator (emitted as "CmpInst::<PredName>").
+  std::string PredName;
+
+public:
+  CmpPredicateOperandMatcher(unsigned InsnVarID, unsigned OpIdx,
+                             std::string P)
+      : OperandPredicateMatcher(OPM_CmpPredicate, InsnVarID, OpIdx), PredName(P) {}
+
+  bool isIdentical(const PredicateMatcher &B) const override {
+    return OperandPredicateMatcher::isIdentical(B) &&
+           PredName == cast<CmpPredicateOperandMatcher>(&B)->PredName;
+  }
+
+  static bool classof(const PredicateMatcher *P) {
+    return P->getKind() == OPM_CmpPredicate;
+  }
+
+  void emitPredicateOpcodes(MatchTable &Table,
+                            RuleMatcher &Rule) const override {
+    Table << MatchTable::Opcode("GIM_CheckCmpPredicate")
+          << MatchTable::Comment("MI") << MatchTable::IntValue(InsnVarID)
+          << MatchTable::Comment("Op") << MatchTable::IntValue(OpIdx)
+          << MatchTable::Comment("Predicate")
+          << MatchTable::NamedValue("CmpInst", PredName)
+          << MatchTable::LineBreak;
+  }
+};
+
+/// Generates code to check that an operand is an intrinsic ID.
+class IntrinsicIDOperandMatcher : public OperandPredicateMatcher {
+protected:
+  const CodeGenIntrinsic *II;
+
+public:
+  IntrinsicIDOperandMatcher(unsigned InsnVarID, unsigned OpIdx,
+                            const CodeGenIntrinsic *II)
+      : OperandPredicateMatcher(OPM_IntrinsicID, InsnVarID, OpIdx), II(II) {}
+
+  bool isIdentical(const PredicateMatcher &B) const override {
+    // Pointer comparison: CodeGenIntrinsic objects are unique per intrinsic.
+    return OperandPredicateMatcher::isIdentical(B) &&
+           II == cast<IntrinsicIDOperandMatcher>(&B)->II;
+  }
+
+  static bool classof(const PredicateMatcher *P) {
+    return P->getKind() == OPM_IntrinsicID;
+  }
+
+  void emitPredicateOpcodes(MatchTable &Table,
+                            RuleMatcher &Rule) const override {
+    Table << MatchTable::Opcode("GIM_CheckIntrinsicID")
+          << MatchTable::Comment("MI") << MatchTable::IntValue(InsnVarID)
+          << MatchTable::Comment("Op") << MatchTable::IntValue(OpIdx)
+          << MatchTable::NamedValue("Intrinsic::" + II->EnumName)
+          << MatchTable::LineBreak;
+  }
+};
+
+/// Generates code to check that this operand is an immediate whose value meets
+/// an immediate predicate.
+class OperandImmPredicateMatcher : public OperandPredicateMatcher {
+protected:
+  TreePredicateFn Predicate;
+
+public:
+  OperandImmPredicateMatcher(unsigned InsnVarID, unsigned OpIdx,
+                             const TreePredicateFn &Predicate)
+      : OperandPredicateMatcher(IPM_ImmPredicate, InsnVarID, OpIdx),
+        Predicate(Predicate) {}
+
+  bool isIdentical(const PredicateMatcher &B) const override {
+    // Two matchers are the same if they originate from the same PatFrag.
+    return OperandPredicateMatcher::isIdentical(B) &&
+           Predicate.getOrigPatFragRecord() ==
+               cast<OperandImmPredicateMatcher>(&B)
+                   ->Predicate.getOrigPatFragRecord();
+  }
+
+  static bool classof(const PredicateMatcher *P) {
+    return P->getKind() == IPM_ImmPredicate;
+  }
+
+  void emitPredicateOpcodes(MatchTable &Table,
+                            RuleMatcher &Rule) const override {
+    Table << MatchTable::Opcode("GIM_CheckImmOperandPredicate")
+          << MatchTable::Comment("MI") << MatchTable::IntValue(InsnVarID)
+          << MatchTable::Comment("MO") << MatchTable::IntValue(OpIdx)
+          << MatchTable::Comment("Predicate")
+          << MatchTable::NamedValue(getEnumNameForPredicate(Predicate))
+          << MatchTable::LineBreak;
+  }
+};
+
+/// Generates code to check that a set of predicates match for a particular
+/// operand.
+class OperandMatcher : public PredicateListMatcher<OperandPredicateMatcher> {
+protected:
+  // The instruction matcher that owns this operand.
+  InstructionMatcher &Insn;
+  unsigned OpIdx;
+  std::string SymbolicName;
+
+  /// The index of the first temporary variable allocated to this operand. The
+  /// number of allocated temporaries can be found with
+  /// countRendererFns().
+  unsigned AllocatedTemporariesBaseID;
+
+public:
+  OperandMatcher(InstructionMatcher &Insn, unsigned OpIdx,
+                 const std::string &SymbolicName,
+                 unsigned AllocatedTemporariesBaseID)
+      : Insn(Insn), OpIdx(OpIdx), SymbolicName(SymbolicName),
+        AllocatedTemporariesBaseID(AllocatedTemporariesBaseID) {}
+
+  bool hasSymbolicName() const { return !SymbolicName.empty(); }
+  StringRef getSymbolicName() const { return SymbolicName; }
+  void setSymbolicName(StringRef Name) {
+    assert(SymbolicName.empty() && "Operand already has a symbolic name");
+    SymbolicName = std::string(Name);
+  }
+
+  /// Construct a new operand predicate and add it to the matcher.
+  /// Returns std::nullopt (adding nothing) if this operand is already
+  /// constrained to be identical to another operand.
+  template <class Kind, class... Args>
+  std::optional<Kind *> addPredicate(Args &&...args) {
+    if (isSameAsAnotherOperand())
+      return std::nullopt;
+    Predicates.emplace_back(std::make_unique<Kind>(
+        getInsnVarID(), getOpIdx(), std::forward<Args>(args)...));
+    return static_cast<Kind *>(Predicates.back().get());
+  }
+
+  unsigned getOpIdx() const { return OpIdx; }
+  unsigned getInsnVarID() const;
+
+  /// Build the C++ expression accessing this operand on the matched MI.
+  std::string getOperandExpr(unsigned InsnVarID) const {
+    return "State.MIs[" + llvm::to_string(InsnVarID) + "]->getOperand(" +
+           llvm::to_string(OpIdx) + ")";
+  }
+
+  InstructionMatcher &getInstructionMatcher() const { return Insn; }
+
+  Error addTypeCheckPredicate(const TypeSetByHwMode &VTy,
+                              bool OperandIsAPointer);
+
+  /// Emit MatchTable opcodes that test whether the instruction named in
+  /// InsnVarID matches all the predicates and all the operands.
+  void emitPredicateOpcodes(MatchTable &Table, RuleMatcher &Rule) {
+    if (!Optimized) {
+      std::string Comment;
+      raw_string_ostream CommentOS(Comment);
+      CommentOS << "MIs[" << getInsnVarID() << "] ";
+      if (SymbolicName.empty())
+        CommentOS << "Operand " << OpIdx;
+      else
+        CommentOS << SymbolicName;
+      Table << MatchTable::Comment(Comment) << MatchTable::LineBreak;
+    }
+
+    emitPredicateListOpcodes(Table, Rule);
+  }
+
+  /// Compare the priority of this object and B.
+  ///
+  /// Returns true if this object is more important than B.
+  bool isHigherPriorityThan(OperandMatcher &B) {
+    // Operand matchers involving more predicates have higher priority.
+    if (predicates_size() > B.predicates_size())
+      return true;
+    if (predicates_size() < B.predicates_size())
+      return false;
+
+    // This assumes that predicates are added in a consistent order.
+    for (auto &&Predicate : zip(predicates(), B.predicates())) {
+      if (std::get<0>(Predicate)->isHigherPriorityThan(*std::get<1>(Predicate)))
+        return true;
+      if (std::get<1>(Predicate)->isHigherPriorityThan(*std::get<0>(Predicate)))
+        return false;
+    }
+
+    // Equal priority when all pairwise comparisons tie.
+    return false;
+  };
+
+  /// Report the maximum number of temporary operands needed by the operand
+  /// matcher.
+  unsigned countRendererFns() {
+    return std::accumulate(
+        predicates().begin(), predicates().end(), 0,
+        [](unsigned A,
+           const std::unique_ptr<OperandPredicateMatcher> &Predicate) {
+          return A + Predicate->countRendererFns();
+        });
+  }
+
+  unsigned getAllocatedTemporariesBaseID() const {
+    return AllocatedTemporariesBaseID;
+  }
+
+  /// True if a SameOperandMatcher already ties this operand to another one.
+  bool isSameAsAnotherOperand() {
+    for (const auto &Predicate : predicates())
+      if (isa<SameOperandMatcher>(Predicate))
+        return true;
+    return false;
+  }
+};
+
+/// Translate a SelectionDAG type set into the appropriate LLT/pointer
+/// predicate on this operand. \p OperandIsAPointer forces a pointer check
+/// even when the DAG type is an integer (iPTR lowering).
+Error OperandMatcher::addTypeCheckPredicate(const TypeSetByHwMode &VTy,
+                                            bool OperandIsAPointer) {
+  if (!VTy.isMachineValueType())
+    return failedImport("unsupported typeset");
+
+  if (VTy.getMachineValueType() == MVT::iPTR && OperandIsAPointer) {
+    // Size 0 means: take the pointer width from the subtarget.
+    addPredicate<PointerToAnyOperandMatcher>(0);
+    return Error::success();
+  }
+
+  auto OpTyOrNone = MVTToLLT(VTy.getMachineValueType().SimpleTy);
+  if (!OpTyOrNone)
+    return failedImport("unsupported type");
+
+  if (OperandIsAPointer)
+    addPredicate<PointerToAnyOperandMatcher>(OpTyOrNone->get().getSizeInBits());
+  else if (VTy.isPointer())
+    addPredicate<LLTOperandMatcher>(LLT::pointer(VTy.getPtrAddrSpace(),
+                                                 OpTyOrNone->get().getSizeInBits()));
+  else
+    addPredicate<LLTOperandMatcher>(*OpTyOrNone);
+  return Error::success();
+}
+
+// Out-of-line because it needs the complete OperandMatcher definition above.
+unsigned ComplexPatternOperandMatcher::getAllocatedTemporariesBaseID() const {
+  return Operand.getAllocatedTemporariesBaseID();
+}
+
+/// Generates code to check a predicate on an instruction.
+///
+/// Typical predicates include:
+/// * The opcode of the instruction is a particular value.
+/// * The nsw/nuw flag is/isn't set.
+class InstructionPredicateMatcher : public PredicateMatcher {
+public:
+  InstructionPredicateMatcher(PredicateKind Kind, unsigned InsnVarID)
+      : PredicateMatcher(Kind, InsnVarID) {}
+  virtual ~InstructionPredicateMatcher() {}
+
+  /// Compare the priority of this object and B.
+  ///
+  /// Returns true if this object is more important than B.
+  /// Default ordering follows the PredicateKind enum declaration order.
+  virtual bool
+  isHigherPriorityThan(const InstructionPredicateMatcher &B) const {
+    return Kind < B.Kind;
+  };
+};
+
+// Comment emitted into the MatchTable when an instruction predicate list is
+// empty.
+template <>
+std::string
+PredicateListMatcher<PredicateMatcher>::getNoPredicateComment() const {
+  return "No instruction predicates";
+}
+
+/// Generates code to check the opcode of an instruction.
+class InstructionOpcodeMatcher : public InstructionPredicateMatcher {
+protected:
+ // Allow matching one to several, similar opcodes that share properties. This
+ // is to handle patterns where one SelectionDAG operation maps to multiple
+ // GlobalISel ones (e.g. G_BUILD_VECTOR and G_BUILD_VECTOR_TRUNC). The first
+ // is treated as the canonical opcode.
+ SmallVector<const CodeGenInstruction *, 2> Insts;
+
+ static DenseMap<const CodeGenInstruction *, unsigned> OpcodeValues;
+
+
+ MatchTableRecord getInstValue(const CodeGenInstruction *I) const {
+ const auto VI = OpcodeValues.find(I);
+ if (VI != OpcodeValues.end())
+ return MatchTable::NamedValue(I->Namespace, I->TheDef->getName(),
+ VI->second);
+ return MatchTable::NamedValue(I->Namespace, I->TheDef->getName());
+ }
+
+public:
+ static void initOpcodeValuesMap(const CodeGenTarget &Target) {
+ OpcodeValues.clear();
+
+ unsigned OpcodeValue = 0;
+ for (const CodeGenInstruction *I : Target.getInstructionsByEnumValue())
+ OpcodeValues[I] = OpcodeValue++;
+ }
+
+ InstructionOpcodeMatcher(unsigned InsnVarID,
+ ArrayRef<const CodeGenInstruction *> I)
+ : InstructionPredicateMatcher(IPM_Opcode, InsnVarID),
+ Insts(I.begin(), I.end()) {
+ assert((Insts.size() == 1 || Insts.size() == 2) &&
+ "unexpected number of opcode alternatives");
+ }
+
+ static bool classof(const PredicateMatcher *P) {
+ return P->getKind() == IPM_Opcode;
+ }
+
+ bool isIdentical(const PredicateMatcher &B) const override {
+ return InstructionPredicateMatcher::isIdentical(B) &&
+ Insts == cast<InstructionOpcodeMatcher>(&B)->Insts;
+ }
+
+ bool hasValue() const override {
+ return Insts.size() == 1 && OpcodeValues.count(Insts[0]);
+ }
+
+ // TODO: This is used for the SwitchMatcher optimization. We should be able to
+ // return a list of the opcodes to match.
+ MatchTableRecord getValue() const override {
+ assert(Insts.size() == 1);
+
+ const CodeGenInstruction *I = Insts[0];
+ const auto VI = OpcodeValues.find(I);
+ if (VI != OpcodeValues.end())
+ return MatchTable::NamedValue(I->Namespace, I->TheDef->getName(),
+ VI->second);
+ return MatchTable::NamedValue(I->Namespace, I->TheDef->getName());
+ }
+
+ void emitPredicateOpcodes(MatchTable &Table,
+ RuleMatcher &Rule) const override {
+ StringRef CheckType = Insts.size() == 1 ?
+ "GIM_CheckOpcode" : "GIM_CheckOpcodeIsEither";
+ Table << MatchTable::Opcode(CheckType) << MatchTable::Comment("MI")
+ << MatchTable::IntValue(InsnVarID);
+
+ for (const CodeGenInstruction *I : Insts)
+ Table << getInstValue(I);
+ Table << MatchTable::LineBreak;
+ }
+
+ /// Compare the priority of this object and B.
+ ///
+ /// Returns true if this object is more important than B.
+ bool
+ isHigherPriorityThan(const InstructionPredicateMatcher &B) const override {
+ if (InstructionPredicateMatcher::isHigherPriorityThan(B))
+ return true;
+ if (B.InstructionPredicateMatcher::isHigherPriorityThan(*this))
+ return false;
+
+ // Prioritize opcodes for cosmetic reasons in the generated source. Although
+ // this is cosmetic at the moment, we may want to drive a similar ordering
+ // using instruction frequency information to improve compile time.
+ if (const InstructionOpcodeMatcher *BO =
+ dyn_cast<InstructionOpcodeMatcher>(&B))
+ return Insts[0]->TheDef->getName() < BO->Insts[0]->TheDef->getName();
+
+ return false;
+ };
+
+ bool isConstantInstruction() const {
+ return Insts.size() == 1 && Insts[0]->TheDef->getName() == "G_CONSTANT";
+ }
+
+ // The first opcode is the canonical opcode, and later are alternatives.
+ StringRef getOpcode() const {
+ return Insts[0]->TheDef->getName();
+ }
+
+ ArrayRef<const CodeGenInstruction *> getAlternativeOpcodes() {
+ return Insts;
+ }
+
+ bool isVariadicNumOperands() const {
+ // If one is variadic, they all should be.
+ return Insts[0]->Operands.isVariadic;
+ }
+
+ StringRef getOperandType(unsigned OpIdx) const {
+ // Types expected to be uniform for all alternatives.
+ return Insts[0]->Operands[OpIdx].OperandType;
+ }
+};
+
// Out-of-line definition of InstructionOpcodeMatcher's static opcode->value
// map (consulted by hasValue()/getValue() above).
DenseMap<const CodeGenInstruction *, unsigned>
    InstructionOpcodeMatcher::OpcodeValues;
+
+class InstructionNumOperandsMatcher final : public InstructionPredicateMatcher {
+ unsigned NumOperands = 0;
+
+public:
+ InstructionNumOperandsMatcher(unsigned InsnVarID, unsigned NumOperands)
+ : InstructionPredicateMatcher(IPM_NumOperands, InsnVarID),
+ NumOperands(NumOperands) {}
+
+ static bool classof(const PredicateMatcher *P) {
+ return P->getKind() == IPM_NumOperands;
+ }
+
+ bool isIdentical(const PredicateMatcher &B) const override {
+ return InstructionPredicateMatcher::isIdentical(B) &&
+ NumOperands == cast<InstructionNumOperandsMatcher>(&B)->NumOperands;
+ }
+
+ void emitPredicateOpcodes(MatchTable &Table,
+ RuleMatcher &Rule) const override {
+ Table << MatchTable::Opcode("GIM_CheckNumOperands")
+ << MatchTable::Comment("MI") << MatchTable::IntValue(InsnVarID)
+ << MatchTable::Comment("Expected")
+ << MatchTable::IntValue(NumOperands) << MatchTable::LineBreak;
+ }
+};
+
+/// Generates code to check that this instruction is a constant whose value
+/// meets an immediate predicate.
+///
+/// Immediates are slightly odd since they are typically used like an operand
+/// but are represented as an operator internally. We typically write simm8:$src
+/// in a tablegen pattern, but this is just syntactic sugar for
+/// (imm:i32)<<P:Predicate_simm8>>:$imm which more directly describes the nodes
+/// that will be matched and the predicate (which is attached to the imm
+/// operator) that will be tested. In SelectionDAG this describes a
+/// ConstantSDNode whose internal value will be tested using the simm8 predicate.
+///
+/// The corresponding GlobalISel representation is %1 = G_CONSTANT iN Value. In
+/// this representation, the immediate could be tested with an
+/// InstructionMatcher, InstructionOpcodeMatcher, OperandMatcher, and a
+/// OperandPredicateMatcher-subclass to check the Value meets the predicate but
+/// there are two implementation issues with producing that matcher
+/// configuration from the SelectionDAG pattern:
+/// * ImmLeaf is a PatFrag whose root is an InstructionMatcher. This means that
+/// were we to sink the immediate predicate to the operand we would have to
+/// have two partial implementations of PatFrag support, one for immediates
+/// and one for non-immediates.
+/// * At the point we handle the predicate, the OperandMatcher hasn't been
+/// created yet. If we were to sink the predicate to the OperandMatcher we
+/// would also have to complicate (or duplicate) the code that descends and
+/// creates matchers for the subtree.
+/// Overall, it's simpler to handle it in the place it was found.
+class InstructionImmPredicateMatcher : public InstructionPredicateMatcher {
+protected:
+ TreePredicateFn Predicate;
+
+public:
+ InstructionImmPredicateMatcher(unsigned InsnVarID,
+ const TreePredicateFn &Predicate)
+ : InstructionPredicateMatcher(IPM_ImmPredicate, InsnVarID),
+ Predicate(Predicate) {}
+
+ bool isIdentical(const PredicateMatcher &B) const override {
+ return InstructionPredicateMatcher::isIdentical(B) &&
+ Predicate.getOrigPatFragRecord() ==
+ cast<InstructionImmPredicateMatcher>(&B)
+ ->Predicate.getOrigPatFragRecord();
+ }
+
+ static bool classof(const PredicateMatcher *P) {
+ return P->getKind() == IPM_ImmPredicate;
+ }
+
+ void emitPredicateOpcodes(MatchTable &Table,
+ RuleMatcher &Rule) const override {
+ Table << MatchTable::Opcode(getMatchOpcodeForImmPredicate(Predicate))
+ << MatchTable::Comment("MI") << MatchTable::IntValue(InsnVarID)
+ << MatchTable::Comment("Predicate")
+ << MatchTable::NamedValue(getEnumNameForPredicate(Predicate))
+ << MatchTable::LineBreak;
+ }
+};
+
+/// Generates code to check that a memory instruction has a atomic ordering
+/// MachineMemoryOperand.
+class AtomicOrderingMMOPredicateMatcher : public InstructionPredicateMatcher {
+public:
+ enum AOComparator {
+ AO_Exactly,
+ AO_OrStronger,
+ AO_WeakerThan,
+ };
+
+protected:
+ StringRef Order;
+ AOComparator Comparator;
+
+public:
+ AtomicOrderingMMOPredicateMatcher(unsigned InsnVarID, StringRef Order,
+ AOComparator Comparator = AO_Exactly)
+ : InstructionPredicateMatcher(IPM_AtomicOrderingMMO, InsnVarID),
+ Order(Order), Comparator(Comparator) {}
+
+ static bool classof(const PredicateMatcher *P) {
+ return P->getKind() == IPM_AtomicOrderingMMO;
+ }
+
+ bool isIdentical(const PredicateMatcher &B) const override {
+ if (!InstructionPredicateMatcher::isIdentical(B))
+ return false;
+ const auto &R = *cast<AtomicOrderingMMOPredicateMatcher>(&B);
+ return Order == R.Order && Comparator == R.Comparator;
+ }
+
+ void emitPredicateOpcodes(MatchTable &Table,
+ RuleMatcher &Rule) const override {
+ StringRef Opcode = "GIM_CheckAtomicOrdering";
+
+ if (Comparator == AO_OrStronger)
+ Opcode = "GIM_CheckAtomicOrderingOrStrongerThan";
+ if (Comparator == AO_WeakerThan)
+ Opcode = "GIM_CheckAtomicOrderingWeakerThan";
+
+ Table << MatchTable::Opcode(Opcode) << MatchTable::Comment("MI")
+ << MatchTable::IntValue(InsnVarID) << MatchTable::Comment("Order")
+ << MatchTable::NamedValue(("(int64_t)AtomicOrdering::" + Order).str())
+ << MatchTable::LineBreak;
+ }
+};
+
+/// Generates code to check that the size of an MMO is exactly N bytes.
+class MemorySizePredicateMatcher : public InstructionPredicateMatcher {
+protected:
+ unsigned MMOIdx;
+ uint64_t Size;
+
+public:
+ MemorySizePredicateMatcher(unsigned InsnVarID, unsigned MMOIdx, unsigned Size)
+ : InstructionPredicateMatcher(IPM_MemoryLLTSize, InsnVarID),
+ MMOIdx(MMOIdx), Size(Size) {}
+
+ static bool classof(const PredicateMatcher *P) {
+ return P->getKind() == IPM_MemoryLLTSize;
+ }
+ bool isIdentical(const PredicateMatcher &B) const override {
+ return InstructionPredicateMatcher::isIdentical(B) &&
+ MMOIdx == cast<MemorySizePredicateMatcher>(&B)->MMOIdx &&
+ Size == cast<MemorySizePredicateMatcher>(&B)->Size;
+ }
+
+ void emitPredicateOpcodes(MatchTable &Table,
+ RuleMatcher &Rule) const override {
+ Table << MatchTable::Opcode("GIM_CheckMemorySizeEqualTo")
+ << MatchTable::Comment("MI") << MatchTable::IntValue(InsnVarID)
+ << MatchTable::Comment("MMO") << MatchTable::IntValue(MMOIdx)
+ << MatchTable::Comment("Size") << MatchTable::IntValue(Size)
+ << MatchTable::LineBreak;
+ }
+};
+
+class MemoryAddressSpacePredicateMatcher : public InstructionPredicateMatcher {
+protected:
+ unsigned MMOIdx;
+ SmallVector<unsigned, 4> AddrSpaces;
+
+public:
+ MemoryAddressSpacePredicateMatcher(unsigned InsnVarID, unsigned MMOIdx,
+ ArrayRef<unsigned> AddrSpaces)
+ : InstructionPredicateMatcher(IPM_MemoryAddressSpace, InsnVarID),
+ MMOIdx(MMOIdx), AddrSpaces(AddrSpaces.begin(), AddrSpaces.end()) {}
+
+ static bool classof(const PredicateMatcher *P) {
+ return P->getKind() == IPM_MemoryAddressSpace;
+ }
+ bool isIdentical(const PredicateMatcher &B) const override {
+ if (!InstructionPredicateMatcher::isIdentical(B))
+ return false;
+ auto *Other = cast<MemoryAddressSpacePredicateMatcher>(&B);
+ return MMOIdx == Other->MMOIdx && AddrSpaces == Other->AddrSpaces;
+ }
+
+ void emitPredicateOpcodes(MatchTable &Table,
+ RuleMatcher &Rule) const override {
+ Table << MatchTable::Opcode("GIM_CheckMemoryAddressSpace")
+ << MatchTable::Comment("MI") << MatchTable::IntValue(InsnVarID)
+ << MatchTable::Comment("MMO") << MatchTable::IntValue(MMOIdx)
+ // Encode number of address spaces to expect.
+ << MatchTable::Comment("NumAddrSpace")
+ << MatchTable::IntValue(AddrSpaces.size());
+ for (unsigned AS : AddrSpaces)
+ Table << MatchTable::Comment("AddrSpace") << MatchTable::IntValue(AS);
+
+ Table << MatchTable::LineBreak;
+ }
+};
+
+class MemoryAlignmentPredicateMatcher : public InstructionPredicateMatcher {
+protected:
+ unsigned MMOIdx;
+ int MinAlign;
+
+public:
+ MemoryAlignmentPredicateMatcher(unsigned InsnVarID, unsigned MMOIdx,
+ int MinAlign)
+ : InstructionPredicateMatcher(IPM_MemoryAlignment, InsnVarID),
+ MMOIdx(MMOIdx), MinAlign(MinAlign) {
+ assert(MinAlign > 0);
+ }
+
+ static bool classof(const PredicateMatcher *P) {
+ return P->getKind() == IPM_MemoryAlignment;
+ }
+
+ bool isIdentical(const PredicateMatcher &B) const override {
+ if (!InstructionPredicateMatcher::isIdentical(B))
+ return false;
+ auto *Other = cast<MemoryAlignmentPredicateMatcher>(&B);
+ return MMOIdx == Other->MMOIdx && MinAlign == Other->MinAlign;
+ }
+
+ void emitPredicateOpcodes(MatchTable &Table,
+ RuleMatcher &Rule) const override {
+ Table << MatchTable::Opcode("GIM_CheckMemoryAlignment")
+ << MatchTable::Comment("MI") << MatchTable::IntValue(InsnVarID)
+ << MatchTable::Comment("MMO") << MatchTable::IntValue(MMOIdx)
+ << MatchTable::Comment("MinAlign") << MatchTable::IntValue(MinAlign)
+ << MatchTable::LineBreak;
+ }
+};
+
+/// Generates code to check that the size of an MMO is less-than, equal-to, or
+/// greater than a given LLT.
+class MemoryVsLLTSizePredicateMatcher : public InstructionPredicateMatcher {
+public:
+ enum RelationKind {
+ GreaterThan,
+ EqualTo,
+ LessThan,
+ };
+
+protected:
+ unsigned MMOIdx;
+ RelationKind Relation;
+ unsigned OpIdx;
+
+public:
+ MemoryVsLLTSizePredicateMatcher(unsigned InsnVarID, unsigned MMOIdx,
+ enum RelationKind Relation,
+ unsigned OpIdx)
+ : InstructionPredicateMatcher(IPM_MemoryVsLLTSize, InsnVarID),
+ MMOIdx(MMOIdx), Relation(Relation), OpIdx(OpIdx) {}
+
+ static bool classof(const PredicateMatcher *P) {
+ return P->getKind() == IPM_MemoryVsLLTSize;
+ }
+ bool isIdentical(const PredicateMatcher &B) const override {
+ return InstructionPredicateMatcher::isIdentical(B) &&
+ MMOIdx == cast<MemoryVsLLTSizePredicateMatcher>(&B)->MMOIdx &&
+ Relation == cast<MemoryVsLLTSizePredicateMatcher>(&B)->Relation &&
+ OpIdx == cast<MemoryVsLLTSizePredicateMatcher>(&B)->OpIdx;
+ }
+
+ void emitPredicateOpcodes(MatchTable &Table,
+ RuleMatcher &Rule) const override {
+ Table << MatchTable::Opcode(Relation == EqualTo
+ ? "GIM_CheckMemorySizeEqualToLLT"
+ : Relation == GreaterThan
+ ? "GIM_CheckMemorySizeGreaterThanLLT"
+ : "GIM_CheckMemorySizeLessThanLLT")
+ << MatchTable::Comment("MI") << MatchTable::IntValue(InsnVarID)
+ << MatchTable::Comment("MMO") << MatchTable::IntValue(MMOIdx)
+ << MatchTable::Comment("OpIdx") << MatchTable::IntValue(OpIdx)
+ << MatchTable::LineBreak;
+ }
+};
+
+// Matcher for immAllOnesV/immAllZerosV
+class VectorSplatImmPredicateMatcher : public InstructionPredicateMatcher {
+public:
+ enum SplatKind {
+ AllZeros,
+ AllOnes
+ };
+
+private:
+ SplatKind Kind;
+
+public:
+ VectorSplatImmPredicateMatcher(unsigned InsnVarID, SplatKind K)
+ : InstructionPredicateMatcher(IPM_VectorSplatImm, InsnVarID), Kind(K) {}
+
+ static bool classof(const PredicateMatcher *P) {
+ return P->getKind() == IPM_VectorSplatImm;
+ }
+
+ bool isIdentical(const PredicateMatcher &B) const override {
+ return InstructionPredicateMatcher::isIdentical(B) &&
+ Kind == static_cast<const VectorSplatImmPredicateMatcher &>(B).Kind;
+ }
+
+ void emitPredicateOpcodes(MatchTable &Table,
+ RuleMatcher &Rule) const override {
+ if (Kind == AllOnes)
+ Table << MatchTable::Opcode("GIM_CheckIsBuildVectorAllOnes");
+ else
+ Table << MatchTable::Opcode("GIM_CheckIsBuildVectorAllZeros");
+
+ Table << MatchTable::Comment("MI") << MatchTable::IntValue(InsnVarID);
+ Table << MatchTable::LineBreak;
+ }
+};
+
+/// Generates code to check an arbitrary C++ instruction predicate.
+class GenericInstructionPredicateMatcher : public InstructionPredicateMatcher {
+protected:
+ TreePredicateFn Predicate;
+
+public:
+ GenericInstructionPredicateMatcher(unsigned InsnVarID,
+ TreePredicateFn Predicate)
+ : InstructionPredicateMatcher(IPM_GenericPredicate, InsnVarID),
+ Predicate(Predicate) {}
+
+ static bool classof(const InstructionPredicateMatcher *P) {
+ return P->getKind() == IPM_GenericPredicate;
+ }
+ bool isIdentical(const PredicateMatcher &B) const override {
+ return InstructionPredicateMatcher::isIdentical(B) &&
+ Predicate ==
+ static_cast<const GenericInstructionPredicateMatcher &>(B)
+ .Predicate;
+ }
+ void emitPredicateOpcodes(MatchTable &Table,
+ RuleMatcher &Rule) const override {
+ Table << MatchTable::Opcode("GIM_CheckCxxInsnPredicate")
+ << MatchTable::Comment("MI") << MatchTable::IntValue(InsnVarID)
+ << MatchTable::Comment("FnId")
+ << MatchTable::NamedValue(getEnumNameForPredicate(Predicate))
+ << MatchTable::LineBreak;
+ }
+};
+
+/// Generates code to check for the absence of use of the result.
+// TODO? Generalize this to support checking for one use.
+class NoUsePredicateMatcher : public InstructionPredicateMatcher {
+public:
+ NoUsePredicateMatcher(unsigned InsnVarID)
+ : InstructionPredicateMatcher(IPM_NoUse, InsnVarID) {}
+
+ static bool classof(const PredicateMatcher *P) {
+ return P->getKind() == IPM_NoUse;
+ }
+
+ bool isIdentical(const PredicateMatcher &B) const override {
+ return InstructionPredicateMatcher::isIdentical(B);
+ }
+
+ void emitPredicateOpcodes(MatchTable &Table,
+ RuleMatcher &Rule) const override {
+ Table << MatchTable::Opcode("GIM_CheckHasNoUse")
+ << MatchTable::Comment("MI") << MatchTable::IntValue(InsnVarID)
+ << MatchTable::LineBreak;
+ }
+};
+
/// Generates code to check that a set of predicates and operands match for a
/// particular instruction.
///
/// Typical predicates include:
/// * Has a specific opcode.
/// * Has an nsw/nuw flag or doesn't.
class InstructionMatcher final : public PredicateListMatcher<PredicateMatcher> {
protected:
  typedef std::vector<std::unique_ptr<OperandMatcher>> OperandVec;

  // The rule this matcher belongs to; used to allocate InsnVarID and to
  // register named/physreg operands with the rule.
  RuleMatcher &Rule;

  /// The operands to match. All rendered operands must be present even if the
  /// condition is always true.
  OperandVec Operands;
  // When true, emitPredicateOpcodes() starts with a GIM_CheckNumOperands
  // check; optimize() may clear it when the opcode check subsumes it.
  bool NumOperandsCheck = true;

  std::string SymbolicName;
  // Index of the MIs[] slot that holds this instruction while matching.
  unsigned InsnVarID;

  /// PhysRegInputs - This list has an entry for each explicitly specified
  /// physreg input to the pattern. The first elt is the Register node, the
  /// second is the recorded slot number the input pattern match saved it in.
  SmallVector<std::pair<Record *, unsigned>, 2> PhysRegInputs;

public:
  InstructionMatcher(RuleMatcher &Rule, StringRef SymbolicName,
                     bool NumOpsCheck = true)
      : Rule(Rule), NumOperandsCheck(NumOpsCheck), SymbolicName(SymbolicName) {
    // We create a new instruction matcher.
    // Get a new ID for that instruction.
    InsnVarID = Rule.implicitlyDefineInsnVar(*this);
  }

  /// Construct a new instruction predicate and add it to the matcher.
  template <class Kind, class... Args>
  std::optional<Kind *> addPredicate(Args &&...args) {
    Predicates.emplace_back(
        std::make_unique<Kind>(getInsnVarID(), std::forward<Args>(args)...));
    return static_cast<Kind *>(Predicates.back().get());
  }

  RuleMatcher &getRuleMatcher() const { return Rule; }

  unsigned getInsnVarID() const { return InsnVarID; }

  /// Add an operand to the matcher. If SymbolicName is non-empty the operand
  /// is also registered with the rule so other matchers/renderers can refer
  /// to it by name.
  OperandMatcher &addOperand(unsigned OpIdx, const std::string &SymbolicName,
                             unsigned AllocatedTemporariesBaseID) {
    Operands.emplace_back(new OperandMatcher(*this, OpIdx, SymbolicName,
                                             AllocatedTemporariesBaseID));
    if (!SymbolicName.empty())
      Rule.defineOperand(SymbolicName, *Operands.back());

    return *Operands.back();
  }

  // Linear search by operand index; aborts if no such operand was added.
  OperandMatcher &getOperand(unsigned OpIdx) {
    auto I = llvm::find_if(Operands,
                           [&OpIdx](const std::unique_ptr<OperandMatcher> &X) {
                             return X->getOpIdx() == OpIdx;
                           });
    if (I != Operands.end())
      return **I;
    llvm_unreachable("Failed to lookup operand");
  }

  // Record an explicitly-specified physical register input; the operand is
  // unnamed and is registered with the rule keyed by the register Record.
  OperandMatcher &addPhysRegInput(Record *Reg, unsigned OpIdx,
                                  unsigned TempOpIdx) {
    assert(SymbolicName.empty());
    OperandMatcher *OM = new OperandMatcher(*this, OpIdx, "", TempOpIdx);
    Operands.emplace_back(OM);
    Rule.definePhysRegOperand(Reg, *OM);
    PhysRegInputs.emplace_back(Reg, OpIdx);
    return *OM;
  }

  ArrayRef<std::pair<Record *, unsigned>> getPhysRegInputs() const {
    return PhysRegInputs;
  }

  StringRef getSymbolicName() const { return SymbolicName; }
  unsigned getNumOperands() const { return Operands.size(); }
  OperandVec::iterator operands_begin() { return Operands.begin(); }
  OperandVec::iterator operands_end() { return Operands.end(); }
  iterator_range<OperandVec::iterator> operands() {
    return make_range(operands_begin(), operands_end());
  }
  OperandVec::const_iterator operands_begin() const { return Operands.begin(); }
  OperandVec::const_iterator operands_end() const { return Operands.end(); }
  iterator_range<OperandVec::const_iterator> operands() const {
    return make_range(operands_begin(), operands_end());
  }
  bool operands_empty() const { return Operands.empty(); }

  void pop_front() { Operands.erase(Operands.begin()); }

  void optimize();

  /// Emit MatchTable opcodes that test whether the instruction named in
  /// InsnVarName matches all the predicates and all the operands.
  void emitPredicateOpcodes(MatchTable &Table, RuleMatcher &Rule) {
    if (NumOperandsCheck)
      InstructionNumOperandsMatcher(InsnVarID, getNumOperands())
          .emitPredicateOpcodes(Table, Rule);

    // First emit all instruction level predicates need to be verified before we
    // can verify operands.
    emitFilteredPredicateListOpcodes(
        [](const PredicateMatcher &P) {
          return !P.dependsOnOperands();
        }, Table, Rule);

    // Emit all operand constraints.
    for (const auto &Operand : Operands)
      Operand->emitPredicateOpcodes(Table, Rule);

    // All of the tablegen defined predicates should now be matched. Now emit
    // any custom predicates that rely on all generated checks.
    emitFilteredPredicateListOpcodes(
        [](const PredicateMatcher &P) {
          return P.dependsOnOperands();
        }, Table, Rule);
  }

  /// Compare the priority of this object and B.
  ///
  /// Returns true if this object is more important than B.
  bool isHigherPriorityThan(InstructionMatcher &B) {
    // Instruction matchers involving more operands have higher priority.
    if (Operands.size() > B.Operands.size())
      return true;
    if (Operands.size() < B.Operands.size())
      return false;

    // Tie-break on pairwise predicate priority, then pairwise operand
    // priority, in declaration order.
    for (auto &&P : zip(predicates(), B.predicates())) {
      auto L = static_cast<InstructionPredicateMatcher *>(std::get<0>(P).get());
      auto R = static_cast<InstructionPredicateMatcher *>(std::get<1>(P).get());
      if (L->isHigherPriorityThan(*R))
        return true;
      if (R->isHigherPriorityThan(*L))
        return false;
    }

    for (auto Operand : zip(Operands, B.Operands)) {
      if (std::get<0>(Operand)->isHigherPriorityThan(*std::get<1>(Operand)))
        return true;
      if (std::get<1>(Operand)->isHigherPriorityThan(*std::get<0>(Operand)))
        return false;
    }

    return false;
  };

  /// Report the maximum number of temporary operands needed by the instruction
  /// matcher.
  unsigned countRendererFns() {
    return std::accumulate(
               predicates().begin(), predicates().end(), 0,
               [](unsigned A,
                  const std::unique_ptr<PredicateMatcher> &Predicate) {
                 return A + Predicate->countRendererFns();
               }) +
           std::accumulate(
               Operands.begin(), Operands.end(), 0,
               [](unsigned A, const std::unique_ptr<OperandMatcher> &Operand) {
                 return A + Operand->countRendererFns();
               });
  }

  // Returns the opcode predicate; aborts if none was added.
  InstructionOpcodeMatcher &getOpcodeMatcher() {
    for (auto &P : predicates())
      if (auto *OpMatcher = dyn_cast<InstructionOpcodeMatcher>(P.get()))
        return *OpMatcher;
    llvm_unreachable("Didn't find an opcode matcher");
  }

  bool isConstantInstruction() {
    return getOpcodeMatcher().isConstantInstruction();
  }

  StringRef getOpcode() { return getOpcodeMatcher().getOpcode(); }
};
+
+StringRef RuleMatcher::getOpcode() const {
+ return Matchers.front()->getOpcode();
+}
+
+unsigned RuleMatcher::getNumOperands() const {
+ return Matchers.front()->getNumOperands();
+}
+
+LLTCodeGen RuleMatcher::getFirstConditionAsRootType() {
+ InstructionMatcher &InsnMatcher = *Matchers.front();
+ if (!InsnMatcher.predicates_empty())
+ if (const auto *TM =
+ dyn_cast<LLTOperandMatcher>(&**InsnMatcher.predicates_begin()))
+ if (TM->getInsnVarID() == 0 && TM->getOpIdx() == 0)
+ return TM->getTy();
+ return {};
+}
+
+/// Generates code to check that the operand is a register defined by an
+/// instruction that matches the given instruction matcher.
+///
+/// For example, the pattern:
+/// (set $dst, (G_MUL (G_ADD $src1, $src2), $src3))
+/// would use an InstructionOperandMatcher for operand 1 of the G_MUL to match
+/// the:
+/// (G_ADD $src1, $src2)
+/// subpattern.
+class InstructionOperandMatcher : public OperandPredicateMatcher {
+protected:
+ std::unique_ptr<InstructionMatcher> InsnMatcher;
+
+public:
+ InstructionOperandMatcher(unsigned InsnVarID, unsigned OpIdx,
+ RuleMatcher &Rule, StringRef SymbolicName,
+ bool NumOpsCheck = true)
+ : OperandPredicateMatcher(OPM_Instruction, InsnVarID, OpIdx),
+ InsnMatcher(new InstructionMatcher(Rule, SymbolicName, NumOpsCheck)) {}
+
+ static bool classof(const PredicateMatcher *P) {
+ return P->getKind() == OPM_Instruction;
+ }
+
+ InstructionMatcher &getInsnMatcher() const { return *InsnMatcher; }
+
+ void emitCaptureOpcodes(MatchTable &Table, RuleMatcher &Rule) const {
+ const unsigned NewInsnVarID = InsnMatcher->getInsnVarID();
+ Table << MatchTable::Opcode("GIM_RecordInsn")
+ << MatchTable::Comment("DefineMI")
+ << MatchTable::IntValue(NewInsnVarID) << MatchTable::Comment("MI")
+ << MatchTable::IntValue(getInsnVarID())
+ << MatchTable::Comment("OpIdx") << MatchTable::IntValue(getOpIdx())
+ << MatchTable::Comment("MIs[" + llvm::to_string(NewInsnVarID) + "]")
+ << MatchTable::LineBreak;
+ }
+
+ void emitPredicateOpcodes(MatchTable &Table,
+ RuleMatcher &Rule) const override {
+ emitCaptureOpcodes(Table, Rule);
+ InsnMatcher->emitPredicateOpcodes(Table, Rule);
+ }
+
+ bool isHigherPriorityThan(const OperandPredicateMatcher &B) const override {
+ if (OperandPredicateMatcher::isHigherPriorityThan(B))
+ return true;
+ if (B.OperandPredicateMatcher::isHigherPriorityThan(*this))
+ return false;
+
+ if (const InstructionOperandMatcher *BP =
+ dyn_cast<InstructionOperandMatcher>(&B))
+ if (InsnMatcher->isHigherPriorityThan(*BP->InsnMatcher))
+ return true;
+ return false;
+ }
+
+ /// Report the maximum number of temporary operands needed by the predicate
+ /// matcher.
+ unsigned countRendererFns() const override {
+ return InsnMatcher->countRendererFns();
+ }
+};
+
// Rearrange this matcher's predicates so the discriminating checks (opcode,
// intrinsic ID, LLTs) come first, and drop checks made redundant by the
// opcode or by how nested instructions are recorded.
void InstructionMatcher::optimize() {
  SmallVector<std::unique_ptr<PredicateMatcher>, 8> Stash;
  const auto &OpcMatcher = getOpcodeMatcher();

  // Pop the first predicate; if it is the opcode matcher, the checks that
  // follow can be reorganized around it.
  Stash.push_back(predicates_pop_front());
  if (Stash.back().get() == &OpcMatcher) {
    // A variadic opcode still needs an explicit operand-count check; for a
    // fixed-arity opcode the opcode check alone determines the count, so the
    // separate check is dropped below.
    if (NumOperandsCheck && OpcMatcher.isVariadicNumOperands())
      Stash.emplace_back(
          new InstructionNumOperandsMatcher(InsnVarID, getNumOperands()));
    NumOperandsCheck = false;

    // Hoist the first intrinsic-ID operand check (at most one per operand)
    // up next to the opcode check.
    for (auto &OM : Operands)
      for (auto &OP : OM->predicates())
        if (isa<IntrinsicIDOperandMatcher>(OP)) {
          Stash.push_back(std::move(OP));
          OM->eraseNullPredicates();
          break;
        }
  }

  if (InsnVarID > 0) {
    // For a nested (non-root) instruction, operand 0 is the def that was
    // already checked when the instruction was recorded, so its predicates
    // are redundant and can be discarded.
    assert(!Operands.empty() && "Nested instruction is expected to def a vreg");
    for (auto &OP : Operands[0]->predicates())
      OP.reset();
    Operands[0]->eraseNullPredicates();
  }
  // Hoist all LLT operand checks ahead of the remaining operand predicates.
  for (auto &OM : Operands) {
    for (auto &OP : OM->predicates())
      if (isa<LLTOperandMatcher>(OP))
        Stash.push_back(std::move(OP));
    OM->eraseNullPredicates();
  }
  // Re-insert the stashed predicates at the front; popping in reverse stash
  // order restores their intended relative order.
  while (!Stash.empty())
    prependPredicate(Stash.pop_back_val());
}
+
+//===- Actions ------------------------------------------------------------===//
+class OperandRenderer {
+public:
+ enum RendererKind {
+ OR_Copy,
+ OR_CopyOrAddZeroReg,
+ OR_CopySubReg,
+ OR_CopyPhysReg,
+ OR_CopyConstantAsImm,
+ OR_CopyFConstantAsFPImm,
+ OR_Imm,
+ OR_SubRegIndex,
+ OR_Register,
+ OR_TempRegister,
+ OR_ComplexPattern,
+ OR_Custom,
+ OR_CustomOperand
+ };
+
+protected:
+ RendererKind Kind;
+
+public:
+ OperandRenderer(RendererKind Kind) : Kind(Kind) {}
+ virtual ~OperandRenderer() {}
+
+ RendererKind getKind() const { return Kind; }
+
+ virtual void emitRenderOpcodes(MatchTable &Table,
+ RuleMatcher &Rule) const = 0;
+};
+
+/// A CopyRenderer emits code to copy a single operand from an existing
+/// instruction to the one being built.
+class CopyRenderer : public OperandRenderer {
+protected:
+ unsigned NewInsnID;
+ /// The name of the operand.
+ const StringRef SymbolicName;
+
+public:
+ CopyRenderer(unsigned NewInsnID, StringRef SymbolicName)
+ : OperandRenderer(OR_Copy), NewInsnID(NewInsnID),
+ SymbolicName(SymbolicName) {
+ assert(!SymbolicName.empty() && "Cannot copy from an unspecified source");
+ }
+
+ static bool classof(const OperandRenderer *R) {
+ return R->getKind() == OR_Copy;
+ }
+
+ StringRef getSymbolicName() const { return SymbolicName; }
+
+ void emitRenderOpcodes(MatchTable &Table, RuleMatcher &Rule) const override {
+ const OperandMatcher &Operand = Rule.getOperandMatcher(SymbolicName);
+ unsigned OldInsnVarID = Rule.getInsnVarID(Operand.getInstructionMatcher());
+ Table << MatchTable::Opcode("GIR_Copy") << MatchTable::Comment("NewInsnID")
+ << MatchTable::IntValue(NewInsnID) << MatchTable::Comment("OldInsnID")
+ << MatchTable::IntValue(OldInsnVarID) << MatchTable::Comment("OpIdx")
+ << MatchTable::IntValue(Operand.getOpIdx())
+ << MatchTable::Comment(SymbolicName) << MatchTable::LineBreak;
+ }
+};
+
+/// A CopyRenderer emits code to copy a virtual register to a specific physical
+/// register.
+class CopyPhysRegRenderer : public OperandRenderer {
+protected:
+ unsigned NewInsnID;
+ Record *PhysReg;
+
+public:
+ CopyPhysRegRenderer(unsigned NewInsnID, Record *Reg)
+ : OperandRenderer(OR_CopyPhysReg), NewInsnID(NewInsnID),
+ PhysReg(Reg) {
+ assert(PhysReg);
+ }
+
+ static bool classof(const OperandRenderer *R) {
+ return R->getKind() == OR_CopyPhysReg;
+ }
+
+ Record *getPhysReg() const { return PhysReg; }
+
+ void emitRenderOpcodes(MatchTable &Table, RuleMatcher &Rule) const override {
+ const OperandMatcher &Operand = Rule.getPhysRegOperandMatcher(PhysReg);
+ unsigned OldInsnVarID = Rule.getInsnVarID(Operand.getInstructionMatcher());
+ Table << MatchTable::Opcode("GIR_Copy") << MatchTable::Comment("NewInsnID")
+ << MatchTable::IntValue(NewInsnID) << MatchTable::Comment("OldInsnID")
+ << MatchTable::IntValue(OldInsnVarID) << MatchTable::Comment("OpIdx")
+ << MatchTable::IntValue(Operand.getOpIdx())
+ << MatchTable::Comment(PhysReg->getName())
+ << MatchTable::LineBreak;
+ }
+};
+
+/// A CopyOrAddZeroRegRenderer emits code to copy a single operand from an
+/// existing instruction to the one being built. If the operand turns out to be
+/// a 'G_CONSTANT 0' then it replaces the operand with a zero register.
+class CopyOrAddZeroRegRenderer : public OperandRenderer {
+protected:
+ unsigned NewInsnID;
+ /// The name of the operand.
+ const StringRef SymbolicName;
+ const Record *ZeroRegisterDef;
+
+public:
+ CopyOrAddZeroRegRenderer(unsigned NewInsnID,
+ StringRef SymbolicName, Record *ZeroRegisterDef)
+ : OperandRenderer(OR_CopyOrAddZeroReg), NewInsnID(NewInsnID),
+ SymbolicName(SymbolicName), ZeroRegisterDef(ZeroRegisterDef) {
+ assert(!SymbolicName.empty() && "Cannot copy from an unspecified source");
+ }
+
+ static bool classof(const OperandRenderer *R) {
+ return R->getKind() == OR_CopyOrAddZeroReg;
+ }
+
+ StringRef getSymbolicName() const { return SymbolicName; }
+
+ void emitRenderOpcodes(MatchTable &Table, RuleMatcher &Rule) const override {
+ const OperandMatcher &Operand = Rule.getOperandMatcher(SymbolicName);
+ unsigned OldInsnVarID = Rule.getInsnVarID(Operand.getInstructionMatcher());
+ Table << MatchTable::Opcode("GIR_CopyOrAddZeroReg")
+ << MatchTable::Comment("NewInsnID") << MatchTable::IntValue(NewInsnID)
+ << MatchTable::Comment("OldInsnID")
+ << MatchTable::IntValue(OldInsnVarID) << MatchTable::Comment("OpIdx")
+ << MatchTable::IntValue(Operand.getOpIdx())
+ << MatchTable::NamedValue(
+ (ZeroRegisterDef->getValue("Namespace")
+ ? ZeroRegisterDef->getValueAsString("Namespace")
+ : ""),
+ ZeroRegisterDef->getName())
+ << MatchTable::Comment(SymbolicName) << MatchTable::LineBreak;
+ }
+};
+
+/// A CopyConstantAsImmRenderer emits code to render a G_CONSTANT instruction to
+/// an extended immediate operand.
+class CopyConstantAsImmRenderer : public OperandRenderer {
+protected:
+ unsigned NewInsnID;
+ /// The name of the operand.
+ const std::string SymbolicName;
+ bool Signed;
+
+public:
+ CopyConstantAsImmRenderer(unsigned NewInsnID, StringRef SymbolicName)
+ : OperandRenderer(OR_CopyConstantAsImm), NewInsnID(NewInsnID),
+ SymbolicName(SymbolicName), Signed(true) {}
+
+ static bool classof(const OperandRenderer *R) {
+ return R->getKind() == OR_CopyConstantAsImm;
+ }
+
+ StringRef getSymbolicName() const { return SymbolicName; }
+
+ void emitRenderOpcodes(MatchTable &Table, RuleMatcher &Rule) const override {
+ InstructionMatcher &InsnMatcher = Rule.getInstructionMatcher(SymbolicName);
+ unsigned OldInsnVarID = Rule.getInsnVarID(InsnMatcher);
+ Table << MatchTable::Opcode(Signed ? "GIR_CopyConstantAsSImm"
+ : "GIR_CopyConstantAsUImm")
+ << MatchTable::Comment("NewInsnID") << MatchTable::IntValue(NewInsnID)
+ << MatchTable::Comment("OldInsnID")
+ << MatchTable::IntValue(OldInsnVarID)
+ << MatchTable::Comment(SymbolicName) << MatchTable::LineBreak;
+ }
+};
+
+/// A CopyFConstantAsFPImmRenderer emits code to render a G_FCONSTANT
+/// instruction to an extended immediate operand.
+class CopyFConstantAsFPImmRenderer : public OperandRenderer {
+protected:
+ unsigned NewInsnID;
+ /// The name of the operand.
+ const std::string SymbolicName;
+
+public:
+ CopyFConstantAsFPImmRenderer(unsigned NewInsnID, StringRef SymbolicName)
+ : OperandRenderer(OR_CopyFConstantAsFPImm), NewInsnID(NewInsnID),
+ SymbolicName(SymbolicName) {}
+
+ static bool classof(const OperandRenderer *R) {
+ return R->getKind() == OR_CopyFConstantAsFPImm;
+ }
+
+ StringRef getSymbolicName() const { return SymbolicName; }
+
+ void emitRenderOpcodes(MatchTable &Table, RuleMatcher &Rule) const override {
+ InstructionMatcher &InsnMatcher = Rule.getInstructionMatcher(SymbolicName);
+ unsigned OldInsnVarID = Rule.getInsnVarID(InsnMatcher);
+ Table << MatchTable::Opcode("GIR_CopyFConstantAsFPImm")
+ << MatchTable::Comment("NewInsnID") << MatchTable::IntValue(NewInsnID)
+ << MatchTable::Comment("OldInsnID")
+ << MatchTable::IntValue(OldInsnVarID)
+ << MatchTable::Comment(SymbolicName) << MatchTable::LineBreak;
+ }
+};
+
+/// A CopySubRegRenderer emits code to copy a single register operand from an
+/// existing instruction to the one being built and indicate that only a
+/// subregister should be copied.
+class CopySubRegRenderer : public OperandRenderer {
+protected:
+  unsigned NewInsnID;
+  /// The name of the operand being copied.
+  const StringRef SymbolicName;
+  /// The subregister to extract.
+  const CodeGenSubRegIndex *SubReg;
+
+public:
+  CopySubRegRenderer(unsigned NewInsnID, StringRef SymbolicName,
+                     const CodeGenSubRegIndex *SubReg)
+      : OperandRenderer(OR_CopySubReg), NewInsnID(NewInsnID),
+        SymbolicName(SymbolicName), SubReg(SubReg) {}
+
+  static bool classof(const OperandRenderer *R) {
+    return R->getKind() == OR_CopySubReg;
+  }
+
+  StringRef getSymbolicName() const { return SymbolicName; }
+
+  void emitRenderOpcodes(MatchTable &Table, RuleMatcher &Rule) const override {
+    // Resolve the source operand and the ID of the instruction that owns it.
+    const OperandMatcher &SrcOp = Rule.getOperandMatcher(SymbolicName);
+    unsigned SrcInsnVarID = Rule.getInsnVarID(SrcOp.getInstructionMatcher());
+    // GIR_CopySubReg NewInsnID OldInsnID OpIdx SubRegIdx
+    Table << MatchTable::Opcode("GIR_CopySubReg");
+    Table << MatchTable::Comment("NewInsnID") << MatchTable::IntValue(NewInsnID);
+    Table << MatchTable::Comment("OldInsnID")
+          << MatchTable::IntValue(SrcInsnVarID);
+    Table << MatchTable::Comment("OpIdx")
+          << MatchTable::IntValue(SrcOp.getOpIdx());
+    Table << MatchTable::Comment("SubRegIdx")
+          << MatchTable::IntValue(SubReg->EnumValue);
+    Table << MatchTable::Comment(SymbolicName) << MatchTable::LineBreak;
+  }
+};
+
+/// Adds a specific physical register to the instruction being built.
+/// This is typically useful for WZR/XZR on AArch64.
+class AddRegisterRenderer : public OperandRenderer {
+protected:
+ unsigned InsnID;
+ const Record *RegisterDef;
+ // True if the register should be rendered as a def (RegState::Define).
+ bool IsDef;
+ const CodeGenTarget &Target;
+
+public:
+ AddRegisterRenderer(unsigned InsnID, const CodeGenTarget &Target,
+ const Record *RegisterDef, bool IsDef = false)
+ : OperandRenderer(OR_Register), InsnID(InsnID), RegisterDef(RegisterDef),
+ IsDef(IsDef), Target(Target) {}
+
+ static bool classof(const OperandRenderer *R) {
+ return R->getKind() == OR_Register;
+ }
+
+ void emitRenderOpcodes(MatchTable &Table, RuleMatcher &Rule) const override {
+ Table << MatchTable::Opcode("GIR_AddRegister")
+ << MatchTable::Comment("InsnID") << MatchTable::IntValue(InsnID);
+ // The special "zero_reg" record is rendered as the target's NoRegister;
+ // any other register is emitted as Namespace::RegName (empty namespace if
+ // the record has no "Namespace" value).
+ if (RegisterDef->getName() != "zero_reg") {
+ Table << MatchTable::NamedValue(
+ (RegisterDef->getValue("Namespace")
+ ? RegisterDef->getValueAsString("Namespace")
+ : ""),
+ RegisterDef->getName());
+ } else {
+ Table << MatchTable::NamedValue(Target.getRegNamespace(), "NoRegister");
+ }
+ Table << MatchTable::Comment("AddRegisterRegFlags");
+
+ // TODO: This is encoded as a 64-bit element, but only 16 or 32-bits are
+ // really needed for a physical register reference. We can pack the
+ // register and flags in a single field.
+ if (IsDef)
+ Table << MatchTable::NamedValue("RegState::Define");
+ else
+ Table << MatchTable::IntValue(0);
+ Table << MatchTable::LineBreak;
+ }
+};
+
+/// Adds a specific temporary virtual register to the instruction being built.
+/// This is used to chain instructions together when emitting multiple
+/// instructions.
+class TempRegRenderer : public OperandRenderer {
+protected:
+  unsigned InsnID;
+  unsigned TempRegID;
+  const CodeGenSubRegIndex *SubRegIdx;
+  bool IsDef;
+  bool IsDead;
+
+public:
+  TempRegRenderer(unsigned InsnID, unsigned TempRegID, bool IsDef = false,
+                  const CodeGenSubRegIndex *SubReg = nullptr,
+                  bool IsDead = false)
+      // FIX: the kind must be OR_TempRegister, not OR_Register. The original
+      // tagged the object OR_Register while classof() tested OR_TempRegister,
+      // so isa/dyn_cast<TempRegRenderer> could never succeed and
+      // AddRegisterRenderer::classof falsely matched TempRegRenderers.
+      : OperandRenderer(OR_TempRegister), InsnID(InsnID), TempRegID(TempRegID),
+        SubRegIdx(SubReg), IsDef(IsDef), IsDead(IsDead) {}
+
+  static bool classof(const OperandRenderer *R) {
+    return R->getKind() == OR_TempRegister;
+  }
+
+  void emitRenderOpcodes(MatchTable &Table, RuleMatcher &Rule) const override {
+    // Rendering a subregister of the temporary is only supported for uses.
+    if (SubRegIdx) {
+      assert(!IsDef);
+      Table << MatchTable::Opcode("GIR_AddTempSubRegister");
+    } else
+      Table << MatchTable::Opcode("GIR_AddTempRegister");
+
+    Table << MatchTable::Comment("InsnID") << MatchTable::IntValue(InsnID)
+          << MatchTable::Comment("TempRegID") << MatchTable::IntValue(TempRegID)
+          << MatchTable::Comment("TempRegFlags");
+
+    // Defs carry RegState::Define and optionally RegState::Dead; uses have no
+    // flags.
+    if (IsDef) {
+      SmallString<32> RegFlags;
+      RegFlags += "RegState::Define";
+      if (IsDead)
+        RegFlags += "|RegState::Dead";
+      Table << MatchTable::NamedValue(RegFlags);
+    } else
+      Table << MatchTable::IntValue(0);
+
+    if (SubRegIdx)
+      Table << MatchTable::NamedValue(SubRegIdx->getQualifiedName());
+    Table << MatchTable::LineBreak;
+  }
+};
+
+/// Adds a specific immediate to the instruction being built.
+class ImmRenderer : public OperandRenderer {
+protected:
+  unsigned InsnID;
+  int64_t Imm;
+
+public:
+  ImmRenderer(unsigned InsnID, int64_t Imm)
+      : OperandRenderer(OR_Imm), InsnID(InsnID), Imm(Imm) {}
+
+  static bool classof(const OperandRenderer *R) {
+    return R->getKind() == OR_Imm;
+  }
+
+  void emitRenderOpcodes(MatchTable &Table, RuleMatcher &Rule) const override {
+    // GIR_AddImm InsnID Imm
+    Table << MatchTable::Opcode("GIR_AddImm");
+    Table << MatchTable::Comment("InsnID") << MatchTable::IntValue(InsnID);
+    Table << MatchTable::Comment("Imm") << MatchTable::IntValue(Imm);
+    Table << MatchTable::LineBreak;
+  }
+};
+
+/// Adds an enum value for a subreg index to the instruction being built.
+class SubRegIndexRenderer : public OperandRenderer {
+protected:
+  unsigned InsnID;
+  const CodeGenSubRegIndex *SubRegIdx;
+
+public:
+  SubRegIndexRenderer(unsigned InsnID, const CodeGenSubRegIndex *SRI)
+      : OperandRenderer(OR_SubRegIndex), InsnID(InsnID), SubRegIdx(SRI) {}
+
+  static bool classof(const OperandRenderer *R) {
+    return R->getKind() == OR_SubRegIndex;
+  }
+
+  void emitRenderOpcodes(MatchTable &Table, RuleMatcher &Rule) const override {
+    // The subreg index is rendered as a plain immediate (its enum value).
+    Table << MatchTable::Opcode("GIR_AddImm");
+    Table << MatchTable::Comment("InsnID") << MatchTable::IntValue(InsnID);
+    Table << MatchTable::Comment("SubRegIndex")
+          << MatchTable::IntValue(SubRegIdx->EnumValue);
+    Table << MatchTable::LineBreak;
+  }
+};
+
+/// Adds operands by calling a renderer function supplied by the ComplexPattern
+/// matcher function.
+class RenderComplexPatternOperand : public OperandRenderer {
+private:
+ unsigned InsnID;
+ const Record &TheDef;
+ /// The name of the operand.
+ const StringRef SymbolicName;
+ /// The renderer number. This must be unique within a rule since it's used to
+ /// identify a temporary variable to hold the renderer function.
+ unsigned RendererID;
+ /// When provided, this is the suboperand of the ComplexPattern operand to
+ /// render. Otherwise all the suboperands will be rendered.
+ std::optional<unsigned> SubOperand;
+
+ // Number of suboperands declared by the ComplexPattern's "Operands" dag.
+ unsigned getNumOperands() const {
+ return TheDef.getValueAsDag("Operands")->getNumArgs();
+ }
+
+public:
+ RenderComplexPatternOperand(unsigned InsnID, const Record &TheDef,
+ StringRef SymbolicName, unsigned RendererID,
+ std::optional<unsigned> SubOperand = std::nullopt)
+ : OperandRenderer(OR_ComplexPattern), InsnID(InsnID), TheDef(TheDef),
+ SymbolicName(SymbolicName), RendererID(RendererID),
+ SubOperand(SubOperand) {}
+
+ static bool classof(const OperandRenderer *R) {
+ return R->getKind() == OR_ComplexPattern;
+ }
+
+ void emitRenderOpcodes(MatchTable &Table, RuleMatcher &Rule) const override {
+ // A present SubOperand selects the single-suboperand opcode variant.
+ Table << MatchTable::Opcode(SubOperand ? "GIR_ComplexSubOperandRenderer"
+ : "GIR_ComplexRenderer")
+ << MatchTable::Comment("InsnID") << MatchTable::IntValue(InsnID)
+ << MatchTable::Comment("RendererID")
+ << MatchTable::IntValue(RendererID);
+ if (SubOperand)
+ Table << MatchTable::Comment("SubOperand")
+ << MatchTable::IntValue(*SubOperand);
+ Table << MatchTable::Comment(SymbolicName) << MatchTable::LineBreak;
+ }
+};
+
+/// Renders operands by invoking a target-supplied C++ renderer function
+/// (the "RendererFn" of \p Renderer, emitted as a GICR_* named value) on the
+/// matched instruction identified by \p SymbolicName.
+class CustomRenderer : public OperandRenderer {
+protected:
+ unsigned InsnID;
+ const Record &Renderer;
+ /// The name of the operand.
+ const std::string SymbolicName;
+
+public:
+ CustomRenderer(unsigned InsnID, const Record &Renderer,
+ StringRef SymbolicName)
+ : OperandRenderer(OR_Custom), InsnID(InsnID), Renderer(Renderer),
+ SymbolicName(SymbolicName) {}
+
+ static bool classof(const OperandRenderer *R) {
+ return R->getKind() == OR_Custom;
+ }
+
+ void emitRenderOpcodes(MatchTable &Table, RuleMatcher &Rule) const override {
+ InstructionMatcher &InsnMatcher = Rule.getInstructionMatcher(SymbolicName);
+ unsigned OldInsnVarID = Rule.getInsnVarID(InsnMatcher);
+ Table << MatchTable::Opcode("GIR_CustomRenderer")
+ << MatchTable::Comment("InsnID") << MatchTable::IntValue(InsnID)
+ << MatchTable::Comment("OldInsnID")
+ << MatchTable::IntValue(OldInsnVarID)
+ << MatchTable::Comment("Renderer")
+ << MatchTable::NamedValue(
+ "GICR_" + Renderer.getValueAsString("RendererFn").str())
+ << MatchTable::Comment(SymbolicName) << MatchTable::LineBreak;
+ }
+};
+
+/// Like CustomRenderer, but invokes the target-supplied renderer function on a
+/// specific matched *operand* (instruction + operand index) rather than on a
+/// whole matched instruction.
+class CustomOperandRenderer : public OperandRenderer {
+protected:
+ unsigned InsnID;
+ const Record &Renderer;
+ /// The name of the operand.
+ const std::string SymbolicName;
+
+public:
+ CustomOperandRenderer(unsigned InsnID, const Record &Renderer,
+ StringRef SymbolicName)
+ : OperandRenderer(OR_CustomOperand), InsnID(InsnID), Renderer(Renderer),
+ SymbolicName(SymbolicName) {}
+
+ static bool classof(const OperandRenderer *R) {
+ return R->getKind() == OR_CustomOperand;
+ }
+
+ void emitRenderOpcodes(MatchTable &Table, RuleMatcher &Rule) const override {
+ const OperandMatcher &OpdMatcher = Rule.getOperandMatcher(SymbolicName);
+ Table << MatchTable::Opcode("GIR_CustomOperandRenderer")
+ << MatchTable::Comment("InsnID") << MatchTable::IntValue(InsnID)
+ << MatchTable::Comment("OldInsnID")
+ << MatchTable::IntValue(OpdMatcher.getInsnVarID())
+ << MatchTable::Comment("OpIdx")
+ << MatchTable::IntValue(OpdMatcher.getOpIdx())
+ << MatchTable::Comment("OperandRenderer")
+ << MatchTable::NamedValue(
+ "GICR_" + Renderer.getValueAsString("RendererFn").str())
+ << MatchTable::Comment(SymbolicName) << MatchTable::LineBreak;
+ }
+};
+
+/// An action taken when all Matcher predicates succeeded for a parent rule.
+/// Actions are emitted in order after a rule's predicates (see
+/// RuleMatcher::emit).
+///
+/// Typical actions include:
+/// * Changing the opcode of an instruction.
+/// * Adding an operand to an instruction.
+class MatchAction {
+public:
+ virtual ~MatchAction() {}
+
+ /// Emit the MatchTable opcodes to implement the action.
+ virtual void emitActionOpcodes(MatchTable &Table,
+ RuleMatcher &Rule) const = 0;
+};
+
+/// Generates a comment describing the matched rule being acted upon.
+class DebugCommentAction : public MatchAction {
+private:
+  std::string S;
+
+public:
+  DebugCommentAction(StringRef Comment) : S(Comment.str()) {}
+
+  void emitActionOpcodes(MatchTable &Table, RuleMatcher &Rule) const override {
+    // A debug action emits only a comment and a line break; no opcodes.
+    Table << MatchTable::Comment(S);
+    Table << MatchTable::LineBreak;
+  }
+};
+
+/// Generates code to build an instruction or mutate an existing instruction
+/// into the desired instruction when this is possible.
+class BuildMIAction : public MatchAction {
+private:
+ unsigned InsnID;
+ const CodeGenInstruction *I;
+ // The matched instruction chosen for in-place opcode mutation, or nullptr
+ // if a fresh instruction must be built (see chooseInsnToMutate).
+ InstructionMatcher *Matched;
+ std::vector<std::unique_ptr<OperandRenderer>> OperandRenderers;
+
+ /// True if the instruction can be built solely by mutating the opcode.
+ /// This requires every renderer to be a plain CopyRenderer of the same
+ /// matched instruction, with operands in identical order.
+ bool canMutate(RuleMatcher &Rule, const InstructionMatcher *Insn) const {
+ if (!Insn)
+ return false;
+
+ if (OperandRenderers.size() != Insn->getNumOperands())
+ return false;
+
+ for (const auto &Renderer : enumerate(OperandRenderers)) {
+ if (const auto *Copy = dyn_cast<CopyRenderer>(&*Renderer.value())) {
+ const OperandMatcher &OM = Rule.getOperandMatcher(Copy->getSymbolicName());
+ if (Insn != &OM.getInstructionMatcher() ||
+ OM.getOpIdx() != Renderer.index())
+ return false;
+ } else
+ return false;
+ }
+
+ return true;
+ }
+
+public:
+ BuildMIAction(unsigned InsnID, const CodeGenInstruction *I)
+ : InsnID(InsnID), I(I), Matched(nullptr) {}
+
+ unsigned getInsnID() const { return InsnID; }
+ const CodeGenInstruction *getCGI() const { return I; }
+
+ // Pick (and reserve) the first mutatable matched instruction this action
+ // is able to mutate, if any.
+ void chooseInsnToMutate(RuleMatcher &Rule) {
+ for (auto *MutateCandidate : Rule.mutatable_insns()) {
+ if (canMutate(Rule, MutateCandidate)) {
+ // Take the first one we're offered that we're able to mutate.
+ Rule.reserveInsnMatcherForMutation(MutateCandidate);
+ Matched = MutateCandidate;
+ return;
+ }
+ }
+ }
+
+ // Appends an operand renderer of the given Kind; the action's InsnID is
+ // always passed as the renderer's first constructor argument.
+ template <class Kind, class... Args>
+ Kind &addRenderer(Args&&... args) {
+ OperandRenderers.emplace_back(
+ std::make_unique<Kind>(InsnID, std::forward<Args>(args)...));
+ return *static_cast<Kind *>(OperandRenderers.back().get());
+ }
+
+ void emitActionOpcodes(MatchTable &Table, RuleMatcher &Rule) const override {
+ // Fast path: mutate the matched instruction's opcode in place, then add
+ // the new opcode's implicit defs/uses.
+ if (Matched) {
+ assert(canMutate(Rule, Matched) &&
+ "Arranged to mutate an insn that isn't mutatable");
+
+ unsigned RecycleInsnID = Rule.getInsnVarID(*Matched);
+ Table << MatchTable::Opcode("GIR_MutateOpcode")
+ << MatchTable::Comment("InsnID") << MatchTable::IntValue(InsnID)
+ << MatchTable::Comment("RecycleInsnID")
+ << MatchTable::IntValue(RecycleInsnID)
+ << MatchTable::Comment("Opcode")
+ << MatchTable::NamedValue(I->Namespace, I->TheDef->getName())
+ << MatchTable::LineBreak;
+
+ if (!I->ImplicitDefs.empty() || !I->ImplicitUses.empty()) {
+ for (auto *Def : I->ImplicitDefs) {
+ auto Namespace = Def->getValue("Namespace")
+ ? Def->getValueAsString("Namespace")
+ : "";
+ Table << MatchTable::Opcode("GIR_AddImplicitDef")
+ << MatchTable::Comment("InsnID") << MatchTable::IntValue(InsnID)
+ << MatchTable::NamedValue(Namespace, Def->getName())
+ << MatchTable::LineBreak;
+ }
+ for (auto *Use : I->ImplicitUses) {
+ auto Namespace = Use->getValue("Namespace")
+ ? Use->getValueAsString("Namespace")
+ : "";
+ Table << MatchTable::Opcode("GIR_AddImplicitUse")
+ << MatchTable::Comment("InsnID") << MatchTable::IntValue(InsnID)
+ << MatchTable::NamedValue(Namespace, Use->getName())
+ << MatchTable::LineBreak;
+ }
+ }
+ return;
+ }
+
+ // TODO: Simple permutation looks like it could be almost as common as
+ // mutation due to commutative operations.
+
+ // Slow path: build a brand-new instruction and render each operand.
+ Table << MatchTable::Opcode("GIR_BuildMI") << MatchTable::Comment("InsnID")
+ << MatchTable::IntValue(InsnID) << MatchTable::Comment("Opcode")
+ << MatchTable::NamedValue(I->Namespace, I->TheDef->getName())
+ << MatchTable::LineBreak;
+ for (const auto &Renderer : OperandRenderers)
+ Renderer->emitRenderOpcodes(Table, Rule);
+
+ if (I->mayLoad || I->mayStore) {
+ Table << MatchTable::Opcode("GIR_MergeMemOperands")
+ << MatchTable::Comment("InsnID") << MatchTable::IntValue(InsnID)
+ << MatchTable::Comment("MergeInsnID's");
+ // Emit the ID's for all the instructions that are matched by this rule.
+ // TODO: Limit this to matched instructions that mayLoad/mayStore or have
+ // some other means of having a memoperand. Also limit this to
+ // emitted instructions that expect to have a memoperand too. For
+ // example, (G_SEXT (G_LOAD x)) that results in separate load and
+ // sign-extend instructions shouldn't put the memoperand on the
+ // sign-extend since it has no effect there.
+ std::vector<unsigned> MergeInsnIDs;
+ for (const auto &IDMatcherPair : Rule.defined_insn_vars())
+ MergeInsnIDs.push_back(IDMatcherPair.second);
+ llvm::sort(MergeInsnIDs);
+ for (const auto &MergeInsnID : MergeInsnIDs)
+ Table << MatchTable::IntValue(MergeInsnID);
+ Table << MatchTable::NamedValue("GIU_MergeMemOperands_EndOfList")
+ << MatchTable::LineBreak;
+ }
+
+ // FIXME: This is a hack but it's sufficient for ISel. We'll need to do
+ // better for combines. Particularly when there are multiple match
+ // roots.
+ if (InsnID == 0)
+ Table << MatchTable::Opcode("GIR_EraseFromParent")
+ << MatchTable::Comment("InsnID") << MatchTable::IntValue(InsnID)
+ << MatchTable::LineBreak;
+ }
+};
+
+/// Generates code to constrain the operands of an output instruction to the
+/// register classes specified by the definition of that instruction.
+class ConstrainOperandsToDefinitionAction : public MatchAction {
+  unsigned InsnID;
+
+public:
+  ConstrainOperandsToDefinitionAction(unsigned InsnID) : InsnID(InsnID) {}
+
+  void emitActionOpcodes(MatchTable &Table, RuleMatcher &Rule) const override {
+    // GIR_ConstrainSelectedInstOperands InsnID
+    Table << MatchTable::Opcode("GIR_ConstrainSelectedInstOperands");
+    Table << MatchTable::Comment("InsnID") << MatchTable::IntValue(InsnID);
+    Table << MatchTable::LineBreak;
+  }
+};
+
+/// Generates code to constrain the specified operand of an output instruction
+/// to the specified register class.
+class ConstrainOperandToRegClassAction : public MatchAction {
+  unsigned InsnID;
+  unsigned OpIdx;
+  const CodeGenRegisterClass &RC;
+
+public:
+  ConstrainOperandToRegClassAction(unsigned InsnID, unsigned OpIdx,
+                                   const CodeGenRegisterClass &RC)
+      : InsnID(InsnID), OpIdx(OpIdx), RC(RC) {}
+
+  void emitActionOpcodes(MatchTable &Table, RuleMatcher &Rule) const override {
+    // GIR_ConstrainOperandRC InsnID OpIdx <RC>RegClassID
+    Table << MatchTable::Opcode("GIR_ConstrainOperandRC");
+    Table << MatchTable::Comment("InsnID") << MatchTable::IntValue(InsnID);
+    Table << MatchTable::Comment("Op") << MatchTable::IntValue(OpIdx);
+    Table << MatchTable::NamedValue(RC.getQualifiedName() + "RegClassID");
+    Table << MatchTable::LineBreak;
+  }
+};
+
+/// Generates code to create a temporary register which can be used to chain
+/// instructions together.
+class MakeTempRegisterAction : public MatchAction {
+private:
+ LLTCodeGen Ty;
+ unsigned TempRegID;
+
+public:
+ MakeTempRegisterAction(const LLTCodeGen &Ty, unsigned TempRegID)
+ : Ty(Ty), TempRegID(TempRegID) {
+ // Register the type so its enum value is available when the table is
+ // emitted. NOTE(review): KnownTypes appears to be a file-scope collector
+ // of all LLTs referenced by the emitter — confirm against its definition.
+ KnownTypes.insert(Ty);
+ }
+
+ void emitActionOpcodes(MatchTable &Table, RuleMatcher &Rule) const override {
+ Table << MatchTable::Opcode("GIR_MakeTempReg")
+ << MatchTable::Comment("TempRegID") << MatchTable::IntValue(TempRegID)
+ << MatchTable::Comment("TypeID")
+ << MatchTable::NamedValue(Ty.getCxxEnumValue())
+ << MatchTable::LineBreak;
+ }
+};
+
+InstructionMatcher &RuleMatcher::addInstructionMatcher(StringRef SymbolicName) {
+  // Every newly added matcher starts out as a candidate for opcode mutation.
+  auto *IM = new InstructionMatcher(*this, SymbolicName);
+  Matchers.emplace_back(IM);
+  MutatableInsns.insert(IM);
+  return *IM;
+}
+
+// Records a subtarget feature predicate that must hold for this rule to apply.
+void RuleMatcher::addRequiredFeature(Record *Feature) {
+ RequiredFeatures.push_back(Feature);
+}
+
+// Returns the subtarget feature predicates recorded via addRequiredFeature.
+const std::vector<Record *> &RuleMatcher::getRequiredFeatures() const {
+ return RequiredFeatures;
+}
+
+// Emplaces an action of the specified Kind at the end of the action list and
+// returns a reference to it.
+//
+// Like std::vector::emplace_back(), may invalidate all iterators if the new
+// size exceeds the capacity. Otherwise, only invalidates the past-the-end
+// iterator.
+template <class Kind, class... Args>
+Kind &RuleMatcher::addAction(Args &&... args) {
+  auto Action = std::make_unique<Kind>(std::forward<Args>(args)...);
+  Kind *Ptr = Action.get();
+  Actions.emplace_back(std::move(Action));
+  return *Ptr;
+}
+
+// Emplaces an action of the specified Kind before the given insertion point.
+//
+// Returns an iterator pointing at the newly created action.
+//
+// Like std::vector::insert(), may invalidate all iterators if the new size
+// exceeds the capacity. Otherwise, only invalidates the iterators from the
+// insertion point onwards.
+template <class Kind, class... Args>
+action_iterator RuleMatcher::insertAction(action_iterator InsertPt,
+ Args &&... args) {
+ return Actions.emplace(InsertPt,
+ std::make_unique<Kind>(std::forward<Args>(args)...));
+}
+
+// Assigns the next free instruction-variable ID to Matcher and records the
+// association for later lookup via getInsnVarID.
+unsigned RuleMatcher::implicitlyDefineInsnVar(InstructionMatcher &Matcher) {
+  const unsigned VarID = NextInsnVarID++;
+  InsnVariableIDs[&Matcher] = VarID;
+  return VarID;
+}
+
+// Looks up the instruction-variable ID previously assigned to InsnMatcher;
+// aborts if the matcher was never registered.
+unsigned RuleMatcher::getInsnVarID(InstructionMatcher &InsnMatcher) const {
+  auto It = InsnVariableIDs.find(&InsnMatcher);
+  if (It == InsnVariableIDs.end())
+    llvm_unreachable("Matched Insn was not captured in a local variable");
+  return It->second;
+}
+
+// Associates SymbolicName with OM the first time the name is seen; on any
+// later use of the same name, adds a SameOperandMatcher predicate instead so
+// the two references are checked for identity at match time.
+void RuleMatcher::defineOperand(StringRef SymbolicName, OperandMatcher &OM) {
+ if (DefinedOperands.find(SymbolicName) == DefinedOperands.end()) {
+ DefinedOperands[SymbolicName] = &OM;
+ return;
+ }
+
+ // If the operand is already defined, then we must ensure both references in
+ // the matcher have the exact same node.
+ OM.addPredicate<SameOperandMatcher>(
+ OM.getSymbolicName(), getOperandMatcher(OM.getSymbolicName()).getOpIdx());
+}
+
+// Records the operand matcher for a physical register. Only the first
+// definition per register is kept; later calls are silently ignored.
+void RuleMatcher::definePhysRegOperand(Record *Reg, OperandMatcher &OM) {
+  if (PhysRegOperands.count(Reg) == 0)
+    PhysRegOperands[Reg] = &OM;
+}
+
+// Finds the registered instruction matcher whose symbolic name matches;
+// aborts if none exists.
+InstructionMatcher &
+RuleMatcher::getInstructionMatcher(StringRef SymbolicName) const {
+  for (const auto &Entry : InsnVariableIDs) {
+    InstructionMatcher *IM = Entry.first;
+    if (IM->getSymbolicName() == SymbolicName)
+      return *IM;
+  }
+  llvm_unreachable(
+      ("Failed to lookup instruction " + SymbolicName).str().c_str());
+}
+
+// Returns the operand matcher recorded for the physical register Reg; emits a
+// fatal TableGen error if the register was never declared in this matcher.
+const OperandMatcher &
+RuleMatcher::getPhysRegOperandMatcher(Record *Reg) const {
+  auto It = PhysRegOperands.find(Reg);
+  if (It == PhysRegOperands.end())
+    PrintFatalError(SrcLoc, "Register " + Reg->getName() +
+                                " was not declared in matcher");
+  return *It->second;
+}
+
+// Returns the operand matcher registered under Name; emits a fatal TableGen
+// error if no operand of that name was declared.
+const OperandMatcher &
+RuleMatcher::getOperandMatcher(StringRef Name) const {
+  auto It = DefinedOperands.find(Name);
+  if (It == DefinedOperands.end())
+    PrintFatalError(SrcLoc, "Operand " + Name + " was not declared in matcher");
+  return *It->second;
+}
+
+// Emits this rule into the match table: GIM_Try header, feature checks,
+// matcher predicates, safe-to-fold checks, epilogue predicates, actions,
+// coverage marker, and the closing GIR_Done/label.
+void RuleMatcher::emit(MatchTable &Table) {
+ if (Matchers.empty())
+ llvm_unreachable("Unexpected empty matcher!");
+
+ // The representation supports rules that require multiple roots such as:
+ // %ptr(p0) = ...
+ // %elt0(s32) = G_LOAD %ptr
+ // %1(p0) = G_ADD %ptr, 4
+ // %elt1(s32) = G_LOAD p0 %1
+ // which could be usefully folded into:
+ // %ptr(p0) = ...
+ // %elt0(s32), %elt1(s32) = TGT_LOAD_PAIR %ptr
+ // on some targets but we don't need to make use of that yet.
+ assert(Matchers.size() == 1 && "Cannot handle multi-root matchers yet");
+
+ unsigned LabelID = Table.allocateLabelID();
+ Table << MatchTable::Opcode("GIM_Try", +1)
+ << MatchTable::Comment("On fail goto")
+ << MatchTable::JumpTarget(LabelID)
+ << MatchTable::Comment(("Rule ID " + Twine(RuleID) + " //").str())
+ << MatchTable::LineBreak;
+
+ if (!RequiredFeatures.empty()) {
+ Table << MatchTable::Opcode("GIM_CheckFeatures")
+ << MatchTable::NamedValue(getNameForFeatureBitset(RequiredFeatures))
+ << MatchTable::LineBreak;
+ }
+
+ Matchers.front()->emitPredicateOpcodes(Table, *this);
+
+ // We must also check if it's safe to fold the matched instructions.
+ if (InsnVariableIDs.size() >= 2) {
+ // Invert the map to create stable ordering (by var names)
+ SmallVector<unsigned, 2> InsnIDs;
+ for (const auto &Pair : InsnVariableIDs) {
+ // Skip the root node since it isn't moving anywhere. Everything else is
+ // sinking to meet it.
+ if (Pair.first == Matchers.front().get())
+ continue;
+
+ InsnIDs.push_back(Pair.second);
+ }
+ llvm::sort(InsnIDs);
+
+ for (const auto &InsnID : InsnIDs) {
+ // Reject the difficult cases until we have a more accurate check.
+ Table << MatchTable::Opcode("GIM_CheckIsSafeToFold")
+ << MatchTable::Comment("InsnID") << MatchTable::IntValue(InsnID)
+ << MatchTable::LineBreak;
+
+ // FIXME: Emit checks to determine it's _actually_ safe to fold and/or
+ // account for unsafe cases.
+ //
+ // Example:
+ // MI1--> %0 = ...
+ // %1 = ... %0
+ // MI0--> %2 = ... %0
+ // It's not safe to erase MI1. We currently handle this by not
+ // erasing %0 (even when it's dead).
+ //
+ // Example:
+ // MI1--> %0 = load volatile @a
+ // %1 = load volatile @a
+ // MI0--> %2 = ... %0
+ // It's not safe to sink %0's def past %1. We currently handle
+ // this by rejecting all loads.
+ //
+ // Example:
+ // MI1--> %0 = load @a
+ // %1 = store @a
+ // MI0--> %2 = ... %0
+ // It's not safe to sink %0's def past %1. We currently handle
+ // this by rejecting all loads.
+ //
+ // Example:
+ // G_CONDBR %cond, @BB1
+ // BB0:
+ // MI1--> %0 = load @a
+ // G_BR @BB1
+ // BB1:
+ // MI0--> %2 = ... %0
+ // It's not always safe to sink %0 across control flow. In this
+ // case it may introduce a memory fault. We currently handle this
+ // by rejecting all loads.
+ }
+ }
+
+ for (const auto &PM : EpilogueMatchers)
+ PM->emitPredicateOpcodes(Table, *this);
+
+ for (const auto &MA : Actions)
+ MA->emitActionOpcodes(Table, *this);
+
+ // Coverage is emitted as a real opcode only when requested; otherwise it is
+ // kept as a comment so rule IDs remain visible in the table.
+ if (Table.isWithCoverage())
+ Table << MatchTable::Opcode("GIR_Coverage") << MatchTable::IntValue(RuleID)
+ << MatchTable::LineBreak;
+ else
+ Table << MatchTable::Comment(("GIR_Coverage, " + Twine(RuleID) + ",").str())
+ << MatchTable::LineBreak;
+
+ Table << MatchTable::Opcode("GIR_Done", -1) << MatchTable::LineBreak
+ << MatchTable::Label(LabelID);
+ ++NumPatternEmitted;
+}
+
+// Orders rules for emission: rules with more match roots win outright;
+// otherwise the first pairwise matcher comparison that differs decides.
+bool RuleMatcher::isHigherPriorityThan(const RuleMatcher &B) const {
+  // Rules involving more match roots have higher priority.
+  if (Matchers.size() != B.Matchers.size())
+    return Matchers.size() > B.Matchers.size();
+
+  for (auto Pair : zip(Matchers, B.Matchers)) {
+    const auto &LHS = std::get<0>(Pair);
+    const auto &RHS = std::get<1>(Pair);
+    if (LHS->isHigherPriorityThan(*RHS))
+      return true;
+    if (RHS->isHigherPriorityThan(*LHS))
+      return false;
+  }
+
+  return false;
+}
+
+// Totals the renderer-function count over every instruction matcher in the
+// rule.
+unsigned RuleMatcher::countRendererFns() const {
+  unsigned Total = 0;
+  for (const std::unique_ptr<InstructionMatcher> &M : Matchers)
+    Total += M->countRendererFns();
+  return Total;
+}
+
+bool OperandPredicateMatcher::isHigherPriorityThan(
+ const OperandPredicateMatcher &B) const {
+ // Generally speaking, an instruction is more important than an Int or a
+ // LiteralInt because it can cover more nodes but there's an exception to
+ // this. G_CONSTANT's are less important than either of those two because they
+ // are more permissive.
+
+ const InstructionOperandMatcher *AOM =
+ dyn_cast<InstructionOperandMatcher>(this);
+ const InstructionOperandMatcher *BOM =
+ dyn_cast<InstructionOperandMatcher>(&B);
+ bool AIsConstantInsn = AOM && AOM->getInsnMatcher().isConstantInstruction();
+ bool BIsConstantInsn = BOM && BOM->getInsnMatcher().isConstantInstruction();
+
+ // Both sides are instruction operand matchers.
+ if (AOM && BOM) {
+ // The relative priorities between a G_CONSTANT and any other instruction
+ // don't actually matter but this code is needed to ensure a strict weak
+ // ordering. This is particularly important on Windows where the rules will
+ // be incorrectly sorted without it.
+ if (AIsConstantInsn != BIsConstantInsn)
+ return AIsConstantInsn < BIsConstantInsn;
+ return false;
+ }
+
+ // A constant-instruction matcher loses to Int/LiteralInt predicates (and
+ // vice versa), per the rationale above.
+ if (AOM && AIsConstantInsn && (B.Kind == OPM_Int || B.Kind == OPM_LiteralInt))
+ return false;
+ if (BOM && BIsConstantInsn && (Kind == OPM_Int || Kind == OPM_LiteralInt))
+ return true;
+
+ // Fall back to the predicate-kind ordering.
+ return Kind < B.Kind;
+}
+
+// Emits GIM_CheckIsSameOperand to verify that this operand is identical to
+// the operand previously registered under MatchingName.
+void SameOperandMatcher::emitPredicateOpcodes(MatchTable &Table,
+ RuleMatcher &Rule) const {
+ const OperandMatcher &OtherOM = Rule.getOperandMatcher(MatchingName);
+ unsigned OtherInsnVarID = Rule.getInsnVarID(OtherOM.getInstructionMatcher());
+ assert(OtherInsnVarID == OtherOM.getInstructionMatcher().getInsnVarID());
+
+ Table << MatchTable::Opcode("GIM_CheckIsSameOperand")
+ << MatchTable::Comment("MI") << MatchTable::IntValue(InsnVarID)
+ << MatchTable::Comment("OpIdx") << MatchTable::IntValue(OpIdx)
+ << MatchTable::Comment("OtherMI")
+ << MatchTable::IntValue(OtherInsnVarID)
+ << MatchTable::Comment("OtherOpIdx")
+ << MatchTable::IntValue(OtherOM.getOpIdx())
+ << MatchTable::LineBreak;
+}
+
+//===- GlobalISelEmitter class --------------------------------------------===//
+
+// Returns the LLT of a destination pattern node's single result, or a failed
+// import if the node has multiple results or an unsupported type.
+static Expected<LLTCodeGen> getInstResultType(const TreePatternNode *Dst) {
+  ArrayRef<TypeSetByHwMode> ChildTypes = Dst->getExtTypes();
+  if (ChildTypes.size() != 1)
+    return failedImport("Dst pattern child has multiple results");
+
+  const TypeSetByHwMode &Ty = ChildTypes.front();
+  std::optional<LLTCodeGen> OpTy;
+  if (Ty.isMachineValueType())
+    OpTy = MVTToLLT(Ty.getMachineValueType().SimpleTy);
+
+  if (!OpTy)
+    return failedImport("Dst operand has an unsupported type");
+  return *OpTy;
+}
+
+/// TableGen backend that imports a target's SelectionDAG patterns and emits
+/// the GlobalISel instruction-selection match table plus its supporting C++.
+class GlobalISelEmitter {
+public:
+ explicit GlobalISelEmitter(RecordKeeper &RK);
+ void run(raw_ostream &OS);
+
+private:
+ const RecordKeeper &RK;
+ const CodeGenDAGPatterns CGP;
+ const CodeGenTarget &Target;
+ CodeGenRegBank &CGRegs;
+
+ /// Keep track of the equivalence between SDNodes and Instruction by mapping
+ /// SDNodes to the GINodeEquiv mapping. We need to map to the GINodeEquiv to
+ /// check for attributes on the relation such as CheckMMOIsNonAtomic.
+ /// This is defined using 'GINodeEquiv' in the target description.
+ DenseMap<Record *, Record *> NodeEquivs;
+
+ /// Keep track of the equivalence between ComplexPattern's and
+ /// GIComplexOperandMatcher. Map entries are specified by subclassing
+ /// GIComplexPatternEquiv.
+ DenseMap<const Record *, const Record *> ComplexPatternEquivs;
+
+ /// Keep track of the equivalence between SDNodeXForm's and
+ /// GICustomOperandRenderer. Map entries are specified by subclassing
+ /// GISDNodeXFormEquiv.
+ DenseMap<const Record *, const Record *> SDNodeXFormEquivs;
+
+ /// Keep track of Scores of PatternsToMatch similar to how the DAG does.
+ /// This adds compatibility for RuleMatchers to use this for ordering rules.
+ DenseMap<uint64_t, int> RuleMatcherScores;
+
+ // Map of predicates to their subtarget features.
+ SubtargetFeatureInfoMap SubtargetFeatures;
+
+ // Rule coverage information.
+ std::optional<CodeGenCoverage> RuleCoverage;
+
+ /// Variables used to help with collecting of named operands for predicates
+ /// with 'let PredicateCodeUsesOperands = 1'. WaitingForNamedOperands is set
+ /// to the number of named operands that predicate expects. Store locations in
+ /// StoreIdxForName correspond to the order in which operand names appear in
+ /// predicate's argument list.
+ /// When we visit named leaf operand and WaitingForNamedOperands is not zero,
+ /// add matcher that will record operand and decrease counter.
+ unsigned WaitingForNamedOperands = 0;
+ StringMap<unsigned> StoreIdxForName;
+
+ void gatherOpcodeValues();
+ void gatherTypeIDValues();
+ void gatherNodeEquivs();
+
+ Record *findNodeEquiv(Record *N) const;
+ const CodeGenInstruction *getEquivNode(Record &Equiv,
+ const TreePatternNode *N) const;
+
+ Error importRulePredicates(RuleMatcher &M, ArrayRef<Record *> Predicates);
+ Expected<InstructionMatcher &>
+ createAndImportSelDAGMatcher(RuleMatcher &Rule,
+ InstructionMatcher &InsnMatcher,
+ const TreePatternNode *Src, unsigned &TempOpIdx);
+ Error importComplexPatternOperandMatcher(OperandMatcher &OM, Record *R,
+ unsigned &TempOpIdx) const;
+ Error importChildMatcher(RuleMatcher &Rule, InstructionMatcher &InsnMatcher,
+ const TreePatternNode *SrcChild,
+ bool OperandIsAPointer, bool OperandIsImmArg,
+ unsigned OpIdx, unsigned &TempOpIdx);
+
+ Expected<BuildMIAction &> createAndImportInstructionRenderer(
+ RuleMatcher &M, InstructionMatcher &InsnMatcher,
+ const TreePatternNode *Src, const TreePatternNode *Dst);
+ Expected<action_iterator> createAndImportSubInstructionRenderer(
+ action_iterator InsertPt, RuleMatcher &M, const TreePatternNode *Dst,
+ unsigned TempReg);
+ Expected<action_iterator>
+ createInstructionRenderer(action_iterator InsertPt, RuleMatcher &M,
+ const TreePatternNode *Dst);
+
+ Expected<action_iterator>
+ importExplicitDefRenderers(action_iterator InsertPt, RuleMatcher &M,
+ BuildMIAction &DstMIBuilder,
+ const TreePatternNode *Dst);
+
+ Expected<action_iterator>
+ importExplicitUseRenderers(action_iterator InsertPt, RuleMatcher &M,
+ BuildMIAction &DstMIBuilder,
+ const llvm::TreePatternNode *Dst);
+ Expected<action_iterator>
+ importExplicitUseRenderer(action_iterator InsertPt, RuleMatcher &Rule,
+ BuildMIAction &DstMIBuilder,
+ TreePatternNode *DstChild);
+ Error importDefaultOperandRenderers(action_iterator InsertPt, RuleMatcher &M,
+ BuildMIAction &DstMIBuilder,
+ DagInit *DefaultOps) const;
+ Error
+ importImplicitDefRenderers(BuildMIAction &DstMIBuilder,
+ const std::vector<Record *> &ImplicitDefs) const;
+
+ void emitCxxPredicateFns(raw_ostream &OS, StringRef CodeFieldName,
+ StringRef TypeIdentifier, StringRef ArgType,
+ StringRef ArgName, StringRef AdditionalArgs,
+ StringRef AdditionalDeclarations,
+ std::function<bool(const Record *R)> Filter);
+ void emitImmPredicateFns(raw_ostream &OS, StringRef TypeIdentifier,
+ StringRef ArgType,
+ std::function<bool(const Record *R)> Filter);
+ void emitMIPredicateFns(raw_ostream &OS);
+
+ /// Analyze pattern \p P, returning a matcher for it if possible.
+ /// Otherwise, return an Error explaining why we don't support it.
+ Expected<RuleMatcher> runOnPattern(const PatternToMatch &P);
+
+ void declareSubtargetFeature(Record *Predicate);
+
+ MatchTable buildMatchTable(MutableArrayRef<RuleMatcher> Rules, bool Optimize,
+ bool WithCoverage);
+
+ /// Infer a CodeGenRegisterClass for the type of \p SuperRegNode. The returned
+ /// CodeGenRegisterClass will support the CodeGenRegisterClass of
+ /// \p SubRegNode, and the subregister index defined by \p SubRegIdxNode.
+ /// If no register class is found, return std::nullopt.
+ std::optional<const CodeGenRegisterClass *>
+ inferSuperRegisterClassForNode(const TypeSetByHwMode &Ty,
+ TreePatternNode *SuperRegNode,
+ TreePatternNode *SubRegIdxNode);
+ std::optional<CodeGenSubRegIndex *>
+ inferSubRegIndexForNode(TreePatternNode *SubRegIdxNode);
+
+ /// Infer a CodeGenRegisterClass which supports \p Ty and \p SubRegIdxNode.
+ /// Return std::nullopt if no such class exists.
+ std::optional<const CodeGenRegisterClass *>
+ inferSuperRegisterClass(const TypeSetByHwMode &Ty,
+ TreePatternNode *SubRegIdxNode);
+
+ /// Return the CodeGenRegisterClass associated with \p Leaf if it has one.
+ std::optional<const CodeGenRegisterClass *>
+ getRegClassFromLeaf(TreePatternNode *Leaf);
+
+ /// Return a CodeGenRegisterClass for \p N if one can be found. Return
+ /// std::nullopt otherwise.
+ std::optional<const CodeGenRegisterClass *>
+ inferRegClassFromPattern(TreePatternNode *N);
+
+ /// Return the size of the MemoryVT in this predicate, if possible.
+ std::optional<unsigned>
+ getMemSizeBitsFromPredicate(const TreePredicateFn &Predicate);
+
+ // Add builtin predicates.
+ Expected<InstructionMatcher &>
+ addBuiltinPredicates(const Record *SrcGIEquivOrNull,
+ const TreePredicateFn &Predicate,
+ InstructionMatcher &InsnMatcher, bool &HasAddedMatcher);
+
+public:
+ /// Takes a sequence of \p Rules and group them based on the predicates
+ /// they share. \p MatcherStorage is used as a memory container
+ /// for the group that are created as part of this process.
+ ///
+ /// What this optimization does looks like if GroupT = GroupMatcher:
+ /// Output without optimization:
+ /// \verbatim
+ /// # R1
+ /// # predicate A
+ /// # predicate B
+ /// ...
+ /// # R2
+ /// # predicate A // <-- effectively this is going to be checked twice.
+ /// // Once in R1 and once in R2.
+ /// # predicate C
+ /// \endverbatim
+ /// Output with optimization:
+ /// \verbatim
+ /// # Group1_2
+ /// # predicate A // <-- Check is now shared.
+ /// # R1
+ /// # predicate B
+ /// # R2
+ /// # predicate C
+ /// \endverbatim
+ template <class GroupT>
+ static std::vector<Matcher *> optimizeRules(
+ ArrayRef<Matcher *> Rules,
+ std::vector<std::unique_ptr<Matcher>> &MatcherStorage);
+};
+
// Record the enum value of every target instruction opcode so the match
// table can emit opcode checks by value.
void GlobalISelEmitter::gatherOpcodeValues() {
  InstructionOpcodeMatcher::initOpcodeValuesMap(Target);
}
+
// Assign stable IDs to all LLTs referenced by the rules so the match table
// can emit type checks by ID.
void GlobalISelEmitter::gatherTypeIDValues() {
  LLTOperandMatcher::initTypeIDValuesMap();
}
+
+void GlobalISelEmitter::gatherNodeEquivs() {
+ assert(NodeEquivs.empty());
+ for (Record *Equiv : RK.getAllDerivedDefinitions("GINodeEquiv"))
+ NodeEquivs[Equiv->getValueAsDef("Node")] = Equiv;
+
+ assert(ComplexPatternEquivs.empty());
+ for (Record *Equiv : RK.getAllDerivedDefinitions("GIComplexPatternEquiv")) {
+ Record *SelDAGEquiv = Equiv->getValueAsDef("SelDAGEquivalent");
+ if (!SelDAGEquiv)
+ continue;
+ ComplexPatternEquivs[SelDAGEquiv] = Equiv;
+ }
+
+ assert(SDNodeXFormEquivs.empty());
+ for (Record *Equiv : RK.getAllDerivedDefinitions("GISDNodeXFormEquiv")) {
+ Record *SelDAGEquiv = Equiv->getValueAsDef("SelDAGEquivalent");
+ if (!SelDAGEquiv)
+ continue;
+ SDNodeXFormEquivs[SelDAGEquiv] = Equiv;
+ }
+}
+
// Look up the GINodeEquiv record for SelectionDAG node \p N; returns null
// when the node has no registered GlobalISel equivalent.
Record *GlobalISelEmitter::findNodeEquiv(Record *N) const {
  return NodeEquivs.lookup(N);
}
+
// Select which GlobalISel instruction a GINodeEquiv maps \p N to. A single
// equivalence record may name alternates: IfFloatingPoint is chosen from the
// first operand's type (e.g. setcc -> G_FCMP vs G_ICMP), and
// IfSignExtend/IfZeroExtend are chosen from load/atomic extension
// predicates. Falls back to the default instruction named by "I".
const CodeGenInstruction *
GlobalISelEmitter::getEquivNode(Record &Equiv, const TreePatternNode *N) const {
  if (N->getNumChildren() >= 1) {
    // setcc operation maps to two different G_* instructions based on the type.
    if (!Equiv.isValueUnset("IfFloatingPoint") &&
        MVT(N->getChild(0)->getSimpleType(0)).isFloatingPoint())
      return &Target.getInstruction(Equiv.getValueAsDef("IfFloatingPoint"));
  }

  for (const TreePredicateCall &Call : N->getPredicateCalls()) {
    const TreePredicateFn &Predicate = Call.Fn;
    if (!Equiv.isValueUnset("IfSignExtend") &&
        (Predicate.isLoad() || Predicate.isAtomic()) &&
        Predicate.isSignExtLoad())
      return &Target.getInstruction(Equiv.getValueAsDef("IfSignExtend"));
    if (!Equiv.isValueUnset("IfZeroExtend") &&
        (Predicate.isLoad() || Predicate.isAtomic()) &&
        Predicate.isZeroExtLoad())
      return &Target.getInstruction(Equiv.getValueAsDef("IfZeroExtend"));
  }

  return &Target.getInstruction(Equiv.getValueAsDef("I"));
}
+
// Construct the emitter: parsing the DAG patterns (CGP) happens here, and
// the target and register-bank info derived from them are cached.
GlobalISelEmitter::GlobalISelEmitter(RecordKeeper &RK)
    : RK(RK), CGP(RK), Target(CGP.getTargetInfo()),
      CGRegs(Target.getRegBank()) {}
+
+//===- Emitter ------------------------------------------------------------===//
+
+Error GlobalISelEmitter::importRulePredicates(RuleMatcher &M,
+ ArrayRef<Record *> Predicates) {
+ for (Record *Pred : Predicates) {
+ if (Pred->getValueAsString("CondString").empty())
+ continue;
+ declareSubtargetFeature(Pred);
+ M.addRequiredFeature(Pred);
+ }
+
+ return Error::success();
+}
+
+std::optional<unsigned> GlobalISelEmitter::getMemSizeBitsFromPredicate(
+ const TreePredicateFn &Predicate) {
+ std::optional<LLTCodeGen> MemTyOrNone =
+ MVTToLLT(getValueType(Predicate.getMemoryVT()));
+
+ if (!MemTyOrNone)
+ return std::nullopt;
+
+ // Align so unusual types like i1 don't get rounded down.
+ return llvm::alignTo(
+ static_cast<unsigned>(MemTyOrNone->get().getSizeInBits()), 8);
+}
+
// Translate the builtin (load/store/atomic) aspects of a SelectionDAG
// predicate into GlobalISel instruction predicates on \p InsnMatcher.
// The order of the checks below matters: each handled case returns early,
// and later cases assume the earlier ones did not apply. If none of the
// builtin kinds match, \p HasAddedMatcher is set to false so the caller can
// try other mechanisms (e.g. custom GISel predicate code) or fail the
// import. Returns \p InsnMatcher on success, or an Error when a predicate
// is recognized but cannot be translated.
Expected<InstructionMatcher &> GlobalISelEmitter::addBuiltinPredicates(
    const Record *SrcGIEquivOrNull, const TreePredicateFn &Predicate,
    InstructionMatcher &InsnMatcher, bool &HasAddedMatcher) {
  if (Predicate.isLoad() || Predicate.isStore() || Predicate.isAtomic()) {
    // Address-space restrictions on the memory operand.
    if (const ListInit *AddrSpaces = Predicate.getAddressSpaces()) {
      SmallVector<unsigned, 4> ParsedAddrSpaces;

      for (Init *Val : AddrSpaces->getValues()) {
        IntInit *IntVal = dyn_cast<IntInit>(Val);
        if (!IntVal)
          return failedImport("Address space is not an integer");
        ParsedAddrSpaces.push_back(IntVal->getValue());
      }

      if (!ParsedAddrSpaces.empty()) {
        InsnMatcher.addPredicate<MemoryAddressSpacePredicateMatcher>(
            0, ParsedAddrSpaces);
        return InsnMatcher;
      }
    }

    // Minimum-alignment restrictions on the memory operand.
    int64_t MinAlign = Predicate.getMinAlignment();
    if (MinAlign > 0) {
      InsnMatcher.addPredicate<MemoryAlignmentPredicateMatcher>(0, MinAlign);
      return InsnMatcher;
    }
  }

  // G_LOAD is used for both non-extending and any-extending loads.
  if (Predicate.isLoad() && Predicate.isNonExtLoad()) {
    InsnMatcher.addPredicate<MemoryVsLLTSizePredicateMatcher>(
        0, MemoryVsLLTSizePredicateMatcher::EqualTo, 0);
    return InsnMatcher;
  }
  if (Predicate.isLoad() && Predicate.isAnyExtLoad()) {
    InsnMatcher.addPredicate<MemoryVsLLTSizePredicateMatcher>(
        0, MemoryVsLLTSizePredicateMatcher::LessThan, 0);
    return InsnMatcher;
  }

  if (Predicate.isStore()) {
    if (Predicate.isTruncStore()) {
      if (Predicate.getMemoryVT() != nullptr) {
        // FIXME: If MemoryVT is set, we end up with 2 checks for the MMO size.
        auto MemSizeInBits = getMemSizeBitsFromPredicate(Predicate);
        if (!MemSizeInBits)
          return failedImport("MemVT could not be converted to LLT");

        InsnMatcher.addPredicate<MemorySizePredicateMatcher>(0, *MemSizeInBits /
                                                                    8);
      } else {
        InsnMatcher.addPredicate<MemoryVsLLTSizePredicateMatcher>(
            0, MemoryVsLLTSizePredicateMatcher::LessThan, 0);
      }
      return InsnMatcher;
    }
    if (Predicate.isNonTruncStore()) {
      // We need to check the sizes match here otherwise we could incorrectly
      // match truncating stores with non-truncating ones.
      InsnMatcher.addPredicate<MemoryVsLLTSizePredicateMatcher>(
          0, MemoryVsLLTSizePredicateMatcher::EqualTo, 0);
    }
  }

  // No check required. We already did it by swapping the opcode.
  if (!SrcGIEquivOrNull->isValueUnset("IfSignExtend") &&
      Predicate.isSignExtLoad())
    return InsnMatcher;

  // No check required. We already did it by swapping the opcode.
  if (!SrcGIEquivOrNull->isValueUnset("IfZeroExtend") &&
      Predicate.isZeroExtLoad())
    return InsnMatcher;

  // No check required. G_STORE by itself is a non-extending store.
  if (Predicate.isNonTruncStore())
    return InsnMatcher;

  // Explicit MemoryVT on a memory operation: check the MMO size in bytes.
  if (Predicate.isLoad() || Predicate.isStore() || Predicate.isAtomic()) {
    if (Predicate.getMemoryVT() != nullptr) {
      auto MemSizeInBits = getMemSizeBitsFromPredicate(Predicate);
      if (!MemSizeInBits)
        return failedImport("MemVT could not be converted to LLT");

      InsnMatcher.addPredicate<MemorySizePredicateMatcher>(0,
                                                           *MemSizeInBits / 8);
      return InsnMatcher;
    }
  }

  if (Predicate.isLoad() || Predicate.isStore()) {
    // No check required. A G_LOAD/G_STORE is an unindexed load.
    if (Predicate.isUnindexed())
      return InsnMatcher;
  }

  // Exact atomic ordering requirements.
  if (Predicate.isAtomic()) {
    if (Predicate.isAtomicOrderingMonotonic()) {
      InsnMatcher.addPredicate<AtomicOrderingMMOPredicateMatcher>("Monotonic");
      return InsnMatcher;
    }
    if (Predicate.isAtomicOrderingAcquire()) {
      InsnMatcher.addPredicate<AtomicOrderingMMOPredicateMatcher>("Acquire");
      return InsnMatcher;
    }
    if (Predicate.isAtomicOrderingRelease()) {
      InsnMatcher.addPredicate<AtomicOrderingMMOPredicateMatcher>("Release");
      return InsnMatcher;
    }
    if (Predicate.isAtomicOrderingAcquireRelease()) {
      InsnMatcher.addPredicate<AtomicOrderingMMOPredicateMatcher>(
          "AcquireRelease");
      return InsnMatcher;
    }
    if (Predicate.isAtomicOrderingSequentiallyConsistent()) {
      InsnMatcher.addPredicate<AtomicOrderingMMOPredicateMatcher>(
          "SequentiallyConsistent");
      return InsnMatcher;
    }
  }

  // Relative atomic ordering requirements (at-least / weaker-than).
  if (Predicate.isAtomicOrderingAcquireOrStronger()) {
    InsnMatcher.addPredicate<AtomicOrderingMMOPredicateMatcher>(
        "Acquire", AtomicOrderingMMOPredicateMatcher::AO_OrStronger);
    return InsnMatcher;
  }
  if (Predicate.isAtomicOrderingWeakerThanAcquire()) {
    InsnMatcher.addPredicate<AtomicOrderingMMOPredicateMatcher>(
        "Acquire", AtomicOrderingMMOPredicateMatcher::AO_WeakerThan);
    return InsnMatcher;
  }

  if (Predicate.isAtomicOrderingReleaseOrStronger()) {
    InsnMatcher.addPredicate<AtomicOrderingMMOPredicateMatcher>(
        "Release", AtomicOrderingMMOPredicateMatcher::AO_OrStronger);
    return InsnMatcher;
  }
  if (Predicate.isAtomicOrderingWeakerThanRelease()) {
    InsnMatcher.addPredicate<AtomicOrderingMMOPredicateMatcher>(
        "Release", AtomicOrderingMMOPredicateMatcher::AO_WeakerThan);
    return InsnMatcher;
  }
  // None of the builtin predicate kinds applied; let the caller decide.
  HasAddedMatcher = false;
  return InsnMatcher;
}
+
// Build an InstructionMatcher for the source DAG pattern rooted at \p Src:
// deduce and match the gMIR opcode, add type checks for the results, import
// the pattern's predicate calls, then recursively import operand matchers
// for the children (handling setcc condition codes, atomic-store operand
// order, and intrinsic IDs specially). \p TempOpIdx numbers temporary
// operands across the whole rule. Returns the populated matcher or an
// Error describing why the pattern cannot be imported.
Expected<InstructionMatcher &> GlobalISelEmitter::createAndImportSelDAGMatcher(
    RuleMatcher &Rule, InstructionMatcher &InsnMatcher,
    const TreePatternNode *Src, unsigned &TempOpIdx) {
  Record *SrcGIEquivOrNull = nullptr;
  const CodeGenInstruction *SrcGIOrNull = nullptr;

  // Start with the defined operands (i.e., the results of the root operator).
  if (Src->getExtTypes().size() > 1)
    return failedImport("Src pattern has multiple results");

  if (Src->isLeaf()) {
    Init *SrcInit = Src->getLeafValue();
    if (isa<IntInit>(SrcInit)) {
      // A bare integer leaf matches a materialized G_CONSTANT.
      InsnMatcher.addPredicate<InstructionOpcodeMatcher>(
          &Target.getInstruction(RK.getDef("G_CONSTANT")));
    } else
      return failedImport(
          "Unable to deduce gMIR opcode to handle Src (which is a leaf)");
  } else {
    SrcGIEquivOrNull = findNodeEquiv(Src->getOperator());
    if (!SrcGIEquivOrNull)
      return failedImport("Pattern operator lacks an equivalent Instruction" +
                          explainOperator(Src->getOperator()));
    SrcGIOrNull = getEquivNode(*SrcGIEquivOrNull, Src);

    // The operators look good: match the opcode
    InsnMatcher.addPredicate<InstructionOpcodeMatcher>(SrcGIOrNull);
  }

  unsigned OpIdx = 0;
  for (const TypeSetByHwMode &VTy : Src->getExtTypes()) {
    // Results don't have a name unless they are the root node. The caller will
    // set the name if appropriate.
    const bool OperandIsAPointer =
        SrcGIOrNull && SrcGIOrNull->isOutOperandAPointer(OpIdx);
    OperandMatcher &OM = InsnMatcher.addOperand(OpIdx++, "", TempOpIdx);
    if (auto Error = OM.addTypeCheckPredicate(VTy, OperandIsAPointer))
      return failedImport(toString(std::move(Error)) +
                          " for result of Src pattern operator");
  }

  // Import the pattern's predicate calls (PatFrag predicates etc.).
  for (const TreePredicateCall &Call : Src->getPredicateCalls()) {
    const TreePredicateFn &Predicate = Call.Fn;
    bool HasAddedBuiltinMatcher = true;
    if (Predicate.isAlwaysTrue())
      continue;

    if (Predicate.isImmediatePattern()) {
      InsnMatcher.addPredicate<InstructionImmPredicateMatcher>(Predicate);
      continue;
    }

    auto InsnMatcherOrError = addBuiltinPredicates(
        SrcGIEquivOrNull, Predicate, InsnMatcher, HasAddedBuiltinMatcher);
    if (auto Error = InsnMatcherOrError.takeError())
      return std::move(Error);

    // FIXME: This should be part of addBuiltinPredicates(). If we add this at
    // the start of addBuiltinPredicates() without returning, then there might
    // be cases where we hit the last return before which the
    // HasAddedBuiltinMatcher will be set to false. The predicate could be
    // missed if we add it in the middle or at the end due to return statements
    // after the addPredicate<>() calls.
    if (Predicate.hasNoUse()) {
      InsnMatcher.addPredicate<NoUsePredicateMatcher>();
      HasAddedBuiltinMatcher = true;
    }

    if (Predicate.hasGISelPredicateCode()) {
      if (Predicate.usesOperands()) {
        // Record which operand names the custom predicate code will need;
        // importChildMatcher() resolves them as the children are visited.
        assert(WaitingForNamedOperands == 0 &&
               "previous predicate didn't find all operands or "
               "nested predicate that uses operands");
        TreePattern *TP = Predicate.getOrigPatFragRecord();
        WaitingForNamedOperands = TP->getNumArgs();
        for (unsigned i = 0; i < WaitingForNamedOperands; ++i)
          StoreIdxForName[getScopedName(Call.Scope, TP->getArgName(i))] = i;
      }
      InsnMatcher.addPredicate<GenericInstructionPredicateMatcher>(Predicate);
      continue;
    }
    if (!HasAddedBuiltinMatcher) {
      return failedImport("Src pattern child has predicate (" +
                          explainPredicates(Src) + ")");
    }
  }

  bool IsAtomic = false;
  if (SrcGIEquivOrNull &&
      SrcGIEquivOrNull->getValueAsBit("CheckMMOIsNonAtomic"))
    InsnMatcher.addPredicate<AtomicOrderingMMOPredicateMatcher>("NotAtomic");
  else if (SrcGIEquivOrNull &&
           SrcGIEquivOrNull->getValueAsBit("CheckMMOIsAtomic")) {
    IsAtomic = true;
    InsnMatcher.addPredicate<AtomicOrderingMMOPredicateMatcher>(
        "Unordered", AtomicOrderingMMOPredicateMatcher::AO_OrStronger);
  }

  if (Src->isLeaf()) {
    Init *SrcInit = Src->getLeafValue();
    if (IntInit *SrcIntInit = dyn_cast<IntInit>(SrcInit)) {
      OperandMatcher &OM =
          InsnMatcher.addOperand(OpIdx++, Src->getName(), TempOpIdx);
      OM.addPredicate<LiteralIntOperandMatcher>(SrcIntInit->getValue());
    } else
      return failedImport(
          "Unable to deduce gMIR opcode to handle Src (which is a leaf)");
  } else {
    assert(SrcGIOrNull &&
           "Expected to have already found an equivalent Instruction");
    if (SrcGIOrNull->TheDef->getName() == "G_CONSTANT" ||
        SrcGIOrNull->TheDef->getName() == "G_FCONSTANT") {
      // imm/fpimm still have operands but we don't need to do anything with it
      // here since we don't support ImmLeaf predicates yet. However, we still
      // need to note the hidden operand to get GIM_CheckNumOperands correct.
      InsnMatcher.addOperand(OpIdx++, "", TempOpIdx);
      return InsnMatcher;
    }

    // Special case because the operand order is changed from setcc. The
    // predicate operand needs to be swapped from the last operand to the first
    // source.

    unsigned NumChildren = Src->getNumChildren();
    bool IsFCmp = SrcGIOrNull->TheDef->getName() == "G_FCMP";

    if (IsFCmp || SrcGIOrNull->TheDef->getName() == "G_ICMP") {
      TreePatternNode *SrcChild = Src->getChild(NumChildren - 1);
      if (SrcChild->isLeaf()) {
        DefInit *DI = dyn_cast<DefInit>(SrcChild->getLeafValue());
        Record *CCDef = DI ? DI->getDef() : nullptr;
        if (!CCDef || !CCDef->isSubClassOf("CondCode"))
          return failedImport("Unable to handle CondCode");

        OperandMatcher &OM =
            InsnMatcher.addOperand(OpIdx++, SrcChild->getName(), TempOpIdx);
        StringRef PredType = IsFCmp ? CCDef->getValueAsString("FCmpPredicate")
                                    : CCDef->getValueAsString("ICmpPredicate");

        if (!PredType.empty()) {
          OM.addPredicate<CmpPredicateOperandMatcher>(std::string(PredType));
          // Process the other 2 operands normally.
          --NumChildren;
        }
      }
    }

    // Hack around an unfortunate mistake in how atomic store (and really
    // atomicrmw in general) operands were ordered. A ISD::STORE used the order
    // <stored value>, <pointer> order. ISD::ATOMIC_STORE used the opposite,
    // <pointer>, <stored value>. In GlobalISel there's just the one store
    // opcode, so we need to swap the operands here to get the right type check.
    if (IsAtomic && SrcGIOrNull->TheDef->getName() == "G_STORE") {
      assert(NumChildren == 2 && "wrong operands for atomic store");

      TreePatternNode *PtrChild = Src->getChild(0);
      TreePatternNode *ValueChild = Src->getChild(1);

      if (auto Error = importChildMatcher(Rule, InsnMatcher, PtrChild, true,
                                          false, 1, TempOpIdx))
        return std::move(Error);

      if (auto Error = importChildMatcher(Rule, InsnMatcher, ValueChild, false,
                                          false, 0, TempOpIdx))
        return std::move(Error);
      return InsnMatcher;
    }

    // Match the used operands (i.e. the children of the operator).
    bool IsIntrinsic =
        SrcGIOrNull->TheDef->getName() == "G_INTRINSIC" ||
        SrcGIOrNull->TheDef->getName() == "G_INTRINSIC_W_SIDE_EFFECTS";
    const CodeGenIntrinsic *II = Src->getIntrinsicInfo(CGP);
    if (IsIntrinsic && !II)
      return failedImport("Expected IntInit containing intrinsic ID)");

    for (unsigned i = 0; i != NumChildren; ++i) {
      TreePatternNode *SrcChild = Src->getChild(i);

      // We need to determine the meaning of a literal integer based on the
      // context. If this is a field required to be an immediate (such as an
      // immarg intrinsic argument), the required predicates are different than
      // a constant which may be materialized in a register. If we have an
      // argument that is required to be an immediate, we should not emit an LLT
      // type check, and should not be looking for a G_CONSTANT defined
      // register.
      bool OperandIsImmArg = SrcGIOrNull->isInOperandImmArg(i);

      // SelectionDAG allows pointers to be represented with iN since it doesn't
      // distinguish between pointers and integers but they are different types
      // in GlobalISel. Coerce integers to pointers to address space 0 if the
      // context indicates a pointer.
      //
      bool OperandIsAPointer = SrcGIOrNull->isInOperandAPointer(i);

      if (IsIntrinsic) {
        // For G_INTRINSIC/G_INTRINSIC_W_SIDE_EFFECTS, the operand immediately
        // following the defs is an intrinsic ID.
        if (i == 0) {
          OperandMatcher &OM =
              InsnMatcher.addOperand(OpIdx++, SrcChild->getName(), TempOpIdx);
          OM.addPredicate<IntrinsicIDOperandMatcher>(II);
          continue;
        }

        // We have to check intrinsics for llvm_anyptr_ty and immarg parameters.
        //
        // Note that we have to look at the i-1th parameter, because we don't
        // have the intrinsic ID in the intrinsic's parameter list.
        OperandIsAPointer |= II->isParamAPointer(i - 1);
        OperandIsImmArg |= II->isParamImmArg(i - 1);
      }

      if (auto Error =
              importChildMatcher(Rule, InsnMatcher, SrcChild, OperandIsAPointer,
                                 OperandIsImmArg, OpIdx++, TempOpIdx))
        return std::move(Error);
    }
  }

  return InsnMatcher;
}
+
+Error GlobalISelEmitter::importComplexPatternOperandMatcher(
+ OperandMatcher &OM, Record *R, unsigned &TempOpIdx) const {
+ const auto &ComplexPattern = ComplexPatternEquivs.find(R);
+ if (ComplexPattern == ComplexPatternEquivs.end())
+ return failedImport("SelectionDAG ComplexPattern (" + R->getName() +
+ ") not mapped to GlobalISel");
+
+ OM.addPredicate<ComplexPatternOperandMatcher>(OM, *ComplexPattern->second);
+ TempOpIdx++;
+ return Error::success();
+}
+
+// Get the name to use for a pattern operand. For an anonymous physical register
+// input, this should use the register name.
+static StringRef getSrcChildName(const TreePatternNode *SrcChild,
+ Record *&PhysReg) {
+ StringRef SrcChildName = SrcChild->getName();
+ if (SrcChildName.empty() && SrcChild->isLeaf()) {
+ if (auto *ChildDefInit = dyn_cast<DefInit>(SrcChild->getLeafValue())) {
+ auto *ChildRec = ChildDefInit->getDef();
+ if (ChildRec->isSubClassOf("Register")) {
+ SrcChildName = ChildRec->getName();
+ PhysReg = ChildRec;
+ }
+ }
+ }
+
+ return SrcChildName;
+}
+
// Import the matcher for one child (operand) of a source pattern node into
// \p InsnMatcher at operand index \p OpIdx. Handles named/physreg operands,
// ComplexPatterns (leaf and operator forms), basic blocks, timm, literal
// and materialized integers, register classes, physical registers,
// immAllOnesV/immAllZerosV splat vectors, and nested instructions
// (recursing via createAndImportSelDAGMatcher). \p OperandIsAPointer forces
// a pointer type check; \p OperandIsImmArg suppresses type checks for
// operands required to be immediates.
Error GlobalISelEmitter::importChildMatcher(
    RuleMatcher &Rule, InstructionMatcher &InsnMatcher,
    const TreePatternNode *SrcChild, bool OperandIsAPointer,
    bool OperandIsImmArg, unsigned OpIdx, unsigned &TempOpIdx) {

  Record *PhysReg = nullptr;
  std::string SrcChildName = std::string(getSrcChildName(SrcChild, PhysReg));
  if (!SrcChild->isLeaf() &&
      SrcChild->getOperator()->isSubClassOf("ComplexPattern")) {
    // The "name" of a non-leaf complex pattern (MY_PAT $op1, $op2) is
    // "MY_PAT:op1:op2" and the ones with same "name" represent same operand.
    std::string PatternName = std::string(SrcChild->getOperator()->getName());
    for (unsigned i = 0; i < SrcChild->getNumChildren(); ++i) {
      PatternName += ":";
      PatternName += SrcChild->getChild(i)->getName();
    }
    SrcChildName = PatternName;
  }

  OperandMatcher &OM =
      PhysReg ? InsnMatcher.addPhysRegInput(PhysReg, OpIdx, TempOpIdx)
              : InsnMatcher.addOperand(OpIdx, SrcChildName, TempOpIdx);
  if (OM.isSameAsAnotherOperand())
    return Error::success();

  ArrayRef<TypeSetByHwMode> ChildTypes = SrcChild->getExtTypes();
  if (ChildTypes.size() != 1)
    return failedImport("Src pattern child has multiple results");

  // Check MBB's before the type check since they are not a known type.
  if (!SrcChild->isLeaf()) {
    if (SrcChild->getOperator()->isSubClassOf("SDNode")) {
      auto &ChildSDNI = CGP.getSDNodeInfo(SrcChild->getOperator());
      if (ChildSDNI.getSDClassName() == "BasicBlockSDNode") {
        OM.addPredicate<MBBOperandMatcher>();
        return Error::success();
      }
      if (SrcChild->getOperator()->getName() == "timm") {
        OM.addPredicate<ImmOperandMatcher>();

        // Add predicates, if any
        for (const TreePredicateCall &Call : SrcChild->getPredicateCalls()) {
          const TreePredicateFn &Predicate = Call.Fn;

          // Only handle immediate patterns for now
          if (Predicate.isImmediatePattern()) {
            OM.addPredicate<OperandImmPredicateMatcher>(Predicate);
          }
        }

        return Error::success();
      }
    }
  }

  // Immediate arguments have no meaningful type to check as they don't have
  // registers.
  if (!OperandIsImmArg) {
    if (auto Error =
            OM.addTypeCheckPredicate(ChildTypes.front(), OperandIsAPointer))
      return failedImport(toString(std::move(Error)) + " for Src operand (" +
                          to_string(*SrcChild) + ")");
  }

  // Check for nested instructions.
  if (!SrcChild->isLeaf()) {
    if (SrcChild->getOperator()->isSubClassOf("ComplexPattern")) {
      // When a ComplexPattern is used as an operator, it should do the same
      // thing as when used as a leaf. However, the children of the operator
      // name the sub-operands that make up the complex operand and we must
      // prepare to reference them in the renderer too.
      unsigned RendererID = TempOpIdx;
      if (auto Error = importComplexPatternOperandMatcher(
              OM, SrcChild->getOperator(), TempOpIdx))
        return Error;

      for (unsigned i = 0, e = SrcChild->getNumChildren(); i != e; ++i) {
        auto *SubOperand = SrcChild->getChild(i);
        if (!SubOperand->getName().empty()) {
          if (auto Error = Rule.defineComplexSubOperand(
                  SubOperand->getName(), SrcChild->getOperator(), RendererID, i,
                  SrcChildName))
            return Error;
        }
      }

      return Error::success();
    }

    auto MaybeInsnOperand = OM.addPredicate<InstructionOperandMatcher>(
        InsnMatcher.getRuleMatcher(), SrcChild->getName());
    if (!MaybeInsnOperand) {
      // This isn't strictly true. If the user were to provide exactly the same
      // matchers as the original operand then we could allow it. However, it's
      // simpler to not permit the redundant specification.
      return failedImport(
          "Nested instruction cannot be the same as another operand");
    }

    // Map the node to a gMIR instruction.
    InstructionOperandMatcher &InsnOperand = **MaybeInsnOperand;
    auto InsnMatcherOrError = createAndImportSelDAGMatcher(
        Rule, InsnOperand.getInsnMatcher(), SrcChild, TempOpIdx);
    if (auto Error = InsnMatcherOrError.takeError())
      return Error;

    return Error::success();
  }

  if (SrcChild->hasAnyPredicate())
    return failedImport("Src pattern child has unsupported predicate");

  // Check for constant immediates.
  if (auto *ChildInt = dyn_cast<IntInit>(SrcChild->getLeafValue())) {
    if (OperandIsImmArg) {
      // Checks for argument directly in operand list
      OM.addPredicate<LiteralIntOperandMatcher>(ChildInt->getValue());
    } else {
      // Checks for materialized constant
      OM.addPredicate<ConstantIntOperandMatcher>(ChildInt->getValue());
    }
    return Error::success();
  }

  // Check for def's like register classes or ComplexPattern's.
  if (auto *ChildDefInit = dyn_cast<DefInit>(SrcChild->getLeafValue())) {
    auto *ChildRec = ChildDefInit->getDef();

    // Resolve operand names a pending custom predicate (usesOperands) asked
    // to record; see createAndImportSelDAGMatcher().
    if (WaitingForNamedOperands) {
      auto PA = SrcChild->getNamesAsPredicateArg().begin();
      std::string Name = getScopedName(PA->getScope(), PA->getIdentifier());
      OM.addPredicate<RecordNamedOperandMatcher>(StoreIdxForName[Name], Name);
      --WaitingForNamedOperands;
    }

    // Check for register classes.
    if (ChildRec->isSubClassOf("RegisterClass") ||
        ChildRec->isSubClassOf("RegisterOperand")) {
      OM.addPredicate<RegisterBankOperandMatcher>(
          Target.getRegisterClass(getInitValueAsRegClass(ChildDefInit)));
      return Error::success();
    }

    if (ChildRec->isSubClassOf("Register")) {
      // This just be emitted as a copy to the specific register.
      ValueTypeByHwMode VT = ChildTypes.front().getValueTypeByHwMode();
      const CodeGenRegisterClass *RC =
          CGRegs.getMinimalPhysRegClass(ChildRec, &VT);
      if (!RC) {
        return failedImport(
            "Could not determine physical register class of pattern source");
      }

      OM.addPredicate<RegisterBankOperandMatcher>(*RC);
      return Error::success();
    }

    // Check for ValueType.
    if (ChildRec->isSubClassOf("ValueType")) {
      // We already added a type check as standard practice so this doesn't need
      // to do anything.
      return Error::success();
    }

    // Check for ComplexPattern's.
    if (ChildRec->isSubClassOf("ComplexPattern"))
      return importComplexPatternOperandMatcher(OM, ChildRec, TempOpIdx);

    if (ChildRec->isSubClassOf("ImmLeaf")) {
      return failedImport(
          "Src pattern child def is an unsupported tablegen class (ImmLeaf)");
    }

    // Place holder for SRCVALUE nodes. Nothing to do here.
    if (ChildRec->getName() == "srcvalue")
      return Error::success();

    const bool ImmAllOnesV = ChildRec->getName() == "immAllOnesV";
    if (ImmAllOnesV || ChildRec->getName() == "immAllZerosV") {
      // Match a build-vector of all-ones/all-zeros as a splat-immediate
      // predicate on a nested G_BUILD_VECTOR(_TRUNC).
      auto MaybeInsnOperand = OM.addPredicate<InstructionOperandMatcher>(
          InsnMatcher.getRuleMatcher(), SrcChild->getName(), false);
      InstructionOperandMatcher &InsnOperand = **MaybeInsnOperand;

      ValueTypeByHwMode VTy = ChildTypes.front().getValueTypeByHwMode();

      const CodeGenInstruction &BuildVector =
          Target.getInstruction(RK.getDef("G_BUILD_VECTOR"));
      const CodeGenInstruction &BuildVectorTrunc =
          Target.getInstruction(RK.getDef("G_BUILD_VECTOR_TRUNC"));

      // Treat G_BUILD_VECTOR as the canonical opcode, and G_BUILD_VECTOR_TRUNC
      // as an alternative.
      InsnOperand.getInsnMatcher().addPredicate<InstructionOpcodeMatcher>(
          ArrayRef({&BuildVector, &BuildVectorTrunc}));

      // TODO: Handle both G_BUILD_VECTOR and G_BUILD_VECTOR_TRUNC We could
      // theoretically not emit any opcode check, but getOpcodeMatcher currently
      // has to succeed.
      OperandMatcher &OM =
          InsnOperand.getInsnMatcher().addOperand(0, "", TempOpIdx);
      if (auto Error =
              OM.addTypeCheckPredicate(VTy, false /* OperandIsAPointer */))
        return failedImport(toString(std::move(Error)) +
                            " for result of Src pattern operator");

      InsnOperand.getInsnMatcher().addPredicate<VectorSplatImmPredicateMatcher>(
          ImmAllOnesV ? VectorSplatImmPredicateMatcher::AllOnes
                      : VectorSplatImmPredicateMatcher::AllZeros);
      return Error::success();
    }

    return failedImport(
        "Src pattern child def is an unsupported tablegen class");
  }

  return failedImport("Src pattern child is an unsupported kind");
}
+
// Import the renderer for one explicit use operand of the destination
// instruction \p DstMIBuilder. Dispatches on the child's kind: complex
// sub-operands, SDNodeXForms, basic blocks, (t)imm/fpimm operators, nested
// destination instructions (which allocate a temporary vreg and recurse),
// literal integers, registers/register classes, subregister indices and
// ComplexPatterns. Returns the (possibly advanced) action insertion point,
// or an Error for unsupported children.
Expected<action_iterator> GlobalISelEmitter::importExplicitUseRenderer(
    action_iterator InsertPt, RuleMatcher &Rule, BuildMIAction &DstMIBuilder,
    TreePatternNode *DstChild) {

  const auto &SubOperand = Rule.getComplexSubOperand(DstChild->getName());
  if (SubOperand) {
    DstMIBuilder.addRenderer<RenderComplexPatternOperand>(
        *std::get<0>(*SubOperand), DstChild->getName(),
        std::get<1>(*SubOperand), std::get<2>(*SubOperand));
    return InsertPt;
  }

  if (!DstChild->isLeaf()) {
    if (DstChild->getOperator()->isSubClassOf("SDNodeXForm")) {
      auto Child = DstChild->getChild(0);
      auto I = SDNodeXFormEquivs.find(DstChild->getOperator());
      if (I != SDNodeXFormEquivs.end()) {
        Record *XFormOpc = DstChild->getOperator()->getValueAsDef("Opcode");
        if (XFormOpc->getName() == "timm") {
          // If this is a TargetConstant, there won't be a corresponding
          // instruction to transform. Instead, this will refer directly to an
          // operand in an instruction's operand list.
          DstMIBuilder.addRenderer<CustomOperandRenderer>(*I->second,
                                                          Child->getName());
        } else {
          DstMIBuilder.addRenderer<CustomRenderer>(*I->second,
                                                   Child->getName());
        }

        return InsertPt;
      }
      return failedImport("SDNodeXForm " + Child->getName() +
                          " has no custom renderer");
    }

    // We accept 'bb' here. It's an operator because BasicBlockSDNode isn't
    // inline, but in MI it's just another operand.
    if (DstChild->getOperator()->isSubClassOf("SDNode")) {
      auto &ChildSDNI = CGP.getSDNodeInfo(DstChild->getOperator());
      if (ChildSDNI.getSDClassName() == "BasicBlockSDNode") {
        DstMIBuilder.addRenderer<CopyRenderer>(DstChild->getName());
        return InsertPt;
      }
    }

    // Similarly, imm is an operator in TreePatternNode's view but must be
    // rendered as operands.
    // FIXME: The target should be able to choose sign-extended when appropriate
    // (e.g. on Mips).
    if (DstChild->getOperator()->getName() == "timm") {
      DstMIBuilder.addRenderer<CopyRenderer>(DstChild->getName());
      return InsertPt;
    } else if (DstChild->getOperator()->getName() == "imm") {
      DstMIBuilder.addRenderer<CopyConstantAsImmRenderer>(DstChild->getName());
      return InsertPt;
    } else if (DstChild->getOperator()->getName() == "fpimm") {
      DstMIBuilder.addRenderer<CopyFConstantAsFPImmRenderer>(
          DstChild->getName());
      return InsertPt;
    }

    if (DstChild->getOperator()->isSubClassOf("Instruction")) {
      // A nested destination instruction: render its result through a
      // temporary register and import the sub-instruction before InsertPt.
      auto OpTy = getInstResultType(DstChild);
      if (!OpTy)
        return OpTy.takeError();

      unsigned TempRegID = Rule.allocateTempRegID();
      InsertPt =
          Rule.insertAction<MakeTempRegisterAction>(InsertPt, *OpTy, TempRegID);
      DstMIBuilder.addRenderer<TempRegRenderer>(TempRegID);

      auto InsertPtOrError = createAndImportSubInstructionRenderer(
          ++InsertPt, Rule, DstChild, TempRegID);
      if (auto Error = InsertPtOrError.takeError())
        return std::move(Error);
      return InsertPtOrError.get();
    }

    return failedImport("Dst pattern child isn't a leaf node or an MBB" +
                        llvm::to_string(*DstChild));
  }

  // It could be a specific immediate in which case we should just check for
  // that immediate.
  if (const IntInit *ChildIntInit =
          dyn_cast<IntInit>(DstChild->getLeafValue())) {
    DstMIBuilder.addRenderer<ImmRenderer>(ChildIntInit->getValue());
    return InsertPt;
  }

  // Otherwise, we're looking for a bog-standard RegisterClass operand.
  if (auto *ChildDefInit = dyn_cast<DefInit>(DstChild->getLeafValue())) {
    auto *ChildRec = ChildDefInit->getDef();

    ArrayRef<TypeSetByHwMode> ChildTypes = DstChild->getExtTypes();
    if (ChildTypes.size() != 1)
      return failedImport("Dst pattern child has multiple results");

    std::optional<LLTCodeGen> OpTyOrNone;
    if (ChildTypes.front().isMachineValueType())
      OpTyOrNone = MVTToLLT(ChildTypes.front().getMachineValueType().SimpleTy);
    if (!OpTyOrNone)
      return failedImport("Dst operand has an unsupported type");

    if (ChildRec->isSubClassOf("Register")) {
      DstMIBuilder.addRenderer<AddRegisterRenderer>(Target, ChildRec);
      return InsertPt;
    }

    if (ChildRec->isSubClassOf("RegisterClass") ||
        ChildRec->isSubClassOf("RegisterOperand") ||
        ChildRec->isSubClassOf("ValueType")) {
      if (ChildRec->isSubClassOf("RegisterOperand") &&
          !ChildRec->isValueUnset("GIZeroRegister")) {
        DstMIBuilder.addRenderer<CopyOrAddZeroRegRenderer>(
            DstChild->getName(), ChildRec->getValueAsDef("GIZeroRegister"));
        return InsertPt;
      }

      DstMIBuilder.addRenderer<CopyRenderer>(DstChild->getName());
      return InsertPt;
    }

    if (ChildRec->isSubClassOf("SubRegIndex")) {
      CodeGenSubRegIndex *SubIdx = CGRegs.getSubRegIdx(ChildRec);
      DstMIBuilder.addRenderer<ImmRenderer>(SubIdx->EnumValue);
      return InsertPt;
    }

    if (ChildRec->isSubClassOf("ComplexPattern")) {
      const auto &ComplexPattern = ComplexPatternEquivs.find(ChildRec);
      if (ComplexPattern == ComplexPatternEquivs.end())
        return failedImport(
            "SelectionDAG ComplexPattern not mapped to GlobalISel");

      const OperandMatcher &OM = Rule.getOperandMatcher(DstChild->getName());
      DstMIBuilder.addRenderer<RenderComplexPatternOperand>(
          *ComplexPattern->second, DstChild->getName(),
          OM.getAllocatedTemporariesBaseID());
      return InsertPt;
    }

    return failedImport(
        "Dst pattern child def is an unsupported tablegen class");
  }
  return failedImport("Dst pattern child is an unsupported kind");
}
+
+// Create the BuildMIAction for the root of the destination pattern and import
+// all of its operand renderers. Returns a reference to the builder so the
+// caller can attach further constraints or mutations to it.
+Expected<BuildMIAction &> GlobalISelEmitter::createAndImportInstructionRenderer(
+    RuleMatcher &M, InstructionMatcher &InsnMatcher, const TreePatternNode *Src,
+    const TreePatternNode *Dst) {
+  auto InsertPtOrError = createInstructionRenderer(M.actions_end(), M, Dst);
+  if (auto Error = InsertPtOrError.takeError())
+    return std::move(Error);
+
+  action_iterator InsertPt = InsertPtOrError.get();
+  BuildMIAction &DstMIBuilder = *static_cast<BuildMIAction *>(InsertPt->get());
+
+  // For each physical-register input matched on the source side, emit a COPY
+  // before the destination instruction: the physreg is rendered as a def and
+  // the matched value is copied into it.
+  for (auto PhysInput : InsnMatcher.getPhysRegInputs()) {
+    InsertPt = M.insertAction<BuildMIAction>(
+        InsertPt, M.allocateOutputInsnID(),
+        &Target.getInstruction(RK.getDef("COPY")));
+    BuildMIAction &CopyToPhysRegMIBuilder =
+        *static_cast<BuildMIAction *>(InsertPt->get());
+    CopyToPhysRegMIBuilder.addRenderer<AddRegisterRenderer>(Target,
+                                                            PhysInput.first,
+                                                            true);
+    CopyToPhysRegMIBuilder.addRenderer<CopyPhysRegRenderer>(PhysInput.first);
+  }
+
+  // Render defs first, then uses, mirroring the operand order of the MI.
+  if (auto Error = importExplicitDefRenderers(InsertPt, M, DstMIBuilder, Dst)
+                       .takeError())
+    return std::move(Error);
+
+  if (auto Error = importExplicitUseRenderers(InsertPt, M, DstMIBuilder, Dst)
+                       .takeError())
+    return std::move(Error);
+
+  return DstMIBuilder;
+}
+
+// Emit a non-root node of the destination pattern as its own instruction,
+// assign its result to the temporary register TempRegID, and attach the
+// register-class constraints required by the special-case pseudos
+// (INSERT_SUBREG, EXTRACT_SUBREG, SUBREG_TO_REG, REG_SEQUENCE).
+Expected<action_iterator>
+GlobalISelEmitter::createAndImportSubInstructionRenderer(
+    const action_iterator InsertPt, RuleMatcher &M, const TreePatternNode *Dst,
+    unsigned TempRegID) {
+  auto InsertPtOrError = createInstructionRenderer(InsertPt, M, Dst);
+
+  // TODO: Assert there's exactly one result.
+
+  if (auto Error = InsertPtOrError.takeError())
+    return std::move(Error);
+
+  BuildMIAction &DstMIBuilder =
+      *static_cast<BuildMIAction *>(InsertPtOrError.get()->get());
+
+  // Assign the result to TempReg.
+  DstMIBuilder.addRenderer<TempRegRenderer>(TempRegID, true);
+
+  InsertPtOrError =
+      importExplicitUseRenderers(InsertPtOrError.get(), M, DstMIBuilder, Dst);
+  if (auto Error = InsertPtOrError.takeError())
+    return std::move(Error);
+
+  // We need to make sure that when we import an INSERT_SUBREG as a
+  // subinstruction that it ends up being constrained to the correct super
+  // register and subregister classes.
+  auto OpName = Target.getInstruction(Dst->getOperator()).TheDef->getName();
+  if (OpName == "INSERT_SUBREG") {
+    auto SubClass = inferRegClassFromPattern(Dst->getChild(1));
+    if (!SubClass)
+      return failedImport(
+          "Cannot infer register class from INSERT_SUBREG operand #1");
+    std::optional<const CodeGenRegisterClass *> SuperClass =
+        inferSuperRegisterClassForNode(Dst->getExtType(0), Dst->getChild(0),
+                                       Dst->getChild(2));
+    if (!SuperClass)
+      return failedImport(
+          "Cannot infer register class for INSERT_SUBREG operand #0");
+    // The destination and the super register source of an INSERT_SUBREG must
+    // be the same register class.
+    M.insertAction<ConstrainOperandToRegClassAction>(
+        InsertPt, DstMIBuilder.getInsnID(), 0, **SuperClass);
+    M.insertAction<ConstrainOperandToRegClassAction>(
+        InsertPt, DstMIBuilder.getInsnID(), 1, **SuperClass);
+    M.insertAction<ConstrainOperandToRegClassAction>(
+        InsertPt, DstMIBuilder.getInsnID(), 2, **SubClass);
+    return InsertPtOrError.get();
+  }
+
+  if (OpName == "EXTRACT_SUBREG") {
+    // EXTRACT_SUBREG selects into a subregister COPY but unlike most
+    // instructions, the result register class is controlled by the
+    // subregisters of the operand. As a result, we must constrain the result
+    // class rather than check that it's already the right one.
+    auto SuperClass = inferRegClassFromPattern(Dst->getChild(0));
+    if (!SuperClass)
+      return failedImport(
+          "Cannot infer register class from EXTRACT_SUBREG operand #0");
+
+    auto SubIdx = inferSubRegIndexForNode(Dst->getChild(1));
+    if (!SubIdx)
+      return failedImport("EXTRACT_SUBREG child #1 is not a subreg index");
+
+    const auto SrcRCDstRCPair =
+        (*SuperClass)->getMatchingSubClassWithSubRegs(CGRegs, *SubIdx);
+    assert(SrcRCDstRCPair->second && "Couldn't find a matching subclass");
+    M.insertAction<ConstrainOperandToRegClassAction>(
+        InsertPt, DstMIBuilder.getInsnID(), 0, *SrcRCDstRCPair->second);
+    M.insertAction<ConstrainOperandToRegClassAction>(
+        InsertPt, DstMIBuilder.getInsnID(), 1, *SrcRCDstRCPair->first);
+
+    // We're done with this pattern! It's eligible for GISel emission; return
+    // it.
+    return InsertPtOrError.get();
+  }
+
+  // Similar to INSERT_SUBREG, we also have to handle SUBREG_TO_REG as a
+  // subinstruction.
+  if (OpName == "SUBREG_TO_REG") {
+    auto SubClass = inferRegClassFromPattern(Dst->getChild(1));
+    if (!SubClass)
+      return failedImport(
+          "Cannot infer register class from SUBREG_TO_REG child #1");
+    auto SuperClass = inferSuperRegisterClass(Dst->getExtType(0),
+                                              Dst->getChild(2));
+    if (!SuperClass)
+      return failedImport(
+          "Cannot infer register class for SUBREG_TO_REG operand #0");
+    M.insertAction<ConstrainOperandToRegClassAction>(
+        InsertPt, DstMIBuilder.getInsnID(), 0, **SuperClass);
+    M.insertAction<ConstrainOperandToRegClassAction>(
+        InsertPt, DstMIBuilder.getInsnID(), 2, **SubClass);
+    return InsertPtOrError.get();
+  }
+
+  if (OpName == "REG_SEQUENCE") {
+    auto SuperClass = inferRegClassFromPattern(Dst->getChild(0));
+    // BUGFIX: every other special case above checks the inference result
+    // before dereferencing; previously an uninferrable class was dereferenced
+    // blindly here, which is undefined behavior on an empty std::optional.
+    if (!SuperClass)
+      return failedImport(
+          "Cannot infer register class from REG_SEQUENCE operand #0");
+    M.insertAction<ConstrainOperandToRegClassAction>(
+        InsertPt, DstMIBuilder.getInsnID(), 0, **SuperClass);
+
+    unsigned Num = Dst->getNumChildren();
+    for (unsigned I = 1; I != Num; I += 2) {
+      TreePatternNode *SubRegChild = Dst->getChild(I + 1);
+
+      auto SubIdx = inferSubRegIndexForNode(SubRegChild);
+      if (!SubIdx)
+        return failedImport("REG_SEQUENCE child is not a subreg index");
+
+      const auto SrcRCDstRCPair =
+          (*SuperClass)->getMatchingSubClassWithSubRegs(CGRegs, *SubIdx);
+      assert(SrcRCDstRCPair->second && "Couldn't find a matching subclass");
+      M.insertAction<ConstrainOperandToRegClassAction>(
+          InsertPt, DstMIBuilder.getInsnID(), I, *SrcRCDstRCPair->second);
+    }
+
+    return InsertPtOrError.get();
+  }
+
+  // Default: derive the operand constraints from the instruction definition.
+  M.insertAction<ConstrainOperandsToDefinitionAction>(InsertPt,
+                                                      DstMIBuilder.getInsnID());
+  return InsertPtOrError.get();
+}
+
+// Insert a BuildMIAction for the destination instruction at InsertPt and
+// return an iterator to it. COPY_TO_REGCLASS / EXTRACT_SUBREG lower to plain
+// COPYs; their class / subregister constraints are attached elsewhere.
+Expected<action_iterator> GlobalISelEmitter::createInstructionRenderer(
+    action_iterator InsertPt, RuleMatcher &M, const TreePatternNode *Dst) {
+  // The dst root must be an instruction; give a more specific diagnostic for
+  // the common case of a stray ValueType operator.
+  Record *DstOp = Dst->getOperator();
+  if (!DstOp->isSubClassOf("Instruction")) {
+    if (DstOp->isSubClassOf("ValueType"))
+      return failedImport(
+          "Pattern operator isn't an instruction (it's a ValueType)");
+    return failedImport("Pattern operator isn't an instruction");
+  }
+
+  CodeGenInstruction *DstI = &Target.getInstruction(DstOp);
+  StringRef Name = DstI->TheDef->getName();
+  if (Name == "COPY_TO_REGCLASS" || Name == "EXTRACT_SUBREG")
+    DstI = &Target.getInstruction(RK.getDef("COPY"));
+
+  return M.insertAction<BuildMIAction>(InsertPt, M.allocateOutputInsnID(),
+                                       DstI);
+}
+
+// Add renderers for the defs ("outs") of the destination instruction. The
+// pattern's single named result maps onto def #0; any additional defs get
+// fresh temporary registers since a SelectionDAG pattern only names one
+// result.
+Expected<action_iterator> GlobalISelEmitter::importExplicitDefRenderers(
+    action_iterator InsertPt, RuleMatcher &M, BuildMIAction &DstMIBuilder,
+    const TreePatternNode *Dst) {
+  const CodeGenInstruction *DstI = DstMIBuilder.getCGI();
+  const unsigned NumDefs = DstI->Operands.NumDefs;
+  if (NumDefs == 0)
+    return InsertPt;
+
+  // Def #0 is copied from the matched source operand of the same name.
+  DstMIBuilder.addRenderer<CopyRenderer>(DstI->Operands[0].Name);
+
+  // Some instructions have multiple defs, but are missing a type entry
+  // (e.g. s_cc_out operands).
+  if (Dst->getExtTypes().size() < NumDefs)
+    return failedImport("unhandled discarded def");
+
+  // Patterns only handle a single result, so any result after the first is an
+  // implicitly dead def.
+  for (unsigned I = 1; I < NumDefs; ++I) {
+    const TypeSetByHwMode &ExtTy = Dst->getExtType(I);
+    if (!ExtTy.isMachineValueType())
+      return failedImport("unsupported typeset");
+
+    auto OpTy = MVTToLLT(ExtTy.getMachineValueType().SimpleTy);
+    if (!OpTy)
+      return failedImport("unsupported type");
+
+    unsigned TempRegID = M.allocateTempRegID();
+    InsertPt =
+        M.insertAction<MakeTempRegisterAction>(InsertPt, *OpTy, TempRegID);
+    // NOTE(review): the trailing bool arguments presumably render this temp
+    // as a (dead) def -- confirm against TempRegRenderer's signature.
+    DstMIBuilder.addRenderer<TempRegRenderer>(TempRegID, true, nullptr, true);
+  }
+
+  return InsertPt;
+}
+
+// Add renderers for the uses ("ins") of the destination instruction. Handles
+// the special pseudos EXTRACT_SUBREG, REG_SEQUENCE and COPY_TO_REGCLASS, and
+// fills in DefaultOps for predicate/optional-def operands the pattern does
+// not override.
+Expected<action_iterator> GlobalISelEmitter::importExplicitUseRenderers(
+    action_iterator InsertPt, RuleMatcher &M, BuildMIAction &DstMIBuilder,
+    const llvm::TreePatternNode *Dst) {
+  const CodeGenInstruction *DstI = DstMIBuilder.getCGI();
+  CodeGenInstruction *OrigDstI = &Target.getInstruction(Dst->getOperator());
+
+  StringRef Name = OrigDstI->TheDef->getName();
+  unsigned ExpectedDstINumUses = Dst->getNumChildren();
+
+  // EXTRACT_SUBREG needs to use a subregister COPY.
+  if (Name == "EXTRACT_SUBREG") {
+    if (!Dst->getChild(1)->isLeaf())
+      return failedImport("EXTRACT_SUBREG child #1 is not a leaf");
+    DefInit *SubRegInit = dyn_cast<DefInit>(Dst->getChild(1)->getLeafValue());
+    if (!SubRegInit)
+      return failedImport("EXTRACT_SUBREG child #1 is not a subreg index");
+
+    CodeGenSubRegIndex *SubIdx = CGRegs.getSubRegIdx(SubRegInit->getDef());
+    TreePatternNode *ValChild = Dst->getChild(0);
+    if (!ValChild->isLeaf()) {
+      // We really have to handle the source instruction, and then insert a
+      // copy from the subregister.
+      auto ExtractSrcTy = getInstResultType(ValChild);
+      if (!ExtractSrcTy)
+        return ExtractSrcTy.takeError();
+
+      unsigned TempRegID = M.allocateTempRegID();
+      InsertPt = M.insertAction<MakeTempRegisterAction>(
+          InsertPt, *ExtractSrcTy, TempRegID);
+
+      auto InsertPtOrError = createAndImportSubInstructionRenderer(
+          ++InsertPt, M, ValChild, TempRegID);
+      if (auto Error = InsertPtOrError.takeError())
+        return std::move(Error);
+
+      DstMIBuilder.addRenderer<TempRegRenderer>(TempRegID, false, SubIdx);
+      return InsertPt;
+    }
+
+    // If this is a source operand, this is just a subregister copy.
+    Record *RCDef = getInitValueAsRegClass(ValChild->getLeafValue());
+    if (!RCDef)
+      return failedImport("EXTRACT_SUBREG child #0 could not "
+                          "be coerced to a register class");
+
+    CodeGenRegisterClass *RC = CGRegs.getRegClass(RCDef);
+
+    const auto SrcRCDstRCPair =
+        RC->getMatchingSubClassWithSubRegs(CGRegs, SubIdx);
+    if (SrcRCDstRCPair) {
+      assert(SrcRCDstRCPair->second && "Couldn't find a matching subclass");
+      if (SrcRCDstRCPair->first != RC)
+        return failedImport("EXTRACT_SUBREG requires an additional COPY");
+    }
+
+    DstMIBuilder.addRenderer<CopySubRegRenderer>(Dst->getChild(0)->getName(),
+                                                 SubIdx);
+    return InsertPt;
+  }
+
+  // REG_SEQUENCE: child #0 names the class, then (value, subreg-index) pairs.
+  if (Name == "REG_SEQUENCE") {
+    if (!Dst->getChild(0)->isLeaf())
+      return failedImport("REG_SEQUENCE child #0 is not a leaf");
+
+    Record *RCDef = getInitValueAsRegClass(Dst->getChild(0)->getLeafValue());
+    if (!RCDef)
+      return failedImport("REG_SEQUENCE child #0 could not "
+                          "be coerced to a register class");
+
+    if ((ExpectedDstINumUses - 1) % 2 != 0)
+      return failedImport("Malformed REG_SEQUENCE");
+
+    for (unsigned I = 1; I != ExpectedDstINumUses; I += 2) {
+      TreePatternNode *ValChild = Dst->getChild(I);
+      TreePatternNode *SubRegChild = Dst->getChild(I + 1);
+
+      if (DefInit *SubRegInit =
+              dyn_cast<DefInit>(SubRegChild->getLeafValue())) {
+        CodeGenSubRegIndex *SubIdx = CGRegs.getSubRegIdx(SubRegInit->getDef());
+
+        auto InsertPtOrError =
+            importExplicitUseRenderer(InsertPt, M, DstMIBuilder, ValChild);
+        if (auto Error = InsertPtOrError.takeError())
+          return std::move(Error);
+        InsertPt = InsertPtOrError.get();
+        DstMIBuilder.addRenderer<SubRegIndexRenderer>(SubIdx);
+      }
+    }
+
+    return InsertPt;
+  }
+
+  // Render the explicit uses.
+  unsigned DstINumUses = OrigDstI->Operands.size() - OrigDstI->Operands.NumDefs;
+  if (Name == "COPY_TO_REGCLASS") {
+    DstINumUses--; // Ignore the class constraint.
+    ExpectedDstINumUses--;
+  }
+
+  // NumResults - This is the number of results produced by the instruction in
+  // the "outs" list.
+  unsigned NumResults = OrigDstI->Operands.NumDefs;
+
+  // Number of operands we know the output instruction must have. If it is
+  // variadic, we could have more operands.
+  unsigned NumFixedOperands = DstI->Operands.size();
+
+  // Loop over all of the fixed operands of the instruction pattern, emitting
+  // code to fill them all in. The node 'N' usually has number children equal to
+  // the number of input operands of the instruction. However, in cases where
+  // there are predicate operands for an instruction, we need to fill in the
+  // 'execute always' values. Match up the node operands to the instruction
+  // operands to do this.
+  unsigned Child = 0;
+
+  // Similarly to the code in TreePatternNode::ApplyTypeConstraints, count the
+  // number of operands at the end of the list which have default values.
+  // Those can come from the pattern if it provides enough arguments, or be
+  // filled in with the default if the pattern hasn't provided them. But any
+  // operand with a default value _before_ the last mandatory one will be
+  // filled in with their defaults unconditionally.
+  unsigned NonOverridableOperands = NumFixedOperands;
+  while (NonOverridableOperands > NumResults &&
+         CGP.operandHasDefault(DstI->Operands[NonOverridableOperands - 1].Rec))
+    --NonOverridableOperands;
+
+  unsigned NumDefaultOps = 0;
+  for (unsigned I = 0; I != DstINumUses; ++I) {
+    unsigned InstOpNo = DstI->Operands.NumDefs + I;
+
+    // Determine what to emit for this operand.
+    Record *OperandNode = DstI->Operands[InstOpNo].Rec;
+
+    // If the operand has default values, introduce them now.
+    if (CGP.operandHasDefault(OperandNode) &&
+        (InstOpNo < NonOverridableOperands || Child >= Dst->getNumChildren())) {
+      // This is a predicate or optional def operand which the pattern has not
+      // overridden, or which we aren't letting it override; emit the 'default
+      // ops' operands.
+
+      const CGIOperandList::OperandInfo &DstIOperand = DstI->Operands[InstOpNo];
+      DagInit *DefaultOps = DstIOperand.Rec->getValueAsDag("DefaultOps");
+      if (auto Error = importDefaultOperandRenderers(
+              InsertPt, M, DstMIBuilder, DefaultOps))
+        return std::move(Error);
+      ++NumDefaultOps;
+      continue;
+    }
+
+    auto InsertPtOrError = importExplicitUseRenderer(InsertPt, M, DstMIBuilder,
+                                                     Dst->getChild(Child));
+    if (auto Error = InsertPtOrError.takeError())
+      return std::move(Error);
+    InsertPt = InsertPtOrError.get();
+    ++Child;
+  }
+
+  // Every use must be accounted for either by a pattern child or a default.
+  if (NumDefaultOps + ExpectedDstINumUses != DstINumUses)
+    return failedImport("Expected " + llvm::to_string(DstINumUses) +
+                        " used operands but found " +
+                        llvm::to_string(ExpectedDstINumUses) +
+                        " explicit ones and " + llvm::to_string(NumDefaultOps) +
+                        " default ones");
+
+  return InsertPt;
+}
+
+// Render the operands supplied by an operand's DefaultOps list (predicate /
+// optional-def operands the pattern didn't override). Returns an Error on any
+// default op that cannot be rendered.
+Error GlobalISelEmitter::importDefaultOperandRenderers(
+    action_iterator InsertPt, RuleMatcher &M, BuildMIAction &DstMIBuilder,
+    DagInit *DefaultOps) const {
+  for (const auto *DefaultOp : DefaultOps->getArgs()) {
+    std::optional<LLTCodeGen> OpTyOrNone;
+
+    // Look through ValueType operators.
+    if (const DagInit *DefaultDagOp = dyn_cast<DagInit>(DefaultOp)) {
+      if (const DefInit *DefaultDagOperator =
+              dyn_cast<DefInit>(DefaultDagOp->getOperator())) {
+        if (DefaultDagOperator->getDef()->isSubClassOf("ValueType")) {
+          OpTyOrNone = MVTToLLT(getValueType(DefaultDagOperator->getDef()));
+          DefaultOp = DefaultDagOp->getArg(0);
+        }
+      }
+    }
+
+    if (const DefInit *DefaultDefOp = dyn_cast<DefInit>(DefaultOp)) {
+      auto Def = DefaultDefOp->getDef();
+      if (Def->getName() == "undef_tied_input") {
+        // BUGFIX: OpTyOrNone is only populated by the ValueType wrapper case
+        // above. Previously *OpTyOrNone was dereferenced unconditionally,
+        // which is undefined behavior when no type was given.
+        if (!OpTyOrNone)
+          return failedImport(
+              "Cannot determine type of undef_tied_input operand");
+        unsigned TempRegID = M.allocateTempRegID();
+        M.insertAction<MakeTempRegisterAction>(InsertPt, *OpTyOrNone,
+                                               TempRegID);
+        // Emit an IMPLICIT_DEF defining the temporary, then feed the temp to
+        // the destination instruction as the tied input.
+        InsertPt = M.insertAction<BuildMIAction>(
+            InsertPt, M.allocateOutputInsnID(),
+            &Target.getInstruction(RK.getDef("IMPLICIT_DEF")));
+        BuildMIAction &IDMIBuilder =
+            *static_cast<BuildMIAction *>(InsertPt->get());
+        IDMIBuilder.addRenderer<TempRegRenderer>(TempRegID);
+        DstMIBuilder.addRenderer<TempRegRenderer>(TempRegID);
+      } else {
+        DstMIBuilder.addRenderer<AddRegisterRenderer>(Target, Def);
+      }
+      continue;
+    }
+
+    if (const IntInit *DefaultIntOp = dyn_cast<IntInit>(DefaultOp)) {
+      DstMIBuilder.addRenderer<ImmRenderer>(DefaultIntOp->getValue());
+      continue;
+    }
+
+    return failedImport("Could not add default op");
+  }
+
+  return Error::success();
+}
+
+// Implicit physical-register defs aren't supported by the importer; reject
+// such patterns rather than emit a rule that silently drops them.
+Error GlobalISelEmitter::importImplicitDefRenderers(
+    BuildMIAction &DstMIBuilder,
+    const std::vector<Record *> &ImplicitDefs) const {
+  if (ImplicitDefs.empty())
+    return Error::success();
+  return failedImport("Pattern defines a physical register");
+}
+
+// Map a leaf dst-pattern node to the register class it names, or std::nullopt
+// if either the record or the class lookup comes up empty.
+std::optional<const CodeGenRegisterClass *>
+GlobalISelEmitter::getRegClassFromLeaf(TreePatternNode *Leaf) {
+  assert(Leaf && "Expected node?");
+  assert(Leaf->isLeaf() && "Expected leaf?");
+  if (Record *RCRec = getInitValueAsRegClass(Leaf->getLeafValue()))
+    if (CodeGenRegisterClass *RC = CGRegs.getRegClass(RCRec))
+      return RC;
+  return std::nullopt;
+}
+
+// Attempt to determine the register class produced by the dst-pattern node N.
+// Returns std::nullopt whenever the class cannot be determined unambiguously.
+std::optional<const CodeGenRegisterClass *>
+GlobalISelEmitter::inferRegClassFromPattern(TreePatternNode *N) {
+  if (!N)
+    return std::nullopt;
+
+  if (N->isLeaf())
+    return getRegClassFromLeaf(N);
+
+  // We don't have a leaf node, so we have to try and infer something. Check
+  // that we have an instruction that we can infer something from.
+
+  // Only handle things that produce a single type.
+  if (N->getNumTypes() != 1)
+    return std::nullopt;
+  Record *OpRec = N->getOperator();
+
+  // We only want instructions.
+  if (!OpRec->isSubClassOf("Instruction"))
+    return std::nullopt;
+
+  // Don't want to try and infer things when there could potentially be more
+  // than one candidate register class.
+  auto &Inst = Target.getInstruction(OpRec);
+  if (Inst.Operands.NumDefs > 1)
+    return std::nullopt;
+
+  // Handle any special-case instructions which we can safely infer register
+  // classes from.
+  StringRef InstName = Inst.TheDef->getName();
+  bool IsRegSequence = InstName == "REG_SEQUENCE";
+  if (IsRegSequence || InstName == "COPY_TO_REGCLASS") {
+    // If we have a COPY_TO_REGCLASS, then we need to handle it specially. It
+    // has the desired register class as the first child.
+    TreePatternNode *RCChild = N->getChild(IsRegSequence ? 0 : 1);
+    if (!RCChild->isLeaf())
+      return std::nullopt;
+    return getRegClassFromLeaf(RCChild);
+  }
+  if (InstName == "INSERT_SUBREG") {
+    // Infer from the super register source (child 0) plus the subreg index
+    // (child 2).
+    TreePatternNode *Child0 = N->getChild(0);
+    assert(Child0->getNumTypes() == 1 && "Unexpected number of types!");
+    const TypeSetByHwMode &VTy = Child0->getExtType(0);
+    return inferSuperRegisterClassForNode(VTy, Child0, N->getChild(2));
+  }
+  if (InstName == "EXTRACT_SUBREG") {
+    // Infer from the node's own result type and the subreg index (child 1).
+    assert(N->getNumTypes() == 1 && "Unexpected number of types!");
+    const TypeSetByHwMode &VTy = N->getExtType(0);
+    return inferSuperRegisterClass(VTy, N->getChild(1));
+  }
+
+  // Handle destination record types that we can safely infer a register class
+  // from.
+  const auto &DstIOperand = Inst.Operands[0];
+  Record *DstIOpRec = DstIOperand.Rec;
+  if (DstIOpRec->isSubClassOf("RegisterOperand")) {
+    DstIOpRec = DstIOpRec->getValueAsDef("RegClass");
+    const CodeGenRegisterClass &RC = Target.getRegisterClass(DstIOpRec);
+    return &RC;
+  }
+
+  if (DstIOpRec->isSubClassOf("RegisterClass")) {
+    const CodeGenRegisterClass &RC = Target.getRegisterClass(DstIOpRec);
+    return &RC;
+  }
+
+  return std::nullopt;
+}
+
+// Find a minimal allocatable register class which supports both the given
+// type and the subregister index named by SubRegIdxNode.
+std::optional<const CodeGenRegisterClass *>
+GlobalISelEmitter::inferSuperRegisterClass(const TypeSetByHwMode &Ty,
+                                           TreePatternNode *SubRegIdxNode) {
+  assert(SubRegIdxNode && "Expected subregister index node!");
+  // getSuperRegForSubReg needs a concrete ValueTypeByHwMode, and the index
+  // must be named by a leaf DefInit.
+  if (!Ty.isValueTypeByHwMode(false) || !SubRegIdxNode->isLeaf())
+    return std::nullopt;
+  auto *SubRegInit = dyn_cast<DefInit>(SubRegIdxNode->getLeafValue());
+  if (!SubRegInit)
+    return std::nullopt;
+  CodeGenSubRegIndex *SubIdx = CGRegs.getSubRegIdx(SubRegInit->getDef());
+
+  if (auto RC = Target.getSuperRegForSubReg(Ty.getValueTypeByHwMode(), CGRegs,
+                                            SubIdx,
+                                            /* MustBeAllocatable */ true))
+    return *RC;
+  return std::nullopt;
+}
+
+// Determine the register class for a super register node. An explicitly
+// written class on the node itself wins; the pattern author is assumed to
+// have chosen a class compatible with the subregister index. Only when none
+// is given do we infer one from the type and the subregister index node.
+std::optional<const CodeGenRegisterClass *>
+GlobalISelEmitter::inferSuperRegisterClassForNode(
+    const TypeSetByHwMode &Ty, TreePatternNode *SuperRegNode,
+    TreePatternNode *SubRegIdxNode) {
+  assert(SuperRegNode && "Expected super register node!");
+  auto ExplicitClass = inferRegClassFromPattern(SuperRegNode);
+  if (ExplicitClass)
+    return *ExplicitClass;
+  return inferSuperRegisterClass(Ty, SubRegIdxNode);
+}
+
+// Resolve a dst-pattern node to the subregister index it names, if any.
+// Only a leaf DefInit can name a subregister index.
+std::optional<CodeGenSubRegIndex *>
+GlobalISelEmitter::inferSubRegIndexForNode(TreePatternNode *SubRegIdxNode) {
+  if (SubRegIdxNode->isLeaf())
+    if (auto *SubRegInit = dyn_cast<DefInit>(SubRegIdxNode->getLeafValue()))
+      return CGRegs.getSubRegIdx(SubRegInit->getDef());
+  return std::nullopt;
+}
+
+Expected<RuleMatcher> GlobalISelEmitter::runOnPattern(const PatternToMatch &P) {
+ // Keep track of the matchers and actions to emit.
+ int Score = P.getPatternComplexity(CGP);
+ RuleMatcher M(P.getSrcRecord()->getLoc());
+ RuleMatcherScores[M.getRuleID()] = Score;
+ M.addAction<DebugCommentAction>(llvm::to_string(*P.getSrcPattern()) +
+ " => " +
+ llvm::to_string(*P.getDstPattern()));
+
+ SmallVector<Record *, 4> Predicates;
+ P.getPredicateRecords(Predicates);
+ if (auto Error = importRulePredicates(M, Predicates))
+ return std::move(Error);
+
+ // Next, analyze the pattern operators.
+ TreePatternNode *Src = P.getSrcPattern();
+ TreePatternNode *Dst = P.getDstPattern();
+
+ // If the root of either pattern isn't a simple operator, ignore it.
+ if (auto Err = isTrivialOperatorNode(Dst))
+ return failedImport("Dst pattern root isn't a trivial operator (" +
+ toString(std::move(Err)) + ")");
+ if (auto Err = isTrivialOperatorNode(Src))
+ return failedImport("Src pattern root isn't a trivial operator (" +
+ toString(std::move(Err)) + ")");
+
+ // The different predicates and matchers created during
+ // addInstructionMatcher use the RuleMatcher M to set up their
+ // instruction ID (InsnVarID) that are going to be used when
+ // M is going to be emitted.
+ // However, the code doing the emission still relies on the IDs
+ // returned during that process by the RuleMatcher when issuing
+ // the recordInsn opcodes.
+ // Because of that:
+ // 1. The order in which we created the predicates
+ // and such must be the same as the order in which we emit them,
+ // and
+ // 2. We need to reset the generation of the IDs in M somewhere between
+ // addInstructionMatcher and emit
+ //
+ // FIXME: Long term, we don't want to have to rely on this implicit
+ // naming being the same. One possible solution would be to have
+ // explicit operator for operation capture and reference those.
+ // The plus side is that it would expose opportunities to share
+ // the capture accross rules. The downside is that it would
+ // introduce a dependency between predicates (captures must happen
+ // before their first use.)
+ InstructionMatcher &InsnMatcherTemp = M.addInstructionMatcher(Src->getName());
+ unsigned TempOpIdx = 0;
+ auto InsnMatcherOrError =
+ createAndImportSelDAGMatcher(M, InsnMatcherTemp, Src, TempOpIdx);
+ if (auto Error = InsnMatcherOrError.takeError())
+ return std::move(Error);
+ InstructionMatcher &InsnMatcher = InsnMatcherOrError.get();
+
+ if (Dst->isLeaf()) {
+ Record *RCDef = getInitValueAsRegClass(Dst->getLeafValue());
+ if (RCDef) {
+ const CodeGenRegisterClass &RC = Target.getRegisterClass(RCDef);
+
+ // We need to replace the def and all its uses with the specified
+ // operand. However, we must also insert COPY's wherever needed.
+ // For now, emit a copy and let the register allocator clean up.
+ auto &DstI = Target.getInstruction(RK.getDef("COPY"));
+ const auto &DstIOperand = DstI.Operands[0];
+
+ OperandMatcher &OM0 = InsnMatcher.getOperand(0);
+ OM0.setSymbolicName(DstIOperand.Name);
+ M.defineOperand(OM0.getSymbolicName(), OM0);
+ OM0.addPredicate<RegisterBankOperandMatcher>(RC);
+
+ auto &DstMIBuilder =
+ M.addAction<BuildMIAction>(M.allocateOutputInsnID(), &DstI);
+ DstMIBuilder.addRenderer<CopyRenderer>(DstIOperand.Name);
+ DstMIBuilder.addRenderer<CopyRenderer>(Dst->getName());
+ M.addAction<ConstrainOperandToRegClassAction>(0, 0, RC);
+
+ // We're done with this pattern! It's eligible for GISel emission; return
+ // it.
+ ++NumPatternImported;
+ return std::move(M);
+ }
+
+ return failedImport("Dst pattern root isn't a known leaf");
+ }
+
+ // Start with the defined operands (i.e., the results of the root operator).
+ Record *DstOp = Dst->getOperator();
+ if (!DstOp->isSubClassOf("Instruction"))
+ return failedImport("Pattern operator isn't an instruction");
+
+ auto &DstI = Target.getInstruction(DstOp);
+ StringRef DstIName = DstI.TheDef->getName();
+
+ unsigned DstNumDefs = DstI.Operands.NumDefs,
+ SrcNumDefs = Src->getExtTypes().size();
+ if (DstNumDefs < SrcNumDefs) {
+ if (DstNumDefs != 0)
+ return failedImport("Src pattern result has more defs than dst MI (" +
+ to_string(SrcNumDefs) + " def(s) vs " +
+ to_string(DstNumDefs) + " def(s))");
+
+ bool FoundNoUsePred = false;
+ for (const auto &Pred : InsnMatcher.predicates()) {
+ if ((FoundNoUsePred = isa<NoUsePredicateMatcher>(Pred.get())))
+ break;
+ }
+ if (!FoundNoUsePred)
+ return failedImport("Src pattern result has " + to_string(SrcNumDefs) +
+ " def(s) without the HasNoUse predicate set to true "
+ "but Dst MI has no def");
+ }
+
+ // The root of the match also has constraints on the register bank so that it
+ // matches the result instruction.
+ unsigned OpIdx = 0;
+ unsigned N = std::min(DstNumDefs, SrcNumDefs);
+ for (unsigned I = 0; I < N; ++I) {
+ const TypeSetByHwMode &VTy = Src->getExtType(I);
+
+ const auto &DstIOperand = DstI.Operands[OpIdx];
+ Record *DstIOpRec = DstIOperand.Rec;
+ if (DstIName == "COPY_TO_REGCLASS") {
+ DstIOpRec = getInitValueAsRegClass(Dst->getChild(1)->getLeafValue());
+
+ if (DstIOpRec == nullptr)
+ return failedImport(
+ "COPY_TO_REGCLASS operand #1 isn't a register class");
+ } else if (DstIName == "REG_SEQUENCE") {
+ DstIOpRec = getInitValueAsRegClass(Dst->getChild(0)->getLeafValue());
+ if (DstIOpRec == nullptr)
+ return failedImport("REG_SEQUENCE operand #0 isn't a register class");
+ } else if (DstIName == "EXTRACT_SUBREG") {
+ auto InferredClass = inferRegClassFromPattern(Dst->getChild(0));
+ if (!InferredClass)
+ return failedImport("Could not infer class for EXTRACT_SUBREG operand #0");
+
+ // We can assume that a subregister is in the same bank as it's super
+ // register.
+ DstIOpRec = (*InferredClass)->getDef();
+ } else if (DstIName == "INSERT_SUBREG") {
+ auto MaybeSuperClass = inferSuperRegisterClassForNode(
+ VTy, Dst->getChild(0), Dst->getChild(2));
+ if (!MaybeSuperClass)
+ return failedImport(
+ "Cannot infer register class for INSERT_SUBREG operand #0");
+ // Move to the next pattern here, because the register class we found
+ // doesn't necessarily have a record associated with it. So, we can't
+ // set DstIOpRec using this.
+ OperandMatcher &OM = InsnMatcher.getOperand(OpIdx);
+ OM.setSymbolicName(DstIOperand.Name);
+ M.defineOperand(OM.getSymbolicName(), OM);
+ OM.addPredicate<RegisterBankOperandMatcher>(**MaybeSuperClass);
+ ++OpIdx;
+ continue;
+ } else if (DstIName == "SUBREG_TO_REG") {
+ auto MaybeRegClass = inferSuperRegisterClass(VTy, Dst->getChild(2));
+ if (!MaybeRegClass)
+ return failedImport(
+ "Cannot infer register class for SUBREG_TO_REG operand #0");
+ OperandMatcher &OM = InsnMatcher.getOperand(OpIdx);
+ OM.setSymbolicName(DstIOperand.Name);
+ M.defineOperand(OM.getSymbolicName(), OM);
+ OM.addPredicate<RegisterBankOperandMatcher>(**MaybeRegClass);
+ ++OpIdx;
+ continue;
+ } else if (DstIOpRec->isSubClassOf("RegisterOperand"))
+ DstIOpRec = DstIOpRec->getValueAsDef("RegClass");
+ else if (!DstIOpRec->isSubClassOf("RegisterClass"))
+ return failedImport("Dst MI def isn't a register class" +
+ to_string(*Dst));
+
+ OperandMatcher &OM = InsnMatcher.getOperand(OpIdx);
+ OM.setSymbolicName(DstIOperand.Name);
+ M.defineOperand(OM.getSymbolicName(), OM);
+ OM.addPredicate<RegisterBankOperandMatcher>(
+ Target.getRegisterClass(DstIOpRec));
+ ++OpIdx;
+ }
+
+ auto DstMIBuilderOrError =
+ createAndImportInstructionRenderer(M, InsnMatcher, Src, Dst);
+ if (auto Error = DstMIBuilderOrError.takeError())
+ return std::move(Error);
+ BuildMIAction &DstMIBuilder = DstMIBuilderOrError.get();
+
+ // Render the implicit defs.
+ // These are only added to the root of the result.
+ if (auto Error = importImplicitDefRenderers(DstMIBuilder, P.getDstRegs()))
+ return std::move(Error);
+
+ DstMIBuilder.chooseInsnToMutate(M);
+
+ // Constrain the registers to classes. This is normally derived from the
+ // emitted instruction but a few instructions require special handling.
+ if (DstIName == "COPY_TO_REGCLASS") {
+ // COPY_TO_REGCLASS does not provide operand constraints itself but the
+ // result is constrained to the class given by the second child.
+ Record *DstIOpRec =
+ getInitValueAsRegClass(Dst->getChild(1)->getLeafValue());
+
+ if (DstIOpRec == nullptr)
+ return failedImport("COPY_TO_REGCLASS operand #1 isn't a register class");
+
+ M.addAction<ConstrainOperandToRegClassAction>(
+ 0, 0, Target.getRegisterClass(DstIOpRec));
+
+ // We're done with this pattern! It's eligible for GISel emission; return
+ // it.
+ ++NumPatternImported;
+ return std::move(M);
+ }
+
+ if (DstIName == "EXTRACT_SUBREG") {
+ auto SuperClass = inferRegClassFromPattern(Dst->getChild(0));
+ if (!SuperClass)
+ return failedImport(
+ "Cannot infer register class from EXTRACT_SUBREG operand #0");
+
+ auto SubIdx = inferSubRegIndexForNode(Dst->getChild(1));
+ if (!SubIdx)
+ return failedImport("EXTRACT_SUBREG child #1 is not a subreg index");
+
+ // It would be nice to leave this constraint implicit but we're required
+ // to pick a register class so constrain the result to a register class
+ // that can hold the correct MVT.
+ //
+ // FIXME: This may introduce an extra copy if the chosen class doesn't
+ // actually contain the subregisters.
+ assert(Src->getExtTypes().size() == 1 &&
+ "Expected Src of EXTRACT_SUBREG to have one result type");
+
+ const auto SrcRCDstRCPair =
+ (*SuperClass)->getMatchingSubClassWithSubRegs(CGRegs, *SubIdx);
+ if (!SrcRCDstRCPair) {
+ return failedImport("subreg index is incompatible "
+ "with inferred reg class");
+ }
+
+ assert(SrcRCDstRCPair->second && "Couldn't find a matching subclass");
+ M.addAction<ConstrainOperandToRegClassAction>(0, 0, *SrcRCDstRCPair->second);
+ M.addAction<ConstrainOperandToRegClassAction>(0, 1, *SrcRCDstRCPair->first);
+
+ // We're done with this pattern! It's eligible for GISel emission; return
+ // it.
+ ++NumPatternImported;
+ return std::move(M);
+ }
+
+ if (DstIName == "INSERT_SUBREG") {
+ assert(Src->getExtTypes().size() == 1 &&
+ "Expected Src of INSERT_SUBREG to have one result type");
+ // We need to constrain the destination, a super regsister source, and a
+ // subregister source.
+ auto SubClass = inferRegClassFromPattern(Dst->getChild(1));
+ if (!SubClass)
+ return failedImport(
+ "Cannot infer register class from INSERT_SUBREG operand #1");
+ auto SuperClass = inferSuperRegisterClassForNode(
+ Src->getExtType(0), Dst->getChild(0), Dst->getChild(2));
+ if (!SuperClass)
+ return failedImport(
+ "Cannot infer register class for INSERT_SUBREG operand #0");
+ M.addAction<ConstrainOperandToRegClassAction>(0, 0, **SuperClass);
+ M.addAction<ConstrainOperandToRegClassAction>(0, 1, **SuperClass);
+ M.addAction<ConstrainOperandToRegClassAction>(0, 2, **SubClass);
+ ++NumPatternImported;
+ return std::move(M);
+ }
+
+ if (DstIName == "SUBREG_TO_REG") {
+ // We need to constrain the destination and subregister source.
+ assert(Src->getExtTypes().size() == 1 &&
+ "Expected Src of SUBREG_TO_REG to have one result type");
+
+ // Attempt to infer the subregister source from the first child. If it has
+ // an explicitly given register class, we'll use that. Otherwise, we will
+ // fail.
+ auto SubClass = inferRegClassFromPattern(Dst->getChild(1));
+ if (!SubClass)
+ return failedImport(
+ "Cannot infer register class from SUBREG_TO_REG child #1");
+ // We don't have a child to look at that might have a super register node.
+ auto SuperClass =
+ inferSuperRegisterClass(Src->getExtType(0), Dst->getChild(2));
+ if (!SuperClass)
+ return failedImport(
+ "Cannot infer register class for SUBREG_TO_REG operand #0");
+ M.addAction<ConstrainOperandToRegClassAction>(0, 0, **SuperClass);
+ M.addAction<ConstrainOperandToRegClassAction>(0, 2, **SubClass);
+ ++NumPatternImported;
+ return std::move(M);
+ }
+
+ if (DstIName == "REG_SEQUENCE") {
+ auto SuperClass = inferRegClassFromPattern(Dst->getChild(0));
+
+ M.addAction<ConstrainOperandToRegClassAction>(0, 0, **SuperClass);
+
+ unsigned Num = Dst->getNumChildren();
+ for (unsigned I = 1; I != Num; I += 2) {
+ TreePatternNode *SubRegChild = Dst->getChild(I + 1);
+
+ auto SubIdx = inferSubRegIndexForNode(SubRegChild);
+ if (!SubIdx)
+ return failedImport("REG_SEQUENCE child is not a subreg index");
+
+ const auto SrcRCDstRCPair =
+ (*SuperClass)->getMatchingSubClassWithSubRegs(CGRegs, *SubIdx);
+
+ M.addAction<ConstrainOperandToRegClassAction>(0, I,
+ *SrcRCDstRCPair->second);
+ }
+
+ ++NumPatternImported;
+ return std::move(M);
+ }
+
+ M.addAction<ConstrainOperandsToDefinitionAction>(0);
+
+ // We're done with this pattern! It's eligible for GISel emission; return it.
+ ++NumPatternImported;
+ return std::move(M);
+}
+
+// Emit imm predicate table and an enum to reference them with.
+// The 'Predicate_' part of the name is redundant but eliminating it is more
+// trouble than it's worth.
+//
+// Generic worker behind emitImmPredicateFns / emitMIPredicateFns. It scans
+// all "PatFrags" records, keeps those whose CodeFieldName field is non-empty
+// AND accepted by Filter, then emits:
+//   * an enum of GIPFP_<TypeIdentifier>_Predicate_<name> IDs (only when at
+//     least one record matched), and
+//   * a test<ArgName>Predicate_<TypeIdentifier>() member function that
+//     switches on the predicate ID and inlines each record's C++ predicate
+//     code verbatim into the corresponding case.
+// AdditionalArgs/AdditionalDeclarations let callers splice extra parameters
+// and local setup (e.g. MRI for MI predicates) into the generated function.
+void GlobalISelEmitter::emitCxxPredicateFns(
+    raw_ostream &OS, StringRef CodeFieldName, StringRef TypeIdentifier,
+    StringRef ArgType, StringRef ArgName, StringRef AdditionalArgs,
+    StringRef AdditionalDeclarations,
+    std::function<bool(const Record *R)> Filter) {
+  // Collect the PatFrags that actually carry predicate code and pass Filter.
+  std::vector<const Record *> MatchedRecords;
+  const auto &Defs = RK.getAllDerivedDefinitions("PatFrags");
+  std::copy_if(Defs.begin(), Defs.end(), std::back_inserter(MatchedRecords),
+               [&](Record *Record) {
+                 return !Record->getValueAsString(CodeFieldName).empty() &&
+                        Filter(Record);
+               });
+
+  if (!MatchedRecords.empty()) {
+    OS << "// PatFrag predicates.\n"
+       << "enum {\n";
+    // First enumerator is anchored one past the _Invalid sentinel; the rest
+    // follow implicitly.
+    std::string EnumeratorSeparator =
+        (" = GIPFP_" + TypeIdentifier + "_Invalid + 1,\n").str();
+    for (const auto *Record : MatchedRecords) {
+      OS << "  GIPFP_" << TypeIdentifier << "_Predicate_" << Record->getName()
+         << EnumeratorSeparator;
+      EnumeratorSeparator = ",\n";
+    }
+    OS << "};\n";
+  }
+
+  OS << "bool " << Target.getName() << "InstructionSelector::test" << ArgName
+     << "Predicate_" << TypeIdentifier << "(unsigned PredicateID, " << ArgType << " "
+     << ArgName << AdditionalArgs <<") const {\n"
+     << AdditionalDeclarations;
+  if (!AdditionalDeclarations.empty())
+    OS << "\n";
+  if (!MatchedRecords.empty())
+    OS << "  switch (PredicateID) {\n";
+  for (const auto *Record : MatchedRecords) {
+    // The user-supplied predicate code is expected to return; reaching the
+    // end of a case is a generator/record bug, hence the llvm_unreachable.
+    OS << "  case GIPFP_" << TypeIdentifier << "_Predicate_"
+       << Record->getName() << ": {\n"
+       << "    " << Record->getValueAsString(CodeFieldName) << "\n"
+       << "    llvm_unreachable(\"" << CodeFieldName
+       << " should have returned\");\n"
+       << "    return false;\n"
+       << "  }\n";
+  }
+  if (!MatchedRecords.empty())
+    OS << "  }\n";
+  OS << "  llvm_unreachable(\"Unknown predicate\");\n"
+     << "  return false;\n"
+     << "}\n";
+}
+
+// Emit testImmPredicate_<TypeIdentifier>() for PatFrags immediate predicates
+// ("ImmediateCode" field) whose argument type matches ArgType; Filter selects
+// which flavour (I64/APInt/APFloat) a record belongs to.
+void GlobalISelEmitter::emitImmPredicateFns(
+    raw_ostream &OS, StringRef TypeIdentifier, StringRef ArgType,
+    std::function<bool(const Record *R)> Filter) {
+  return emitCxxPredicateFns(OS, "ImmediateCode", TypeIdentifier, ArgType,
+                             "Imm", "", "", Filter);
+}
+
+// Emit testMIPredicate_MI() for PatFrags carrying "GISelPredicateCode".
+// The generated function takes the MachineInstr plus a captured-operands
+// array, and pre-declares MF/MRI locals for the inlined predicate bodies
+// ((void)MRI silences unused warnings when a predicate doesn't use it).
+void GlobalISelEmitter::emitMIPredicateFns(raw_ostream &OS) {
+  return emitCxxPredicateFns(
+      OS, "GISelPredicateCode", "MI", "const MachineInstr &", "MI",
+      ", const std::array<const MachineOperand *, 3> &Operands",
+      "  const MachineFunction &MF = *MI.getParent()->getParent();\n"
+      "  const MachineRegisterInfo &MRI = MF.getRegInfo();\n"
+      "  (void)MRI;",
+      [](const Record *R) { return true; });
+}
+
+// Greedily partition a flat list of rule matchers into GroupT nodes
+// (GroupMatcher hoists a shared first condition; SwitchMatcher builds a
+// jump table). Groups of fewer than 2 matchers are dissolved back into the
+// output since they provide no benefit. Ownership of created groups is
+// transferred to MatcherStorage; the returned vector holds raw pointers in
+// final emission order.
+template <class GroupT>
+std::vector<Matcher *> GlobalISelEmitter::optimizeRules(
+    ArrayRef<Matcher *> Rules,
+    std::vector<std::unique_ptr<Matcher>> &MatcherStorage) {
+
+  std::vector<Matcher *> OptRules;
+  std::unique_ptr<GroupT> CurrentGroup = std::make_unique<GroupT>();
+  assert(CurrentGroup->empty() && "Newly created group isn't empty!");
+  unsigned NumGroups = 0;
+
+  // Flush the group under construction into OptRules/MatcherStorage and
+  // start a fresh one.
+  auto ProcessCurrentGroup = [&]() {
+    if (CurrentGroup->empty())
+      // An empty group is good to be reused:
+      return;
+
+    // If the group isn't large enough to provide any benefit, move all the
+    // added rules out of it and make sure to re-create the group to properly
+    // re-initialize it:
+    if (CurrentGroup->size() < 2)
+      append_range(OptRules, CurrentGroup->matchers());
+    else {
+      CurrentGroup->finalize();
+      OptRules.push_back(CurrentGroup.get());
+      MatcherStorage.emplace_back(std::move(CurrentGroup));
+      ++NumGroups;
+    }
+    CurrentGroup = std::make_unique<GroupT>();
+  };
+  for (Matcher *Rule : Rules) {
+    // Greedily add as many matchers as possible to the current group:
+    if (CurrentGroup->addMatcher(*Rule))
+      continue;
+
+    ProcessCurrentGroup();
+    assert(CurrentGroup->empty() && "A group wasn't properly re-initialized");
+
+    // Try to add the pending matcher to a newly created empty group:
+    if (!CurrentGroup->addMatcher(*Rule))
+      // If we couldn't add the matcher to an empty group, that group type
+      // doesn't support that kind of matchers at all, so just skip it:
+      OptRules.push_back(Rule);
+  }
+  ProcessCurrentGroup();
+
+  LLVM_DEBUG(dbgs() << "NumGroups: " << NumGroups << "\n");
+  (void) NumGroups;
+  assert(CurrentGroup->empty() && "The last group wasn't properly processed");
+  return OptRules;
+}
+
+// Build the final byte-code MatchTable from all imported rules. When
+// Optimize is set: rules are first stably sorted by (opcode first-seen
+// order, operand count) so groupable rules become adjacent, then grouped by
+// GroupMatcher and finally by SwitchMatcher, with per-rule optimize() passes
+// between stages. WithCoverage threads rule-coverage instrumentation into
+// the emitted table.
+MatchTable
+GlobalISelEmitter::buildMatchTable(MutableArrayRef<RuleMatcher> Rules,
+                                   bool Optimize, bool WithCoverage) {
+  std::vector<Matcher *> InputRules;
+  for (Matcher &Rule : Rules)
+    InputRules.push_back(&Rule);
+
+  if (!Optimize)
+    return MatchTable::buildTable(InputRules, WithCoverage);
+
+  // Assign each opcode an ordinal in order of first appearance; sorting by
+  // it keeps same-opcode rules contiguous without perturbing priority.
+  unsigned CurrentOrdering = 0;
+  StringMap<unsigned> OpcodeOrder;
+  for (RuleMatcher &Rule : Rules) {
+    const StringRef Opcode = Rule.getOpcode();
+    assert(!Opcode.empty() && "Didn't expect an undefined opcode");
+    if (OpcodeOrder.count(Opcode) == 0)
+      OpcodeOrder[Opcode] = CurrentOrdering++;
+  }
+
+  llvm::stable_sort(InputRules, [&OpcodeOrder](const Matcher *A,
+                                               const Matcher *B) {
+    auto *L = static_cast<const RuleMatcher *>(A);
+    auto *R = static_cast<const RuleMatcher *>(B);
+    return std::make_tuple(OpcodeOrder[L->getOpcode()], L->getNumOperands()) <
+           std::make_tuple(OpcodeOrder[R->getOpcode()], R->getNumOperands());
+  });
+
+  for (Matcher *Rule : InputRules)
+    Rule->optimize();
+
+  std::vector<std::unique_ptr<Matcher>> MatcherStorage;
+  std::vector<Matcher *> OptRules =
+      optimizeRules<GroupMatcher>(InputRules, MatcherStorage);
+
+  for (Matcher *Rule : OptRules)
+    Rule->optimize();
+
+  OptRules = optimizeRules<SwitchMatcher>(OptRules, MatcherStorage);
+
+  return MatchTable::buildTable(OptRules, WithCoverage);
+}
+
+// Recursively optimize a group's contents: sort runs of rules that share a
+// concrete root-type first condition (so identical checks become adjacent),
+// then re-run the group/switch formation on the group's own matcher list.
+void GroupMatcher::optimize() {
+  // Make sure we only sort by a specific predicate within a range of rules that
+  // all have that predicate checked against a specific value (not a wildcard):
+  auto F = Matchers.begin();
+  auto T = F;
+  auto E = Matchers.end();
+  while (T != E) {
+    // Advance T to the end of the current run of rules whose first condition
+    // is a valid (non-wildcard) root type.
+    while (T != E) {
+      auto *R = static_cast<RuleMatcher *>(*T);
+      if (!R->getFirstConditionAsRootType().get().isValid())
+        break;
+      ++T;
+    }
+    // Stable sort preserves relative rule priority within equal keys.
+    std::stable_sort(F, T, [](Matcher *A, Matcher *B) {
+      auto *L = static_cast<RuleMatcher *>(A);
+      auto *R = static_cast<RuleMatcher *>(B);
+      return L->getFirstConditionAsRootType() <
+             R->getFirstConditionAsRootType();
+    });
+    if (T != E)
+      F = ++T;
+  }
+  GlobalISelEmitter::optimizeRules<GroupMatcher>(Matchers, MatcherStorage)
+      .swap(Matchers);
+  GlobalISelEmitter::optimizeRules<SwitchMatcher>(Matchers, MatcherStorage)
+      .swap(Matchers);
+}
+
+// Main driver of the backend. Imports every SelectionDAG PatternToMatch into
+// a RuleMatcher (skipping unsupported ones), then emits the generated
+// instruction-selector fragments, each guarded by its own GET_GLOBALISEL_*
+// preprocessor section: predicate bitset typedef, per-selector member
+// declarations/initializers, subtarget-feature computation, LLT and feature
+// bitset tables, complex-pattern and custom-renderer tables, immediate/MI
+// predicate functions, selectImpl(), and finally the match table itself.
+void GlobalISelEmitter::run(raw_ostream &OS) {
+  // Optionally load rule-coverage data; on any failure fall back to "no
+  // coverage info" with a warning rather than erroring out.
+  if (!UseCoverageFile.empty()) {
+    RuleCoverage = CodeGenCoverage();
+    auto RuleCoverageBufOrErr = MemoryBuffer::getFile(UseCoverageFile);
+    if (!RuleCoverageBufOrErr) {
+      PrintWarning(SMLoc(), "Missing rule coverage data");
+      RuleCoverage = std::nullopt;
+    } else {
+      if (!RuleCoverage->parse(*RuleCoverageBufOrErr.get(), Target.getName())) {
+        PrintWarning(SMLoc(), "Ignoring invalid or missing rule coverage data");
+        RuleCoverage = std::nullopt;
+      }
+    }
+  }
+
+  // Track the run-time opcode values
+  gatherOpcodeValues();
+  // Track the run-time LLT ID values
+  gatherTypeIDValues();
+
+  // Track the GINodeEquiv definitions.
+  gatherNodeEquivs();
+
+  emitSourceFileHeader(("Global Instruction Selector for the " +
+                        Target.getName() + " target").str(), OS);
+  std::vector<RuleMatcher> Rules;
+  // Look through the SelectionDAG patterns we found, possibly emitting some.
+  for (const PatternToMatch &Pat : CGP.ptms()) {
+    ++NumPatternTotal;
+
+    auto MatcherOrErr = runOnPattern(Pat);
+
+    // The pattern analysis can fail, indicating an unsupported pattern.
+    // Report that if we've been asked to do so.
+    if (auto Err = MatcherOrErr.takeError()) {
+      if (WarnOnSkippedPatterns) {
+        PrintWarning(Pat.getSrcRecord()->getLoc(),
+                     "Skipped pattern: " + toString(std::move(Err)));
+      } else {
+        consumeError(std::move(Err));
+      }
+      ++NumPatternImportsSkipped;
+      continue;
+    }
+
+    if (RuleCoverage) {
+      if (RuleCoverage->isCovered(MatcherOrErr->getRuleID()))
+        ++NumPatternsTested;
+      else
+        PrintWarning(Pat.getSrcRecord()->getLoc(),
+                     "Pattern is not covered by a test");
+    }
+    Rules.push_back(std::move(MatcherOrErr.get()));
+  }
+
+  // Comparison function to order records by name.
+  auto orderByName = [](const Record *A, const Record *B) {
+    return A->getName() < B->getName();
+  };
+
+  std::vector<Record *> ComplexPredicates =
+      RK.getAllDerivedDefinitions("GIComplexOperandMatcher");
+  llvm::sort(ComplexPredicates, orderByName);
+
+  std::vector<StringRef> CustomRendererFns;
+  transform(RK.getAllDerivedDefinitions("GICustomOperandRenderer"),
+            std::back_inserter(CustomRendererFns), [](const auto &Record) {
+              return Record->getValueAsString("RendererFn");
+            });
+  // Sort and remove duplicates to get a list of unique renderer functions, in
+  // case some were mentioned more than once.
+  llvm::sort(CustomRendererFns);
+  CustomRendererFns.erase(
+      std::unique(CustomRendererFns.begin(), CustomRendererFns.end()),
+      CustomRendererFns.end());
+
+  // Size the MatcherState scratch storage by the largest renderer-fn count
+  // any rule needs.
+  unsigned MaxTemporaries = 0;
+  for (const auto &Rule : Rules)
+    MaxTemporaries = std::max(MaxTemporaries, Rule.countRendererFns());
+
+  OS << "#ifdef GET_GLOBALISEL_PREDICATE_BITSET\n"
+     << "const unsigned MAX_SUBTARGET_PREDICATES = " << SubtargetFeatures.size()
+     << ";\n"
+     << "using PredicateBitset = "
+        "llvm::PredicateBitsetImpl<MAX_SUBTARGET_PREDICATES>;\n"
+     << "#endif // ifdef GET_GLOBALISEL_PREDICATE_BITSET\n\n";
+
+  OS << "#ifdef GET_GLOBALISEL_TEMPORARIES_DECL\n"
+     << "  mutable MatcherState State;\n"
+     << "  typedef "
+        "ComplexRendererFns("
+     << Target.getName()
+     << "InstructionSelector::*ComplexMatcherMemFn)(MachineOperand &) const;\n"
+
+     << "  typedef void(" << Target.getName()
+     << "InstructionSelector::*CustomRendererFn)(MachineInstrBuilder &, const "
+        "MachineInstr &, int) "
+        "const;\n"
+     << "  const ISelInfoTy<PredicateBitset, ComplexMatcherMemFn, "
+        "CustomRendererFn> "
+        "ISelInfo;\n";
+  OS << "  static " << Target.getName()
+     << "InstructionSelector::ComplexMatcherMemFn ComplexPredicateFns[];\n"
+     << "  static " << Target.getName()
+     << "InstructionSelector::CustomRendererFn CustomRenderers[];\n"
+     << "  bool testImmPredicate_I64(unsigned PredicateID, int64_t Imm) const "
+        "override;\n"
+     << "  bool testImmPredicate_APInt(unsigned PredicateID, const APInt &Imm) "
+        "const override;\n"
+     << "  bool testImmPredicate_APFloat(unsigned PredicateID, const APFloat "
+        "&Imm) const override;\n"
+     << "  const int64_t *getMatchTable() const override;\n"
+     << "  bool testMIPredicate_MI(unsigned PredicateID, const MachineInstr &MI"
+        ", const std::array<const MachineOperand *, 3> &Operands) "
+        "const override;\n"
+     << "#endif // ifdef GET_GLOBALISEL_TEMPORARIES_DECL\n\n";
+
+  OS << "#ifdef GET_GLOBALISEL_TEMPORARIES_INIT\n"
+     << ", State(" << MaxTemporaries << "),\n"
+     << "ISelInfo(TypeObjects, NumTypeObjects, FeatureBitsets"
+     << ", ComplexPredicateFns, CustomRenderers)\n"
+     << "#endif // ifdef GET_GLOBALISEL_TEMPORARIES_INIT\n\n";
+
+  OS << "#ifdef GET_GLOBALISEL_IMPL\n";
+  SubtargetFeatureInfo::emitSubtargetFeatureBitEnumeration(SubtargetFeatures,
+                                                           OS);
+
+  // Separate subtarget features by how often they must be recomputed.
+  SubtargetFeatureInfoMap ModuleFeatures;
+  std::copy_if(SubtargetFeatures.begin(), SubtargetFeatures.end(),
+               std::inserter(ModuleFeatures, ModuleFeatures.end()),
+               [](const SubtargetFeatureInfoMap::value_type &X) {
+                 return !X.second.mustRecomputePerFunction();
+               });
+  SubtargetFeatureInfoMap FunctionFeatures;
+  std::copy_if(SubtargetFeatures.begin(), SubtargetFeatures.end(),
+               std::inserter(FunctionFeatures, FunctionFeatures.end()),
+               [](const SubtargetFeatureInfoMap::value_type &X) {
+                 return X.second.mustRecomputePerFunction();
+               });
+
+  SubtargetFeatureInfo::emitComputeAvailableFeatures(
+      Target.getName(), "InstructionSelector", "computeAvailableModuleFeatures",
+      ModuleFeatures, OS);
+
+
+  OS << "void " << Target.getName() << "InstructionSelector"
+        "::setupGeneratedPerFunctionState(MachineFunction &MF) {\n"
+        "  AvailableFunctionFeatures = computeAvailableFunctionFeatures("
+        "(const " << Target.getName() << "Subtarget *)&MF.getSubtarget(), &MF);\n"
+        "}\n";
+
+  SubtargetFeatureInfo::emitComputeAvailableFeatures(
+      Target.getName(), "InstructionSelector",
+      "computeAvailableFunctionFeatures", FunctionFeatures, OS,
+      "const MachineFunction *MF");
+
+  // Emit a table containing the LLT objects needed by the matcher and an enum
+  // for the matcher to reference them with.
+  std::vector<LLTCodeGen> TypeObjects;
+  append_range(TypeObjects, KnownTypes);
+  llvm::sort(TypeObjects);
+  OS << "// LLT Objects.\n"
+     << "enum {\n";
+  for (const auto &TypeObject : TypeObjects) {
+    OS << "  ";
+    TypeObject.emitCxxEnumValue(OS);
+    OS << ",\n";
+  }
+  OS << "};\n";
+  OS << "const static size_t NumTypeObjects = " << TypeObjects.size() << ";\n"
+     << "const static LLT TypeObjects[] = {\n";
+  for (const auto &TypeObject : TypeObjects) {
+    OS << "  ";
+    TypeObject.emitCxxConstructorCall(OS);
+    OS << ",\n";
+  }
+  OS << "};\n\n";
+
+  // Emit a table containing the PredicateBitsets objects needed by the matcher
+  // and an enum for the matcher to reference them with.
+  std::vector<std::vector<Record *>> FeatureBitsets;
+  FeatureBitsets.reserve(Rules.size());
+  for (auto &Rule : Rules)
+    FeatureBitsets.push_back(Rule.getRequiredFeatures());
+  // Sort by size first, then lexicographically by feature names, so that
+  // std::unique below can collapse duplicate bitsets.
+  llvm::sort(FeatureBitsets, [&](const std::vector<Record *> &A,
+                                 const std::vector<Record *> &B) {
+    if (A.size() < B.size())
+      return true;
+    if (A.size() > B.size())
+      return false;
+    for (auto Pair : zip(A, B)) {
+      if (std::get<0>(Pair)->getName() < std::get<1>(Pair)->getName())
+        return true;
+      if (std::get<0>(Pair)->getName() > std::get<1>(Pair)->getName())
+        return false;
+    }
+    return false;
+  });
+  FeatureBitsets.erase(
+      std::unique(FeatureBitsets.begin(), FeatureBitsets.end()),
+      FeatureBitsets.end());
+  OS << "// Feature bitsets.\n"
+     << "enum {\n"
+     << "  GIFBS_Invalid,\n";
+  for (const auto &FeatureBitset : FeatureBitsets) {
+    if (FeatureBitset.empty())
+      continue;
+    OS << "  " << getNameForFeatureBitset(FeatureBitset) << ",\n";
+  }
+  OS << "};\n"
+     << "const static PredicateBitset FeatureBitsets[] {\n"
+     << "  {}, // GIFBS_Invalid\n";
+  for (const auto &FeatureBitset : FeatureBitsets) {
+    if (FeatureBitset.empty())
+      continue;
+    OS << "  {";
+    for (const auto &Feature : FeatureBitset) {
+      const auto &I = SubtargetFeatures.find(Feature);
+      assert(I != SubtargetFeatures.end() && "Didn't import predicate?");
+      OS << I->second.getEnumBitName() << ", ";
+    }
+    OS << "},\n";
+  }
+  OS << "};\n\n";
+
+  // Emit complex predicate table and an enum to reference them with.
+  OS << "// ComplexPattern predicates.\n"
+     << "enum {\n"
+     << "  GICP_Invalid,\n";
+  for (const auto &Record : ComplexPredicates)
+    OS << "  GICP_" << Record->getName() << ",\n";
+  OS << "};\n"
+     << "// See constructor for table contents\n\n";
+
+  // The three flavours are mutually exclusive: I64 = neither APFloat nor
+  // APInt; the other two are keyed on their respective record bits.
+  emitImmPredicateFns(OS, "I64", "int64_t", [](const Record *R) {
+    bool Unset;
+    return !R->getValueAsBitOrUnset("IsAPFloat", Unset) &&
+           !R->getValueAsBit("IsAPInt");
+  });
+  emitImmPredicateFns(OS, "APFloat", "const APFloat &", [](const Record *R) {
+    bool Unset;
+    return R->getValueAsBitOrUnset("IsAPFloat", Unset);
+  });
+  emitImmPredicateFns(OS, "APInt", "const APInt &", [](const Record *R) {
+    return R->getValueAsBit("IsAPInt");
+  });
+  emitMIPredicateFns(OS);
+  OS << "\n";
+
+  OS << Target.getName() << "InstructionSelector::ComplexMatcherMemFn\n"
+     << Target.getName() << "InstructionSelector::ComplexPredicateFns[] = {\n"
+     << "  nullptr, // GICP_Invalid\n";
+  for (const auto &Record : ComplexPredicates)
+    OS << "  &" << Target.getName()
+       << "InstructionSelector::" << Record->getValueAsString("MatcherFn")
+       << ", // " << Record->getName() << "\n";
+  OS << "};\n\n";
+
+  OS << "// Custom renderers.\n"
+     << "enum {\n"
+     << "  GICR_Invalid,\n";
+  for (const auto &Fn : CustomRendererFns)
+    OS << "  GICR_" << Fn << ",\n";
+  OS << "};\n";
+
+  OS << Target.getName() << "InstructionSelector::CustomRendererFn\n"
+     << Target.getName() << "InstructionSelector::CustomRenderers[] = {\n"
+     << "  nullptr, // GICR_Invalid\n";
+  for (const auto &Fn : CustomRendererFns)
+    OS << "  &" << Target.getName() << "InstructionSelector::" << Fn << ",\n";
+  OS << "};\n\n";
+
+  // Order rules: higher score first, then by the matcher's own priority
+  // relation (stable sort keeps import order for ties).
+  llvm::stable_sort(Rules, [&](const RuleMatcher &A, const RuleMatcher &B) {
+    int ScoreA = RuleMatcherScores[A.getRuleID()];
+    int ScoreB = RuleMatcherScores[B.getRuleID()];
+    if (ScoreA > ScoreB)
+      return true;
+    if (ScoreB > ScoreA)
+      return false;
+    if (A.isHigherPriorityThan(B)) {
+      assert(!B.isHigherPriorityThan(A) && "Cannot be more important "
+                                           "and less important at "
+                                           "the same time");
+      return true;
+    }
+    return false;
+  });
+
+  OS << "bool " << Target.getName()
+     << "InstructionSelector::selectImpl(MachineInstr &I, CodeGenCoverage "
+        "&CoverageInfo) const {\n"
+     << "  MachineFunction &MF = *I.getParent()->getParent();\n"
+     << "  MachineRegisterInfo &MRI = MF.getRegInfo();\n"
+     << "  const PredicateBitset AvailableFeatures = getAvailableFeatures();\n"
+     << "  NewMIVector OutMIs;\n"
+     << "  State.MIs.clear();\n"
+     << "  State.MIs.push_back(&I);\n\n"
+     << "  if (executeMatchTable(*this, OutMIs, State, ISelInfo"
+     << ", getMatchTable(), TII, MRI, TRI, RBI, AvailableFeatures"
+     << ", CoverageInfo)) {\n"
+     << "    return true;\n"
+     << "  }\n\n"
+     << "  return false;\n"
+     << "}\n\n";
+
+  const MatchTable Table =
+      buildMatchTable(Rules, OptimizeMatchTable, GenerateCoverage);
+  OS << "const int64_t *" << Target.getName()
+     << "InstructionSelector::getMatchTable() const {\n";
+  Table.emitDeclaration(OS);
+  OS << "  return ";
+  Table.emitUse(OS);
+  OS << ";\n}\n";
+  OS << "#endif // ifdef GET_GLOBALISEL_IMPL\n";
+
+  OS << "#ifdef GET_GLOBALISEL_PREDICATES_DECL\n"
+     << "PredicateBitset AvailableModuleFeatures;\n"
+     << "mutable PredicateBitset AvailableFunctionFeatures;\n"
+     << "PredicateBitset getAvailableFeatures() const {\n"
+     << "  return AvailableModuleFeatures | AvailableFunctionFeatures;\n"
+     << "}\n"
+     << "PredicateBitset\n"
+     << "computeAvailableModuleFeatures(const " << Target.getName()
+     << "Subtarget *Subtarget) const;\n"
+     << "PredicateBitset\n"
+     << "computeAvailableFunctionFeatures(const " << Target.getName()
+     << "Subtarget *Subtarget,\n"
+     << "                                 const MachineFunction *MF) const;\n"
+     << "void setupGeneratedPerFunctionState(MachineFunction &MF) override;\n"
+     << "#endif // ifdef GET_GLOBALISEL_PREDICATES_DECL\n";
+
+  OS << "#ifdef GET_GLOBALISEL_PREDICATES_INIT\n"
+     << "AvailableModuleFeatures(computeAvailableModuleFeatures(&STI)),\n"
+     << "AvailableFunctionFeatures()\n"
+     << "#endif // ifdef GET_GLOBALISEL_PREDICATES_INIT\n";
+}
+
+// Register Predicate as a subtarget feature if not already known, assigning
+// it the next sequential bit index in the feature bitset.
+void GlobalISelEmitter::declareSubtargetFeature(Record *Predicate) {
+  if (SubtargetFeatures.count(Predicate) == 0)
+    SubtargetFeatures.emplace(
+        Predicate, SubtargetFeatureInfo(Predicate, SubtargetFeatures.size()));
+}
+
+// Per-rule optimization: hoist all ComplexPattern operand predicates out of
+// the instruction matchers into EpilogueMatchers so they run last, then sort
+// the epilogue for deterministic, cache-friendly emission order.
+void RuleMatcher::optimize() {
+  for (auto &Item : InsnVariableIDs) {
+    InstructionMatcher &InsnMatcher = *Item.first;
+    for (auto &OM : InsnMatcher.operands()) {
+      // Complex Patterns are usually expensive and they relatively rarely fail
+      // on their own: more often we end up throwing away all the work done by a
+      // matching part of a complex pattern because some other part of the
+      // enclosing pattern didn't match. All of this makes it beneficial to
+      // delay complex patterns until the very end of the rule matching,
+      // especially for targets having lots of complex patterns.
+      for (auto &OP : OM->predicates())
+        if (isa<ComplexPatternOperandMatcher>(OP))
+          EpilogueMatchers.emplace_back(std::move(OP));
+      // Moved-from predicate slots are left null; drop them.
+      OM->eraseNullPredicates();
+    }
+    InsnMatcher.optimize();
+  }
+  llvm::sort(EpilogueMatchers, [](const std::unique_ptr<PredicateMatcher> &L,
+                                  const std::unique_ptr<PredicateMatcher> &R) {
+    return std::make_tuple(L->getKind(), L->getInsnVarID(), L->getOpIdx()) <
+           std::make_tuple(R->getKind(), R->getInsnVarID(), R->getOpIdx());
+  });
+}
+
+// True iff this rule has at least one hoistable leading condition: either an
+// instruction-level predicate on the front matcher, or any operand predicate
+// that is not a nested InstructionOperandMatcher.
+bool RuleMatcher::hasFirstCondition() const {
+  if (insnmatchers_empty())
+    return false;
+  InstructionMatcher &Matcher = insnmatchers_front();
+  if (!Matcher.predicates_empty())
+    return true;
+  for (auto &OM : Matcher.operands())
+    for (auto &OP : OM->predicates())
+      if (!isa<InstructionOperandMatcher>(OP))
+        return true;
+  return false;
+}
+
+// Return (without removing) the same leading condition hasFirstCondition()
+// reports: instruction predicates first, then the first non-nested operand
+// predicate. Precondition: hasFirstCondition() is true.
+const PredicateMatcher &RuleMatcher::getFirstCondition() const {
+  assert(!insnmatchers_empty() &&
+         "Trying to get a condition from an empty RuleMatcher");
+
+  InstructionMatcher &Matcher = insnmatchers_front();
+  if (!Matcher.predicates_empty())
+    return **Matcher.predicates_begin();
+  // If there is no more predicate on the instruction itself, look at its
+  // operands.
+  for (auto &OM : Matcher.operands())
+    for (auto &OP : OM->predicates())
+      if (!isa<InstructionOperandMatcher>(OP))
+        return *OP;
+
+  llvm_unreachable("Trying to get a condition from an InstructionMatcher with "
+                   "no conditions");
+}
+
+// Remove and return ownership of the leading condition; must mirror the
+// traversal order of getFirstCondition() so callers can pair the two.
+std::unique_ptr<PredicateMatcher> RuleMatcher::popFirstCondition() {
+  assert(!insnmatchers_empty() &&
+         "Trying to pop a condition from an empty RuleMatcher");
+
+  InstructionMatcher &Matcher = insnmatchers_front();
+  if (!Matcher.predicates_empty())
+    return Matcher.predicates_pop_front();
+  // If there is no more predicate on the instruction itself, look at its
+  // operands.
+  for (auto &OM : Matcher.operands())
+    for (auto &OP : OM->predicates())
+      if (!isa<InstructionOperandMatcher>(OP)) {
+        // Moving out leaves a null slot behind; erase it immediately so the
+        // operand matcher stays well-formed.
+        std::unique_ptr<PredicateMatcher> Result = std::move(OP);
+        OM->eraseNullPredicates();
+        return Result;
+      }
+
+  llvm_unreachable("Trying to pop a condition from an InstructionMatcher with "
+                   "no conditions");
+}
+
+// Decide whether a rule whose first condition is Predicate can join this
+// group: an empty group accepts any root-instruction predicate; a non-empty
+// group only accepts predicates identical to its representative's first
+// condition (that shared condition is what finalize() later hoists).
+bool GroupMatcher::candidateConditionMatches(
+    const PredicateMatcher &Predicate) const {
+
+  if (empty()) {
+    // Sharing predicates for nested instructions is not supported yet as we
+    // currently don't hoist the GIM_RecordInsn's properly, therefore we can
+    // only work on the original root instruction (InsnVarID == 0):
+    if (Predicate.getInsnVarID() != 0)
+      return false;
+    // ... otherwise an empty group can handle any predicate with no specific
+    // requirements:
+    return true;
+  }
+
+  const Matcher &Representative = **Matchers.begin();
+  const auto &RepresentativeCondition = Representative.getFirstCondition();
+  // ... if not empty, the group can only accommodate matchers with the exact
+  // same first condition:
+  return Predicate.isIdentical(RepresentativeCondition);
+}
+
+// Try to append Candidate to this group. Returns false (and leaves the group
+// unchanged) if Candidate has no leading condition or its condition is not
+// compatible with the group's shared one.
+bool GroupMatcher::addMatcher(Matcher &Candidate) {
+  if (!Candidate.hasFirstCondition())
+    return false;
+
+  const PredicateMatcher &Predicate = Candidate.getFirstCondition();
+  if (!candidateConditionMatches(Predicate))
+    return false;
+
+  Matchers.push_back(&Candidate);
+  return true;
+}
+
+// Hoist every leading condition shared by ALL member rules into Conditions,
+// popping it from each member. Iterates until some member runs out of
+// conditions or the leading conditions diverge. Must be called exactly once.
+void GroupMatcher::finalize() {
+  assert(Conditions.empty() && "Already finalized?");
+  if (empty())
+    return;
+
+  Matcher &FirstRule = **Matchers.begin();
+  for (;;) {
+    // All the checks are expected to succeed during the first iteration:
+    for (const auto &Rule : Matchers)
+      if (!Rule->hasFirstCondition())
+        return;
+    const auto &FirstCondition = FirstRule.getFirstCondition();
+    for (unsigned I = 1, E = Matchers.size(); I < E; ++I)
+      if (!Matchers[I]->getFirstCondition().isIdentical(FirstCondition))
+        return;
+
+    Conditions.push_back(FirstRule.popFirstCondition());
+    for (unsigned I = 1, E = Matchers.size(); I < E; ++I)
+      Matchers[I]->popFirstCondition();
+  }
+}
+
+// Emit the group as a GIM_Try scope: the hoisted shared conditions once up
+// front, then each member matcher, then GIM_Reject jumping past the scope if
+// nothing matched. The scope is omitted entirely when nothing was hoisted.
+void GroupMatcher::emit(MatchTable &Table) {
+  unsigned LabelID = ~0U;
+  if (!Conditions.empty()) {
+    LabelID = Table.allocateLabelID();
+    Table << MatchTable::Opcode("GIM_Try", +1)
+          << MatchTable::Comment("On fail goto")
+          << MatchTable::JumpTarget(LabelID) << MatchTable::LineBreak;
+  }
+  // Any member works as the emission context since all share the conditions.
+  for (auto &Condition : Conditions)
+    Condition->emitPredicateOpcodes(
+        Table, *static_cast<RuleMatcher *>(*Matchers.begin()));
+
+  for (const auto &M : Matchers)
+    M->emit(Table);
+
+  // Exit the group
+  if (!Conditions.empty())
+    Table << MatchTable::Opcode("GIM_Reject", -1) << MatchTable::LineBreak
+          << MatchTable::Label(LabelID);
+}
+
+// Only opcode checks and operand LLT checks can be turned into switch
+// opcodes (GIM_SwitchOpcode / GIM_SwitchType).
+bool SwitchMatcher::isSupportedPredicateType(const PredicateMatcher &P) {
+  return isa<InstructionOpcodeMatcher>(P) || isa<LLTOperandMatcher>(P);
+}
+
+// Decide whether a rule whose first condition is Predicate can become a case
+// of this switch: same condition kind/path as the existing cases, a
+// switchable predicate type with a usable value, and a case value not
+// already taken.
+bool SwitchMatcher::candidateConditionMatches(
+    const PredicateMatcher &Predicate) const {
+
+  if (empty()) {
+    // Sharing predicates for nested instructions is not supported yet as we
+    // currently don't hoist the GIM_RecordInsn's properly, therefore we can
+    // only work on the original root instruction (InsnVarID == 0):
+    if (Predicate.getInsnVarID() != 0)
+      return false;
+    // ... while an attempt to add even a root matcher to an empty SwitchMatcher
+    // could fail as not all the types of conditions are supported:
+    if (!isSupportedPredicateType(Predicate))
+      return false;
+    // ... or the condition might not have a proper implementation of
+    // getValue() / isIdenticalDownToValue() yet:
+    if (!Predicate.hasValue())
+      return false;
+    // ... otherwise an empty Switch can accommodate the condition with no
+    // further requirements:
+    return true;
+  }
+
+  const Matcher &CaseRepresentative = **Matchers.begin();
+  const auto &RepresentativeCondition = CaseRepresentative.getFirstCondition();
+  // Switch-cases must share the same kind of condition and path to the value it
+  // checks:
+  if (!Predicate.isIdenticalDownToValue(RepresentativeCondition))
+    return false;
+
+  const auto Value = Predicate.getValue();
+  // ... but be unique with respect to the actual value they check:
+  return Values.count(Value) == 0;
+}
+
+// Try to append Candidate as a new switch case; records its case value so
+// later candidates with the same value are rejected. Returns false and
+// leaves the switch unchanged on incompatibility.
+bool SwitchMatcher::addMatcher(Matcher &Candidate) {
+  if (!Candidate.hasFirstCondition())
+    return false;
+
+  const PredicateMatcher &Predicate = Candidate.getFirstCondition();
+  if (!candidateConditionMatches(Predicate))
+    return false;
+  const auto Value = Predicate.getValue();
+  Values.insert(Value);
+
+  Matchers.push_back(&Candidate);
+  return true;
+}
+
+// Order the cases by their switched-on value (to match the jump table layout
+// emit() produces), keep one representative condition as the switch subject,
+// and strip the now-redundant first condition from every other case.
+void SwitchMatcher::finalize() {
+  assert(Condition == nullptr && "Already finalized");
+  assert(Values.size() == Matchers.size() && "Broken SwitchMatcher");
+  if (empty())
+    return;
+
+  llvm::stable_sort(Matchers, [](const Matcher *L, const Matcher *R) {
+    return L->getFirstCondition().getValue() <
+           R->getFirstCondition().getValue();
+  });
+  Condition = Matchers[0]->popFirstCondition();
+  for (unsigned I = 1, E = Values.size(); I < E; ++I)
+    Matchers[I]->popFirstCondition();
+}
+
+// Emit the switch head for condition P: GIM_SwitchOpcode with the insn slot
+// for opcode checks, or GIM_SwitchType with insn slot and operand index for
+// LLT checks. Must be kept in sync with isSupportedPredicateType().
+void SwitchMatcher::emitPredicateSpecificOpcodes(const PredicateMatcher &P,
+                                                 MatchTable &Table) {
+  assert(isSupportedPredicateType(P) && "Predicate type is not supported");
+
+  if (const auto *Condition = dyn_cast<InstructionOpcodeMatcher>(&P)) {
+    Table << MatchTable::Opcode("GIM_SwitchOpcode") << MatchTable::Comment("MI")
+          << MatchTable::IntValue(Condition->getInsnVarID());
+    return;
+  }
+  if (const auto *Condition = dyn_cast<LLTOperandMatcher>(&P)) {
+    Table << MatchTable::Opcode("GIM_SwitchType") << MatchTable::Comment("MI")
+          << MatchTable::IntValue(Condition->getInsnVarID())
+          << MatchTable::Comment("Op")
+          << MatchTable::IntValue(Condition->getOpIdx());
+    return;
+  }
+
+  llvm_unreachable("emitPredicateSpecificOpcodes is broken: can not handle a "
+                   "predicate type that is claimed to be supported");
+}
+
+// Emit the switch: head opcode, the [LowerBound, UpperBound) value range, a
+// default jump target, then a dense jump table (holes filled with 0 for
+// values that have no case), followed by each case body terminated with
+// GIM_Reject, and finally the default label.
+void SwitchMatcher::emit(MatchTable &Table) {
+  assert(Values.size() == Matchers.size() && "Broken SwitchMatcher");
+  if (empty())
+    return;
+  assert(Condition != nullptr &&
+         "Broken SwitchMatcher, hasn't been finalized?");
+
+  std::vector<unsigned> LabelIDs(Values.size());
+  std::generate(LabelIDs.begin(), LabelIDs.end(),
+                [&Table]() { return Table.allocateLabelID(); });
+  const unsigned Default = Table.allocateLabelID();
+
+  // Values is sorted, so the table covers [min, max + 1).
+  const int64_t LowerBound = Values.begin()->getRawValue();
+  const int64_t UpperBound = Values.rbegin()->getRawValue() + 1;
+
+  emitPredicateSpecificOpcodes(*Condition, Table);
+
+  Table << MatchTable::Comment("[") << MatchTable::IntValue(LowerBound)
+        << MatchTable::IntValue(UpperBound) << MatchTable::Comment(")")
+        << MatchTable::Comment("default:") << MatchTable::JumpTarget(Default);
+
+  // Walk the sorted values, padding gaps in the dense table with 0 entries.
+  int64_t J = LowerBound;
+  auto VI = Values.begin();
+  for (unsigned I = 0, E = Values.size(); I < E; ++I) {
+    auto V = *VI++;
+    while (J++ < V.getRawValue())
+      Table << MatchTable::IntValue(0);
+    V.turnIntoComment();
+    Table << MatchTable::LineBreak << V << MatchTable::JumpTarget(LabelIDs[I]);
+  }
+  Table << MatchTable::LineBreak;
+
+  for (unsigned I = 0, E = Values.size(); I < E; ++I) {
+    Table << MatchTable::Label(LabelIDs[I]);
+    Matchers[I]->emit(Table);
+    Table << MatchTable::Opcode("GIM_Reject") << MatchTable::LineBreak;
+  }
+  Table << MatchTable::Label(Default);
+}
+
+// Forward to the owning instruction matcher's match-table variable ID.
+unsigned OperandMatcher::getInsnVarID() const { return Insn.getInsnVarID(); }
+
+} // end anonymous namespace
+
+//===----------------------------------------------------------------------===//
+
+namespace llvm {
+// TableGen backend entry point: build a GlobalISelEmitter over the record
+// keeper and write the generated instruction selector to OS.
+void EmitGlobalISel(RecordKeeper &RK, raw_ostream &OS) {
+  GlobalISelEmitter(RK).run(OS);
+}
+} // End llvm namespace
diff --git a/contrib/libs/llvm16/utils/TableGen/InfoByHwMode.cpp b/contrib/libs/llvm16/utils/TableGen/InfoByHwMode.cpp
new file mode 100644
index 0000000000..73c4fbf0a5
--- /dev/null
+++ b/contrib/libs/llvm16/utils/TableGen/InfoByHwMode.cpp
@@ -0,0 +1,214 @@
+//===--- InfoByHwMode.cpp -------------------------------------------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+// Classes that implement data parameterized by HW modes for instruction
+// selection. Currently it is ValueTypeByHwMode (parameterized ValueType),
+// and RegSizeInfoByHwMode (parameterized register/spill size and alignment
+// data).
+//===----------------------------------------------------------------------===//
+
+#include "CodeGenTarget.h"
+#include "InfoByHwMode.h"
+#include "llvm/ADT/STLExtras.h"
+#include "llvm/ADT/Twine.h"
+#include "llvm/Support/Debug.h"
+#include "llvm/Support/raw_ostream.h"
+
+#include <string>
+
+using namespace llvm;
+
+// Printable name of a HW mode: "*" for the default mode, "m<Mode>" otherwise.
+std::string llvm::getModeName(unsigned Mode) {
+  return Mode == DefaultMode ? std::string("*")
+                             : (Twine('m') + Twine(Mode)).str();
+}
+
+// Build a mode -> MVT map from a HwModeSelect record: one entry per
+// (mode, ValueType record) pair listed in the record.
+ValueTypeByHwMode::ValueTypeByHwMode(Record *R, const CodeGenHwModes &CGH) {
+  const HwModeSelect &MS = CGH.getHwModeSelect(R);
+  for (const HwModeSelect::PairType &P : MS.Items) {
+    auto I = Map.insert({P.first, MVT(llvm::getValueType(P.second))});
+    assert(I.second && "Duplicate entry?");
+    (void)I;
+  }
+}
+
+// Wrap a plain MVT; additionally record the address space when R is a
+// PtrValueType record.
+ValueTypeByHwMode::ValueTypeByHwMode(Record *R, MVT T) : ValueTypeByHwMode(T) {
+  if (R->isSubClassOf("PtrValueType"))
+    PtrAddrSpace = R->getValueAsInt("AddrSpace");
+}
+
+bool ValueTypeByHwMode::operator== (const ValueTypeByHwMode &T) const {
+  assert(isValid() && T.isValid() && "Invalid type in assignment");
+  // A "simple" value has only the default mode; a simple and a non-simple
+  // value can never be equal. Two simple values compare by their MVT.
+  if (isSimple() != T.isSimple())
+    return false;
+  return isSimple() ? getSimple() == T.getSimple() : Map == T.Map;
+}
+
+bool ValueTypeByHwMode::operator< (const ValueTypeByHwMode &T) const {
+  assert(isValid() && T.isValid() && "Invalid type in comparison");
+  // Default order for maps.
+  return Map < T.Map;
+}
+
+// Return a mutable reference to the MVT stored for Mode, creating the map
+// entry on demand. The reference points into Map, so it stays valid across
+// further insertions (std::map does not invalidate references).
+MVT &ValueTypeByHwMode::getOrCreateTypeForMode(unsigned Mode, MVT Type) {
+  auto F = Map.find(Mode);
+  if (F != Map.end())
+    return F->second;
+  // If Mode is not in the map, look up the default mode. If it exists,
+  // make a copy of it for Mode and return it.
+  auto D = Map.find(DefaultMode);
+  if (D != Map.end())
+    return Map.insert(std::make_pair(Mode, D->second)).first->second;
+  // If default mode is not present either, use provided Type.
+  return Map.insert(std::make_pair(Mode, Type)).first->second;
+}
+
+// Name of an MVT without the "MVT::" scope, e.g. "i32" instead of "MVT::i32".
+StringRef ValueTypeByHwMode::getMVTName(MVT T) {
+  StringRef Name = llvm::getEnumName(T.SimpleTy);
+  Name.consume_front("MVT::");
+  return Name;
+}
+
+// Print either the bare MVT name (simple, default-mode-only case) or the
+// full "{(mode:type),...}" form with modes listed in sorted order.
+void ValueTypeByHwMode::writeToStream(raw_ostream &OS) const {
+  if (isSimple()) {
+    OS << getMVTName(getSimple());
+    return;
+  }
+
+  // Sort pointers to the map entries so the output is deterministic.
+  std::vector<const PairType*> Pairs;
+  for (const auto &P : Map)
+    Pairs.push_back(&P);
+  llvm::sort(Pairs, deref<std::less<PairType>>());
+
+  OS << '{';
+  ListSeparator LS(",");
+  for (const PairType *P : Pairs)
+    OS << LS << '(' << getModeName(P->first) << ':'
+       << getMVTName(P->second).str() << ')';
+  OS << '}';
+}
+
+LLVM_DUMP_METHOD
+void ValueTypeByHwMode::dump() const {
+  dbgs() << *this << '\n';
+}
+
+// Translate a ValueType record into a ValueTypeByHwMode: a HwModeSelect
+// record expands into a per-mode map, a plain record becomes a simple type.
+ValueTypeByHwMode llvm::getValueTypeByHwMode(Record *Rec,
+                                             const CodeGenHwModes &CGH) {
+#ifndef NDEBUG
+  // Dump the offending record before the assert fires, to aid debugging.
+  if (!Rec->isSubClassOf("ValueType"))
+    Rec->dump();
+#endif
+  assert(Rec->isSubClassOf("ValueType") &&
+         "Record must be derived from ValueType");
+  if (Rec->isSubClassOf("HwModeSelect"))
+    return ValueTypeByHwMode(Rec, CGH);
+  return ValueTypeByHwMode(Rec, llvm::getValueType(Rec));
+}
+
+// Read register/spill sizes straight off the record. CGH is unused here;
+// it is kept for signature symmetry with the other ByHwMode constructors.
+RegSizeInfo::RegSizeInfo(Record *R, const CodeGenHwModes &CGH) {
+  RegSize = R->getValueAsInt("RegSize");
+  SpillSize = R->getValueAsInt("SpillSize");
+  SpillAlignment = R->getValueAsInt("SpillAlignment");
+}
+
+// Lexicographic order: RegSize, then SpillSize, then SpillAlignment.
+bool RegSizeInfo::operator< (const RegSizeInfo &I) const {
+  return std::tie(RegSize, SpillSize, SpillAlignment) <
+         std::tie(I.RegSize, I.SpillSize, I.SpillAlignment);
+}
+
+// "Subclass" = fits in I's registers and spill slots, with a non-zero
+// spill alignment that divides I's alignment.
+bool RegSizeInfo::isSubClassOf(const RegSizeInfo &I) const {
+  return RegSize <= I.RegSize &&
+         SpillAlignment && I.SpillAlignment % SpillAlignment == 0 &&
+         SpillSize <= I.SpillSize;
+}
+
+// Compact "[R=..,S=..,A=..]" form for diagnostics.
+void RegSizeInfo::writeToStream(raw_ostream &OS) const {
+  OS << "[R=" << RegSize << ",S=" << SpillSize
+     << ",A=" << SpillAlignment << ']';
+}
+
+// Build a mode -> RegSizeInfo map from a HwModeSelect record.
+RegSizeInfoByHwMode::RegSizeInfoByHwMode(Record *R,
+      const CodeGenHwModes &CGH) {
+  const HwModeSelect &MS = CGH.getHwModeSelect(R);
+  for (const HwModeSelect::PairType &P : MS.Items) {
+    auto I = Map.insert({P.first, RegSizeInfo(P.second, CGH)});
+    assert(I.second && "Duplicate entry?");
+    (void)I;
+  }
+}
+
+// NOTE: the comparisons below inspect only the first (lowest-numbered)
+// mode present in this object's map; get() falls back to the default
+// mode when the other operand lacks that mode.
+bool RegSizeInfoByHwMode::operator< (const RegSizeInfoByHwMode &I) const {
+  unsigned M0 = Map.begin()->first;
+  return get(M0) < I.get(M0);
+}
+
+bool RegSizeInfoByHwMode::operator== (const RegSizeInfoByHwMode &I) const {
+  unsigned M0 = Map.begin()->first;
+  return get(M0) == I.get(M0);
+}
+
+bool RegSizeInfoByHwMode::isSubClassOf(const RegSizeInfoByHwMode &I) const {
+  unsigned M0 = Map.begin()->first;
+  return get(M0).isSubClassOf(I.get(M0));
+}
+
+// True if this needs a larger or more aligned spill slot than I
+// (compared for the first mode only, as above).
+bool RegSizeInfoByHwMode::hasStricterSpillThan(const RegSizeInfoByHwMode &I)
+      const {
+  unsigned M0 = Map.begin()->first;
+  const RegSizeInfo &A0 = get(M0);
+  const RegSizeInfo &B0 = I.get(M0);
+  return std::tie(A0.SpillSize, A0.SpillAlignment) >
+         std::tie(B0.SpillSize, B0.SpillAlignment);
+}
+
+// Print the per-mode register sizes as "{(mode:[R=..,S=..,A=..]),...}",
+// with modes listed in a deterministic (sorted) order.
+void RegSizeInfoByHwMode::writeToStream(raw_ostream &OS) const {
+  using PairType = typename decltype(Map)::value_type;
+  std::vector<const PairType *> Sorted;
+  for (const auto &Entry : Map)
+    Sorted.push_back(&Entry);
+  llvm::sort(Sorted, deref<std::less<PairType>>());
+
+  OS << '{';
+  ListSeparator LS(",");
+  for (const PairType *Entry : Sorted)
+    OS << LS << '(' << getModeName(Entry->first) << ':' << Entry->second
+       << ')';
+  OS << '}';
+}
+
+// Map each HW mode to its InstructionEncoding record.
+EncodingInfoByHwMode::EncodingInfoByHwMode(Record *R, const CodeGenHwModes &CGH) {
+  const HwModeSelect &MS = CGH.getHwModeSelect(R);
+  for (const HwModeSelect::PairType &P : MS.Items) {
+    assert(P.second && P.second->isSubClassOf("InstructionEncoding") &&
+           "Encoding must subclass InstructionEncoding");
+    auto I = Map.insert({P.first, P.second});
+    assert(I.second && "Duplicate entry?");
+    (void)I;
+  }
+}
+
+namespace llvm {
+  // Stream operators simply defer to each class's writeToStream method.
+  raw_ostream &operator<<(raw_ostream &OS, const ValueTypeByHwMode &T) {
+    T.writeToStream(OS);
+    return OS;
+  }
+
+  raw_ostream &operator<<(raw_ostream &OS, const RegSizeInfo &T) {
+    T.writeToStream(OS);
+    return OS;
+  }
+
+  raw_ostream &operator<<(raw_ostream &OS, const RegSizeInfoByHwMode &T) {
+    T.writeToStream(OS);
+    return OS;
+  }
+}
diff --git a/contrib/libs/llvm16/utils/TableGen/InfoByHwMode.h b/contrib/libs/llvm16/utils/TableGen/InfoByHwMode.h
new file mode 100644
index 0000000000..44927d0bf0
--- /dev/null
+++ b/contrib/libs/llvm16/utils/TableGen/InfoByHwMode.h
@@ -0,0 +1,196 @@
+//===--- InfoByHwMode.h -----------------------------------------*- C++ -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+// Classes that implement data parameterized by HW modes for instruction
+// selection. Currently it is ValueTypeByHwMode (parameterized ValueType),
+// and RegSizeInfoByHwMode (parameterized register/spill size and alignment
+// data).
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_UTILS_TABLEGEN_INFOBYHWMODE_H
+#define LLVM_UTILS_TABLEGEN_INFOBYHWMODE_H
+
+#include "CodeGenHwModes.h"
+#include "llvm/ADT/SmallSet.h"
+#include "llvm/Support/MachineValueType.h"
+
+#include <map>
+#include <string>
+
+namespace llvm {
+
+class Record;
+class raw_ostream;
+
+template <typename InfoT> struct InfoByHwMode;
+
+std::string getModeName(unsigned Mode);
+
+enum : unsigned {
+  DefaultMode = CodeGenHwModes::DefaultMode,
+};
+
+// Collect into Modes the union of the mode ids used by A and B, with the
+// default mode (if present in either) always placed last.
+template <typename InfoT>
+void union_modes(const InfoByHwMode<InfoT> &A,
+                 const InfoByHwMode<InfoT> &B,
+                 SmallVectorImpl<unsigned> &Modes) {
+  SmallSet<unsigned, 4> U;
+  for (const auto &P : A)
+    U.insert(P.first);
+  for (const auto &P : B)
+    U.insert(P.first);
+  // Make sure that the default mode is last on the list.
+  bool HasDefault = false;
+  for (unsigned M : U)
+    if (M != DefaultMode)
+      Modes.push_back(M);
+    else
+      HasDefault = true;
+  if (HasDefault)
+    Modes.push_back(DefaultMode);
+}
+
+// Base class for all "per-HW-mode" values: a map from mode id to InfoT.
+// The DefaultMode entry, when present, acts as the fallback for modes
+// that are not explicitly listed.
+template <typename InfoT>
+struct InfoByHwMode {
+  typedef std::map<unsigned,InfoT> MapType;
+  typedef typename MapType::value_type PairType;
+  typedef typename MapType::iterator iterator;
+  typedef typename MapType::const_iterator const_iterator;
+
+  InfoByHwMode() = default;
+  InfoByHwMode(const MapType &M) : Map(M) {}
+
+  LLVM_ATTRIBUTE_ALWAYS_INLINE
+  iterator begin() { return Map.begin(); }
+  LLVM_ATTRIBUTE_ALWAYS_INLINE
+  iterator end() { return Map.end(); }
+  LLVM_ATTRIBUTE_ALWAYS_INLINE
+  const_iterator begin() const { return Map.begin(); }
+  LLVM_ATTRIBUTE_ALWAYS_INLINE
+  const_iterator end() const { return Map.end(); }
+  LLVM_ATTRIBUTE_ALWAYS_INLINE
+  bool empty() const { return Map.empty(); }
+
+  LLVM_ATTRIBUTE_ALWAYS_INLINE
+  bool hasMode(unsigned M) const { return Map.find(M) != Map.end(); }
+  LLVM_ATTRIBUTE_ALWAYS_INLINE
+  bool hasDefault() const { return hasMode(DefaultMode); }
+
+  // Mutable lookup: if Mode is absent, materialize it as a copy of the
+  // default mode's value (which must exist).
+  InfoT &get(unsigned Mode) {
+    if (!hasMode(Mode)) {
+      assert(hasMode(DefaultMode));
+      Map.insert({Mode, Map.at(DefaultMode)});
+    }
+    return Map.at(Mode);
+  }
+  // Const lookup: fall back to the default mode without modifying the map.
+  const InfoT &get(unsigned Mode) const {
+    auto F = Map.find(Mode);
+    if (Mode != DefaultMode && F == Map.end())
+      F = Map.find(DefaultMode);
+    assert(F != Map.end());
+    return F->second;
+  }
+
+  // "Simple" means only the default mode is present.
+  LLVM_ATTRIBUTE_ALWAYS_INLINE
+  bool isSimple() const {
+    return Map.size() == 1 && Map.begin()->first == DefaultMode;
+  }
+  LLVM_ATTRIBUTE_ALWAYS_INLINE
+  InfoT getSimple() const {
+    assert(isSimple());
+    return Map.begin()->second;
+  }
+  // Collapse to a simple value: keep only Mode's value, re-keyed under
+  // DefaultMode.
+  void makeSimple(unsigned Mode) {
+    assert(hasMode(Mode) || hasDefault());
+    InfoT I = get(Mode);
+    Map.clear();
+    Map.insert(std::make_pair(DefaultMode, I));
+  }
+
+protected:
+  MapType Map;
+};
+
+// A ValueType (MVT) parameterized by HW mode, optionally carrying the
+// address space when constructed from a PtrValueType record.
+struct ValueTypeByHwMode : public InfoByHwMode<MVT> {
+  ValueTypeByHwMode(Record *R, const CodeGenHwModes &CGH);
+  ValueTypeByHwMode(Record *R, MVT T);
+  ValueTypeByHwMode(MVT T) { Map.insert({DefaultMode,T}); }
+  ValueTypeByHwMode() = default;
+
+  bool operator== (const ValueTypeByHwMode &T) const;
+  bool operator< (const ValueTypeByHwMode &T) const;
+
+  bool isValid() const {
+    return !Map.empty();
+  }
+  MVT getType(unsigned Mode) const { return get(Mode); }
+  MVT &getOrCreateTypeForMode(unsigned Mode, MVT Type);
+
+  static StringRef getMVTName(MVT T);
+  void writeToStream(raw_ostream &OS) const;
+  void dump() const;
+
+  // max() is the sentinel for "not a pointer type".
+  // NOTE(review): std::numeric_limits is used without an explicit <limits>
+  // include here — presumably pulled in transitively; confirm.
+  unsigned PtrAddrSpace = std::numeric_limits<unsigned>::max();
+  bool isPointer() const {
+    return PtrAddrSpace != std::numeric_limits<unsigned>::max();
+  }
+};
+
+ValueTypeByHwMode getValueTypeByHwMode(Record *Rec,
+                                       const CodeGenHwModes &CGH);
+
+// Register size, spill size, and spill alignment for one HW mode.
+struct RegSizeInfo {
+  unsigned RegSize;
+  unsigned SpillSize;
+  unsigned SpillAlignment;
+
+  RegSizeInfo(Record *R, const CodeGenHwModes &CGH);
+  RegSizeInfo() = default;
+  bool operator< (const RegSizeInfo &I) const;
+  bool operator== (const RegSizeInfo &I) const {
+    return std::tie(RegSize, SpillSize, SpillAlignment) ==
+           std::tie(I.RegSize, I.SpillSize, I.SpillAlignment);
+  }
+  bool operator!= (const RegSizeInfo &I) const {
+    return !(*this == I);
+  }
+
+  // True if this fits within I's registers and spill slots; defined in
+  // the .cpp file.
+  bool isSubClassOf(const RegSizeInfo &I) const;
+  void writeToStream(raw_ostream &OS) const;
+};
+
+// RegSizeInfo parameterized by HW mode.
+struct RegSizeInfoByHwMode : public InfoByHwMode<RegSizeInfo> {
+  RegSizeInfoByHwMode(Record *R, const CodeGenHwModes &CGH);
+  RegSizeInfoByHwMode() = default;
+  // Comparisons inspect only the lowest-numbered mode; see the .cpp file.
+  bool operator< (const RegSizeInfoByHwMode &VI) const;
+  bool operator== (const RegSizeInfoByHwMode &VI) const;
+  bool operator!= (const RegSizeInfoByHwMode &VI) const {
+    return !(*this == VI);
+  }
+
+  bool isSubClassOf(const RegSizeInfoByHwMode &I) const;
+  bool hasStricterSpillThan(const RegSizeInfoByHwMode &I) const;
+
+  void writeToStream(raw_ostream &OS) const;
+
+  void insertRegSizeForMode(unsigned Mode, RegSizeInfo Info) {
+    Map.insert(std::make_pair(Mode, Info));
+  }
+};
+
+raw_ostream &operator<<(raw_ostream &OS, const ValueTypeByHwMode &T);
+raw_ostream &operator<<(raw_ostream &OS, const RegSizeInfo &T);
+raw_ostream &operator<<(raw_ostream &OS, const RegSizeInfoByHwMode &T);
+
+// Per-HW-mode InstructionEncoding records.
+struct EncodingInfoByHwMode : public InfoByHwMode<Record*> {
+  EncodingInfoByHwMode(Record *R, const CodeGenHwModes &CGH);
+  EncodingInfoByHwMode() = default;
+};
+
+} // namespace llvm
+
+#endif // LLVM_UTILS_TABLEGEN_INFOBYHWMODE_H
diff --git a/contrib/libs/llvm16/utils/TableGen/InstrDocsEmitter.cpp b/contrib/libs/llvm16/utils/TableGen/InstrDocsEmitter.cpp
new file mode 100644
index 0000000000..bc391227ed
--- /dev/null
+++ b/contrib/libs/llvm16/utils/TableGen/InstrDocsEmitter.cpp
@@ -0,0 +1,219 @@
+//===- InstrDocsEmitter.cpp - Opcode Documentation Generator --------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// InstrDocsEmitter generates restructured text documentation for the opcodes
+// that can be used by MachineInstr. For each opcode, the documentation lists:
+// * Opcode name
+// * Assembly string
+// * Flags (e.g. mayLoad, isBranch, ...)
+// * Operands, including type and name
+// * Operand constraints
+// * Implicit register uses & defs
+// * Predicates
+//
+//===----------------------------------------------------------------------===//
+
+#include "CodeGenDAGPatterns.h"
+#include "CodeGenInstruction.h"
+#include "CodeGenTarget.h"
+#include "TableGenBackends.h"
+#include "llvm/TableGen/Record.h"
+#include <string>
+#include <vector>
+
+using namespace llvm;
+
+namespace llvm {
+
+// Emit Str as a reST title: overline, text, underline (all made of Kind).
+void writeTitle(StringRef Str, raw_ostream &OS, char Kind = '-') {
+  std::string Rule(Str.size(), Kind);
+  OS << Rule << "\n" << Str << "\n" << Rule << "\n";
+}
+
+// Emit Str as a reST section header: text followed by an underline.
+void writeHeader(StringRef Str, raw_ostream &OS, char Kind = '-') {
+  OS << Str << "\n" << std::string(Str.size(), Kind) << "\n";
+}
+
+// Escape a string for inclusion in reStructuredText output.
+std::string escapeForRST(StringRef Str) {
+  std::string Out;
+  Out.reserve(Str.size() + 4);
+  for (char C : Str) {
+    if (C == '\n')
+      Out += "\\n"; // Show special characters as their C escape codes.
+    else if (C == '\t')
+      Out += "\\t";
+    else if (C == '_')
+      Out += "\\_"; // Underscore at the end of a line is special in rst.
+    else
+      Out += C;
+  }
+  return Out;
+}
+
+// Emit reStructuredText documentation for every opcode of the target:
+// name, assembly string(s), flags, operands, constraints, implicit
+// register uses/defs, and predicates.
+void EmitInstrDocs(RecordKeeper &RK, raw_ostream &OS) {
+  CodeGenDAGPatterns CDP(RK);
+  CodeGenTarget &Target = CDP.getTargetInfo();
+  unsigned VariantCount = Target.getAsmParserVariantCount();
+
+  // Page title.
+  std::string Title = std::string(Target.getName());
+  Title += " Instructions";
+  writeTitle(Title, OS);
+  OS << "\n";
+
+  for (const CodeGenInstruction *II : Target.getInstructionsByEnumValue()) {
+    Record *Inst = II->TheDef;
+
+    // Don't print the target-independent instructions.
+    if (II->Namespace == "TargetOpcode")
+      continue;
+
+    // Heading (instruction name).
+    writeHeader(escapeForRST(Inst->getName()), OS, '=');
+    OS << "\n";
+
+    // Assembly string(s), one per asm-parser variant.
+    if (!II->AsmString.empty()) {
+      for (unsigned VarNum = 0; VarNum < VariantCount; ++VarNum) {
+        Record *AsmVariant = Target.getAsmParserVariant(VarNum);
+        OS << "Assembly string";
+        if (VariantCount != 1)
+          OS << " (" << AsmVariant->getValueAsString("Name") << ")";
+        std::string AsmString =
+            CodeGenInstruction::FlattenAsmStringVariants(II->AsmString, VarNum);
+        // We trim spaces at each end of the asm string because rst needs the
+        // formatting backticks to be next to a non-whitespace character.
+        OS << ": ``" << escapeForRST(StringRef(AsmString).trim(" "))
+           << "``\n\n";
+      }
+    }
+
+    // Boolean flags. str(f) stringifies the field name for display.
+    std::vector<const char *> FlagStrings;
+#define xstr(s) str(s)
+#define str(s) #s
+#define FLAG(f) if (II->f) { FlagStrings.push_back(str(f)); }
+    FLAG(isReturn)
+    FLAG(isEHScopeReturn)
+    FLAG(isBranch)
+    FLAG(isIndirectBranch)
+    FLAG(isCompare)
+    FLAG(isMoveImm)
+    FLAG(isBitcast)
+    FLAG(isSelect)
+    FLAG(isBarrier)
+    FLAG(isCall)
+    FLAG(isAdd)
+    FLAG(isTrap)
+    FLAG(canFoldAsLoad)
+    FLAG(mayLoad)
+    //FLAG(mayLoad_Unset) // Deliberately omitted.
+    FLAG(mayStore)
+    //FLAG(mayStore_Unset) // Deliberately omitted.
+    FLAG(isPredicable)
+    FLAG(isConvertibleToThreeAddress)
+    FLAG(isCommutable)
+    FLAG(isTerminator)
+    FLAG(isReMaterializable)
+    FLAG(hasDelaySlot)
+    FLAG(usesCustomInserter)
+    FLAG(hasPostISelHook)
+    FLAG(hasCtrlDep)
+    FLAG(isNotDuplicable)
+    FLAG(hasSideEffects)
+    //FLAG(hasSideEffects_Unset) // Deliberately omitted.
+    FLAG(isAsCheapAsAMove)
+    FLAG(hasExtraSrcRegAllocReq)
+    FLAG(hasExtraDefRegAllocReq)
+    FLAG(isCodeGenOnly)
+    FLAG(isPseudo)
+    FLAG(isRegSequence)
+    FLAG(isExtractSubreg)
+    FLAG(isInsertSubreg)
+    FLAG(isConvergent)
+    FLAG(hasNoSchedulingInfo)
+    FLAG(variadicOpsAreDefs)
+    FLAG(isAuthenticated)
+    if (!FlagStrings.empty()) {
+      OS << "Flags: ";
+      ListSeparator LS;
+      for (auto FlagString : FlagStrings)
+        OS << LS << "``" << FlagString << "``";
+      OS << "\n\n";
+    }
+
+    // Operands.
+    for (unsigned i = 0; i < II->Operands.size(); ++i) {
+      bool IsDef = i < II->Operands.NumDefs;
+      auto Op = II->Operands[i];
+
+      if (Op.MINumOperands > 1) {
+        // This operand corresponds to multiple operands on the
+        // MachineInstruction, so print all of them, showing the types and
+        // names of both the compound operand and the basic operands it
+        // contains.
+        for (unsigned SubOpIdx = 0; SubOpIdx < Op.MINumOperands; ++SubOpIdx) {
+          Record *SubRec =
+              cast<DefInit>(Op.MIOperandInfo->getArg(SubOpIdx))->getDef();
+          StringRef SubOpName = Op.MIOperandInfo->getArgNameStr(SubOpIdx);
+          StringRef SubOpTypeName = SubRec->getName();
+
+          OS << "* " << (IsDef ? "DEF" : "USE") << " ``" << Op.Rec->getName()
+             << "/" << SubOpTypeName << ":$" << Op.Name << ".";
+          // Not all sub-operands are named, make up a name for these.
+          if (SubOpName.empty())
+            OS << "anon" << SubOpIdx;
+          else
+            OS << SubOpName;
+          OS << "``\n\n";
+        }
+      } else {
+        // The operand corresponds to only one MachineInstruction operand.
+        OS << "* " << (IsDef ? "DEF" : "USE") << " ``" << Op.Rec->getName()
+           << ":$" << Op.Name << "``\n\n";
+      }
+    }
+
+    // Constraints.
+    StringRef Constraints = Inst->getValueAsString("Constraints");
+    if (!Constraints.empty()) {
+      OS << "Constraints: ``" << Constraints << "``\n\n";
+    }
+
+    // Implicit definitions.
+    if (!II->ImplicitDefs.empty()) {
+      OS << "Implicit defs: ";
+      ListSeparator LS;
+      for (Record *Def : II->ImplicitDefs)
+        OS << LS << "``" << Def->getName() << "``";
+      OS << "\n\n";
+    }
+
+    // Implicit uses.
+    if (!II->ImplicitUses.empty()) {
+      OS << "Implicit uses: ";
+      ListSeparator LS;
+      for (Record *Use : II->ImplicitUses)
+        OS << LS << "``" << Use->getName() << "``";
+      OS << "\n\n";
+    }
+
+    // Predicates.
+    std::vector<Record *> Predicates =
+        II->TheDef->getValueAsListOfDefs("Predicates");
+    if (!Predicates.empty()) {
+      OS << "Predicates: ";
+      ListSeparator LS;
+      for (Record *P : Predicates)
+        OS << LS << "``" << P->getName() << "``";
+      OS << "\n\n";
+    }
+  }
+}
+
+} // end namespace llvm
diff --git a/contrib/libs/llvm16/utils/TableGen/InstrInfoEmitter.cpp b/contrib/libs/llvm16/utils/TableGen/InstrInfoEmitter.cpp
new file mode 100644
index 0000000000..153a1243d1
--- /dev/null
+++ b/contrib/libs/llvm16/utils/TableGen/InstrInfoEmitter.cpp
@@ -0,0 +1,1257 @@
+//===- InstrInfoEmitter.cpp - Generate a Instruction Set Desc. --*- C++ -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// This tablegen backend is responsible for emitting a description of the target
+// instruction set for the code generator.
+//
+//===----------------------------------------------------------------------===//
+
+#include "CodeGenDAGPatterns.h"
+#include "CodeGenInstruction.h"
+#include "CodeGenSchedule.h"
+#include "CodeGenTarget.h"
+#include "PredicateExpander.h"
+#include "SequenceToOffsetTable.h"
+#include "SubtargetFeatureInfo.h"
+#include "TableGenBackends.h"
+#include "Types.h"
+#include "llvm/ADT/ArrayRef.h"
+#include "llvm/ADT/STLExtras.h"
+#include "llvm/ADT/StringExtras.h"
+#include "llvm/Support/Casting.h"
+#include "llvm/Support/raw_ostream.h"
+#include "llvm/TableGen/Error.h"
+#include "llvm/TableGen/Record.h"
+#include "llvm/TableGen/TableGenBackend.h"
+#include <cassert>
+#include <cstdint>
+#include <iterator>
+#include <map>
+#include <string>
+#include <utility>
+#include <vector>
+
+using namespace llvm;
+
+cl::OptionCategory InstrInfoEmitterCat("Options for -gen-instr-info");
+// When true (the default), each operand's MIOperandInfo DAG is expanded
+// into its constituent suboperands in the emitted tables.
+static cl::opt<bool> ExpandMIOperandInfo(
+    "instr-info-expand-mi-operand-info",
+    cl::desc("Expand operand's MIOperandInfo DAG into suboperands"),
+    cl::cat(InstrInfoEmitterCat), cl::init(true));
+
+namespace {
+
+// Backend driver for -gen-instr-info: emits the target's instruction
+// descriptions and the associated helper tables and functions.
+class InstrInfoEmitter {
+  RecordKeeper &Records;
+  CodeGenDAGPatterns CDP;
+  const CodeGenSchedModels &SchedModels;
+
+public:
+  InstrInfoEmitter(RecordKeeper &R):
+    Records(R), CDP(R), SchedModels(CDP.getTargetInfo().getSchedModels()) {}
+
+  // run - Output the instruction set description.
+  void run(raw_ostream &OS);
+
+private:
+  void emitEnums(raw_ostream &OS);
+
+  // Maps an operand-info string list to its OperandInfo table ID.
+  typedef std::map<std::vector<std::string>, unsigned> OperandInfoMapTy;
+
+  /// The keys of this map are maps which have OpName enum values as their keys
+  /// and instruction operand indices as their values. The values of this map
+  /// are lists of instruction names.
+  typedef std::map<std::map<unsigned, unsigned>,
+                   std::vector<std::string>> OpNameMapTy;
+  typedef std::map<std::string, unsigned>::iterator StrUintMapIter;
+
+  /// Generate member functions in the target-specific GenInstrInfo class.
+  ///
+  /// This method is used to custom expand TIIPredicate definitions.
+  /// See file llvm/Target/TargetInstPredicates.td for a description of what is
+  /// a TIIPredicate and how to use it.
+  void emitTIIHelperMethods(raw_ostream &OS, StringRef TargetName,
+                            bool ExpandDefinition = true);
+
+  /// Expand TIIPredicate definitions to functions that accept a const MCInst
+  /// reference.
+  void emitMCIIHelperMethods(raw_ostream &OS, StringRef TargetName);
+
+  /// Write verifyInstructionPredicates methods.
+  void emitFeatureVerifier(raw_ostream &OS, const CodeGenTarget &Target);
+  void emitRecord(const CodeGenInstruction &Inst, unsigned Num,
+                  Record *InstrInfo,
+                  std::map<std::vector<Record*>, unsigned> &EL,
+                  const OperandInfoMapTy &OpInfo,
+                  raw_ostream &OS);
+  void emitOperandTypeMappings(
+      raw_ostream &OS, const CodeGenTarget &Target,
+      ArrayRef<const CodeGenInstruction *> NumberedInstructions);
+  void initOperandMapData(
+      ArrayRef<const CodeGenInstruction *> NumberedInstructions,
+      StringRef Namespace,
+      std::map<std::string, unsigned> &Operands,
+      OpNameMapTy &OperandMap);
+  void emitOperandNameMappings(raw_ostream &OS, const CodeGenTarget &Target,
+      ArrayRef<const CodeGenInstruction*> NumberedInstructions);
+
+  void emitLogicalOperandSizeMappings(
+      raw_ostream &OS, StringRef Namespace,
+      ArrayRef<const CodeGenInstruction *> NumberedInstructions);
+  void emitLogicalOperandTypeMappings(
+      raw_ostream &OS, StringRef Namespace,
+      ArrayRef<const CodeGenInstruction *> NumberedInstructions);
+
+  // Operand information.
+  void EmitOperandInfo(raw_ostream &OS, OperandInfoMapTy &OperandInfoIDs);
+  std::vector<std::string> GetOperandInfo(const CodeGenInstruction &Inst);
+};
+
+} // end anonymous namespace
+
+// Emit one "ImplicitList<Num>" MCPhysReg array literal for Uses.
+static void PrintDefList(const std::vector<Record*> &Uses,
+                         unsigned Num, raw_ostream &OS) {
+  OS << "static const MCPhysReg ImplicitList" << Num << "[] = { ";
+  ListSeparator LS;
+  for (Record *U : Uses)
+    OS << LS << getQualifiedName(U);
+  OS << " };\n";
+}
+
+//===----------------------------------------------------------------------===//
+// Operand Info Emission.
+//===----------------------------------------------------------------------===//
+
+// Build the textual MCOperandInfo initializer strings for one instruction:
+// one "RegClass, Flags, OperandType, Constraints" entry per MI operand.
+std::vector<std::string>
+InstrInfoEmitter::GetOperandInfo(const CodeGenInstruction &Inst) {
+  std::vector<std::string> Result;
+
+  for (auto &Op : Inst.Operands) {
+    // Handle aggregate operands and normal operands the same way by expanding
+    // either case into a list of operands for this op.
+    std::vector<CGIOperandList::OperandInfo> OperandList;
+
+    // This might be a multiple operand thing. Targets like X86 have
+    // registers in their multi-operand operands. It may also be an anonymous
+    // operand, which has a single operand, but no declared class for the
+    // operand.
+    DagInit *MIOI = Op.MIOperandInfo;
+
+    if (!MIOI || MIOI->getNumArgs() == 0) {
+      // Single, anonymous, operand.
+      OperandList.push_back(Op);
+    } else {
+      // Expand the compound operand: one entry per suboperand, each with
+      // the suboperand's own record substituted in.
+      for (unsigned j = 0, e = Op.MINumOperands; j != e; ++j) {
+        OperandList.push_back(Op);
+
+        auto *OpR = cast<DefInit>(MIOI->getArg(j))->getDef();
+        OperandList.back().Rec = OpR;
+      }
+    }
+
+    for (unsigned j = 0, e = OperandList.size(); j != e; ++j) {
+      Record *OpR = OperandList[j].Rec;
+      std::string Res;
+
+      if (OpR->isSubClassOf("RegisterOperand"))
+        OpR = OpR->getValueAsDef("RegClass");
+      if (OpR->isSubClassOf("RegisterClass"))
+        Res += getQualifiedName(OpR) + "RegClassID, ";
+      else if (OpR->isSubClassOf("PointerLikeRegClass"))
+        Res += utostr(OpR->getValueAsInt("RegClassKind")) + ", ";
+      else
+        // -1 means the operand does not have a fixed register class.
+        Res += "-1, ";
+
+      // Fill in applicable flags.
+      Res += "0";
+
+      // Ptr value whose register class is resolved via callback.
+      if (OpR->isSubClassOf("PointerLikeRegClass"))
+        Res += "|(1<<MCOI::LookupPtrRegClass)";
+
+      // Predicate operands. Check to see if the original unexpanded operand
+      // was of type PredicateOp.
+      if (Op.Rec->isSubClassOf("PredicateOp"))
+        Res += "|(1<<MCOI::Predicate)";
+
+      // Optional def operands. Check to see if the original unexpanded operand
+      // was of type OptionalDefOperand.
+      if (Op.Rec->isSubClassOf("OptionalDefOperand"))
+        Res += "|(1<<MCOI::OptionalDef)";
+
+      // Branch target operands. Check to see if the original unexpanded
+      // operand was of type BranchTargetOperand.
+      if (Op.Rec->isSubClassOf("BranchTargetOperand"))
+        Res += "|(1<<MCOI::BranchTarget)";
+
+      // Fill in operand type.
+      Res += ", ";
+      assert(!Op.OperandType.empty() && "Invalid operand type.");
+      Res += Op.OperandType;
+
+      // Fill in constraint info.
+      Res += ", ";
+
+      const CGIOperandList::ConstraintInfo &Constraint =
+          Op.Constraints[j];
+      if (Constraint.isNone())
+        Res += "0";
+      else if (Constraint.isEarlyClobber())
+        Res += "MCOI_EARLY_CLOBBER";
+      else {
+        assert(Constraint.isTied());
+        Res += "MCOI_TIED_TO(" + utostr(Constraint.getTiedOperand()) + ")";
+      }
+
+      Result.push_back(Res);
+    }
+  }
+
+  return Result;
+}
+
+// Emit one deduplicated "OperandInfo<N>" array per distinct operand list,
+// recording each list's table ID in OperandInfoIDs.
+void InstrInfoEmitter::EmitOperandInfo(raw_ostream &OS,
+                                       OperandInfoMapTy &OperandInfoIDs) {
+  // ID #0 is for no operand info.
+  unsigned OperandListNum = 0;
+  OperandInfoIDs[std::vector<std::string>()] = ++OperandListNum;
+
+  OS << "\n";
+  const CodeGenTarget &Target = CDP.getTargetInfo();
+  for (const CodeGenInstruction *Inst : Target.getInstructionsByEnumValue()) {
+    std::vector<std::string> OperandInfo = GetOperandInfo(*Inst);
+    unsigned &N = OperandInfoIDs[OperandInfo];
+    // A non-zero ID means an identical operand list was already emitted.
+    if (N != 0) continue;
+
+    N = ++OperandListNum;
+    OS << "static const MCOperandInfo OperandInfo" << N << "[] = { ";
+    for (const std::string &Info : OperandInfo)
+      OS << "{ " << Info << " }, ";
+    OS << "};\n";
+  }
+}
+
+/// Initialize data structures for generating operand name mappings.
+///
+/// \param Operands [out] A map used to generate the OpName enum with operand
+/// names as its keys and operand enum values as its values.
+/// \param OperandMap [out] A map for representing the operand name mappings for
+/// each instructions. This is used to generate the OperandMap table as
+/// well as the getNamedOperandIdx() function.
+void InstrInfoEmitter::initOperandMapData(
+    ArrayRef<const CodeGenInstruction *> NumberedInstructions,
+    StringRef Namespace,
+    std::map<std::string, unsigned> &Operands,
+    OpNameMapTy &OperandMap) {
+  unsigned NumOperands = 0;
+  for (const CodeGenInstruction *Inst : NumberedInstructions) {
+    // Only instructions that opt in get entries in the operand map.
+    if (!Inst->TheDef->getValueAsBit("UseNamedOperandTable"))
+      continue;
+    std::map<unsigned, unsigned> OpList;
+    for (const auto &Info : Inst->Operands) {
+      StrUintMapIter I = Operands.find(Info.Name);
+
+      // First time we see this operand name: assign it the next enum value.
+      if (I == Operands.end()) {
+        I = Operands.insert(Operands.begin(),
+            std::pair<std::string, unsigned>(Info.Name, NumOperands++));
+      }
+      OpList[I->second] = Info.MIOperandNo;
+    }
+    OperandMap[OpList].push_back(Namespace.str() + "::" +
+                                 Inst->TheDef->getName().str());
+  }
+}
+
+/// Generate a table and function for looking up the indices of operands by
+/// name.
+///
+/// This code generates:
+/// - An enum in the llvm::TargetNamespace::OpName namespace, with one entry
+///   for each operand name.
+/// - A 2-dimensional table called OperandMap for mapping OpName enum values to
+///   operand indices.
+/// - A function called getNamedOperandIdx(uint16_t Opcode, uint16_t NamedIdx)
+///   for looking up the operand index for an instruction, given a value from
+///   OpName enum
+void InstrInfoEmitter::emitOperandNameMappings(raw_ostream &OS,
+           const CodeGenTarget &Target,
+           ArrayRef<const CodeGenInstruction*> NumberedInstructions) {
+  StringRef Namespace = Target.getInstNamespace();
+  std::string OpNameNS = "OpName";
+  // Map of operand names to their enumeration value. This will be used to
+  // generate the OpName enum.
+  std::map<std::string, unsigned> Operands;
+  OpNameMapTy OperandMap;
+
+  initOperandMapData(NumberedInstructions, Namespace, Operands, OperandMap);
+
+  // Emit the OpName enum, guarded by GET_INSTRINFO_OPERAND_ENUM.
+  OS << "#ifdef GET_INSTRINFO_OPERAND_ENUM\n";
+  OS << "#undef GET_INSTRINFO_OPERAND_ENUM\n";
+  OS << "namespace llvm {\n";
+  OS << "namespace " << Namespace << " {\n";
+  OS << "namespace " << OpNameNS << " {\n";
+  OS << "enum {\n";
+  for (const auto &Op : Operands)
+    OS << "  " << Op.first << " = " << Op.second << ",\n";
+
+  OS << " OPERAND_LAST";
+  OS << "\n};\n";
+  OS << "} // end namespace OpName\n";
+  OS << "} // end namespace " << Namespace << "\n";
+  OS << "} // end namespace llvm\n";
+  OS << "#endif //GET_INSTRINFO_OPERAND_ENUM\n\n";
+
+  // Emit getNamedOperandIdx(), guarded by GET_INSTRINFO_NAMED_OPS.
+  OS << "#ifdef GET_INSTRINFO_NAMED_OPS\n";
+  OS << "#undef GET_INSTRINFO_NAMED_OPS\n";
+  OS << "namespace llvm {\n";
+  OS << "namespace " << Namespace << " {\n";
+  OS << "LLVM_READONLY\n";
+  OS << "int16_t getNamedOperandIdx(uint16_t Opcode, uint16_t NamedIdx) {\n";
+  if (!Operands.empty()) {
+    OS << "  static const int16_t OperandMap [][" << Operands.size()
+       << "] = {\n";
+    for (const auto &Entry : OperandMap) {
+      const std::map<unsigned, unsigned> &OpList = Entry.first;
+      OS << "{";
+
+      // Emit a row of the OperandMap table
+      for (unsigned i = 0, e = Operands.size(); i != e; ++i)
+        OS << (OpList.count(i) == 0 ? -1 : (int)OpList.find(i)->second) << ", ";
+
+      OS << "},\n";
+    }
+    OS << "};\n";
+
+    // Instructions sharing an operand list share one OperandMap row.
+    OS << "  switch(Opcode) {\n";
+    unsigned TableIndex = 0;
+    for (const auto &Entry : OperandMap) {
+      for (const std::string &Name : Entry.second)
+        OS << "  case " << Name << ":\n";
+
+      OS << "    return OperandMap[" << TableIndex++ << "][NamedIdx];\n";
+    }
+    OS << "  default: return -1;\n";
+    OS << "  }\n";
+  } else {
+    // There are no operands, so no need to emit anything
+    OS << "  return -1;\n";
+  }
+  OS << "}\n";
+  OS << "} // end namespace " << Namespace << "\n";
+  OS << "} // end namespace llvm\n";
+  OS << "#endif //GET_INSTRINFO_NAMED_OPS\n\n";
+}
+
+/// Generate an enum for all the operand types for this target, under the
+/// llvm::TargetNamespace::OpTypes namespace.
+/// Operand types are all definitions derived of the Operand Target.td class.
+///
+/// Emits three guarded sections into the generated file:
+/// GET_INSTRINFO_OPERAND_TYPES_ENUM (the OpTypes enum),
+/// GET_INSTRINFO_OPERAND_TYPE (opcode + operand index -> operand type), and
+/// GET_INSTRINFO_MEM_OPERAND_SIZE (operand type -> memory operand size).
+void InstrInfoEmitter::emitOperandTypeMappings(
+ raw_ostream &OS, const CodeGenTarget &Target,
+ ArrayRef<const CodeGenInstruction *> NumberedInstructions) {
+
+ StringRef Namespace = Target.getInstNamespace();
+ std::vector<Record *> Operands = Records.getAllDerivedDefinitions("Operand");
+ std::vector<Record *> RegisterOperands =
+ Records.getAllDerivedDefinitions("RegisterOperand");
+ std::vector<Record *> RegisterClasses =
+ Records.getAllDerivedDefinitions("RegisterClass");
+
+ OS << "#ifdef GET_INSTRINFO_OPERAND_TYPES_ENUM\n";
+ OS << "#undef GET_INSTRINFO_OPERAND_TYPES_ENUM\n";
+ OS << "namespace llvm {\n";
+ OS << "namespace " << Namespace << " {\n";
+ OS << "namespace OpTypes {\n";
+ OS << "enum OperandType {\n";
+
+ unsigned EnumVal = 0;
+ for (const std::vector<Record *> *RecordsToAdd :
+ {&Operands, &RegisterOperands, &RegisterClasses}) {
+ for (const Record *Op : *RecordsToAdd) {
+ if (!Op->isAnonymous())
+ OS << " " << Op->getName() << " = " << EnumVal << ",\n";
+ // EnumVal advances even for anonymous records so the numbering stays
+ // aligned with the record order used by the tables below.
+ ++EnumVal;
+ }
+ }
+
+ OS << " OPERAND_TYPE_LIST_END" << "\n};\n";
+ OS << "} // end namespace OpTypes\n";
+ OS << "} // end namespace " << Namespace << "\n";
+ OS << "} // end namespace llvm\n";
+ OS << "#endif // GET_INSTRINFO_OPERAND_TYPES_ENUM\n\n";
+
+ OS << "#ifdef GET_INSTRINFO_OPERAND_TYPE\n";
+ OS << "#undef GET_INSTRINFO_OPERAND_TYPE\n";
+ OS << "namespace llvm {\n";
+ OS << "namespace " << Namespace << " {\n";
+ OS << "LLVM_READONLY\n";
+ OS << "static int getOperandType(uint16_t Opcode, uint16_t OpIdx) {\n";
+ auto getInstrName = [&](int I) -> StringRef {
+ return NumberedInstructions[I]->TheDef->getName();
+ };
+ // TODO: Factor out duplicate operand lists to compress the tables.
+ if (!NumberedInstructions.empty()) {
+ std::vector<int> OperandOffsets;
+ std::vector<Record *> OperandRecords;
+ int CurrentOffset = 0;
+ for (const CodeGenInstruction *Inst : NumberedInstructions) {
+ OperandOffsets.push_back(CurrentOffset);
+ for (const auto &Op : Inst->Operands) {
+ const DagInit *MIOI = Op.MIOperandInfo;
+ if (!ExpandMIOperandInfo || !MIOI || MIOI->getNumArgs() == 0) {
+ // Single, anonymous, operand.
+ OperandRecords.push_back(Op.Rec);
+ ++CurrentOffset;
+ } else {
+ // Compound operand: one flat-table entry per MI sub-operand.
+ for (Init *Arg : MIOI->getArgs()) {
+ OperandRecords.push_back(cast<DefInit>(Arg)->getDef());
+ ++CurrentOffset;
+ }
+ }
+ }
+ }
+
+ // Emit the table of offsets (indexes) into the operand type table.
+ // Size the unsigned integer offset to save space.
+ assert(OperandRecords.size() <= UINT32_MAX &&
+ "Too many operands for offset table");
+ OS << ((OperandRecords.size() <= UINT16_MAX) ? " const uint16_t"
+ : " const uint32_t");
+ OS << " Offsets[] = {\n";
+ for (int I = 0, E = OperandOffsets.size(); I != E; ++I) {
+ OS << " /* " << getInstrName(I) << " */\n";
+ OS << " " << OperandOffsets[I] << ",\n";
+ }
+ OS << " };\n";
+
+ // Add an entry for the end so that we don't need to special case it below.
+ OperandOffsets.push_back(OperandRecords.size());
+
+ // Emit the actual operand types in a flat table.
+ // Size the signed integer operand type to save space.
+ assert(EnumVal <= INT16_MAX &&
+ "Too many operand types for operand types table");
+ OS << "\n using namespace OpTypes;\n";
+ OS << ((EnumVal <= INT8_MAX) ? " const int8_t" : " const int16_t");
+ OS << " OpcodeOperandTypes[] = {\n ";
+ for (int I = 0, E = OperandRecords.size(), CurOffset = 0; I != E; ++I) {
+ // We print each Opcode's operands in its own row.
+ if (I == OperandOffsets[CurOffset]) {
+ OS << "\n /* " << getInstrName(CurOffset) << " */\n ";
+ // Also emit comment-only rows for instructions with zero operands.
+ while (OperandOffsets[++CurOffset] == I)
+ OS << "/* " << getInstrName(CurOffset) << " */\n ";
+ }
+ Record *OpR = OperandRecords[I];
+ if ((OpR->isSubClassOf("Operand") ||
+ OpR->isSubClassOf("RegisterOperand") ||
+ OpR->isSubClassOf("RegisterClass")) &&
+ !OpR->isAnonymous())
+ OS << OpR->getName();
+ else
+ OS << -1;
+ OS << ", ";
+ }
+ OS << "\n };\n";
+
+ OS << " return OpcodeOperandTypes[Offsets[Opcode] + OpIdx];\n";
+ } else {
+ OS << " llvm_unreachable(\"No instructions defined\");\n";
+ }
+ OS << "}\n";
+ OS << "} // end namespace " << Namespace << "\n";
+ OS << "} // end namespace llvm\n";
+ OS << "#endif // GET_INSTRINFO_OPERAND_TYPE\n\n";
+
+ OS << "#ifdef GET_INSTRINFO_MEM_OPERAND_SIZE\n";
+ OS << "#undef GET_INSTRINFO_MEM_OPERAND_SIZE\n";
+ OS << "namespace llvm {\n";
+ OS << "namespace " << Namespace << " {\n";
+ OS << "LLVM_READONLY\n";
+ OS << "static int getMemOperandSize(int OpType) {\n";
+ OS << " switch (OpType) {\n";
+ std::map<int, std::vector<StringRef>> SizeToOperandName;
+ // NOTE(review): X86MemOperand is an X86-specific TableGen class referenced
+ // from this target-independent emitter; for targets without it the loop
+ // matches nothing and the generated switch has only the default case.
+ for (const Record *Op : Operands) {
+ if (!Op->isSubClassOf("X86MemOperand"))
+ continue;
+ if (int Size = Op->getValueAsInt("Size"))
+ SizeToOperandName[Size].push_back(Op->getName());
+ }
+ OS << " default: return 0;\n";
+ for (auto KV : SizeToOperandName) {
+ for (const StringRef &OperandName : KV.second)
+ OS << " case OpTypes::" << OperandName << ":\n";
+ OS << " return " << KV.first << ";\n\n";
+ }
+ OS << " }\n}\n";
+ OS << "} // end namespace " << Namespace << "\n";
+ OS << "} // end namespace llvm\n";
+ OS << "#endif // GET_INSTRINFO_MEM_OPERAND_SIZE\n\n";
+}
+
+/// Emit getLogicalOperandSize() and getLogicalOperandIdx() under the
+/// GET_INSTRINFO_LOGICAL_OPERAND_SIZE_MAP guard. For instructions that opt in
+/// via the UseLogicalOperandMappings bit, the generated functions map a
+/// logical operand index to the number of MI operands it expands to, and to
+/// the MI operand index where it starts. Identical size rows are deduplicated
+/// through LogicalOpSizeMap so each unique row is emitted once.
+void InstrInfoEmitter::emitLogicalOperandSizeMappings(
+ raw_ostream &OS, StringRef Namespace,
+ ArrayRef<const CodeGenInstruction *> NumberedInstructions) {
+ // Unique size-list row -> row index in the generated SizeMap table.
+ std::map<std::vector<unsigned>, unsigned> LogicalOpSizeMap;
+
+ // Row index -> qualified instruction names sharing that row.
+ std::map<unsigned, std::vector<std::string>> InstMap;
+
+ size_t LogicalOpListSize = 0U;
+ std::vector<unsigned> LogicalOpList;
+ for (const auto *Inst : NumberedInstructions) {
+ if (!Inst->TheDef->getValueAsBit("UseLogicalOperandMappings"))
+ continue;
+
+ LogicalOpList.clear();
+ // Each logical operand expands to 1 MI operand, or to the number of
+ // sub-operands in its MIOperandInfo dag.
+ llvm::transform(Inst->Operands, std::back_inserter(LogicalOpList),
+ [](const CGIOperandList::OperandInfo &Op) -> unsigned {
+ auto *MIOI = Op.MIOperandInfo;
+ if (!MIOI || MIOI->getNumArgs() == 0)
+ return 1;
+ return MIOI->getNumArgs();
+ });
+ LogicalOpListSize = std::max(LogicalOpList.size(), LogicalOpListSize);
+
+ auto I =
+ LogicalOpSizeMap.insert({LogicalOpList, LogicalOpSizeMap.size()}).first;
+ InstMap[I->second].push_back(
+ (Namespace + "::" + Inst->TheDef->getName()).str());
+ }
+
+ OS << "#ifdef GET_INSTRINFO_LOGICAL_OPERAND_SIZE_MAP\n";
+ OS << "#undef GET_INSTRINFO_LOGICAL_OPERAND_SIZE_MAP\n";
+ OS << "namespace llvm {\n";
+ OS << "namespace " << Namespace << " {\n";
+ OS << "LLVM_READONLY static unsigned\n";
+ OS << "getLogicalOperandSize(uint16_t Opcode, uint16_t LogicalOpIdx) {\n";
+ if (!InstMap.empty()) {
+ std::vector<const std::vector<unsigned> *> LogicalOpSizeList(
+ LogicalOpSizeMap.size());
+ for (auto &P : LogicalOpSizeMap) {
+ LogicalOpSizeList[P.second] = &P.first;
+ }
+ OS << " static const unsigned SizeMap[][" << LogicalOpListSize
+ << "] = {\n";
+ for (auto &R : LogicalOpSizeList) {
+ const auto &Row = *R;
+ OS << " {";
+ int i;
+ for (i = 0; i < static_cast<int>(Row.size()); ++i) {
+ OS << Row[i] << ", ";
+ }
+ // Pad every row to the widest row with zeros.
+ for (; i < static_cast<int>(LogicalOpListSize); ++i) {
+ OS << "0, ";
+ }
+ OS << "}, ";
+ OS << "\n";
+ }
+ OS << " };\n";
+
+ OS << " switch (Opcode) {\n";
+ OS << " default: return LogicalOpIdx;\n";
+ for (auto &P : InstMap) {
+ auto OpMapIdx = P.first;
+ const auto &Insts = P.second;
+ for (const auto &Inst : Insts) {
+ OS << " case " << Inst << ":\n";
+ }
+ OS << " return SizeMap[" << OpMapIdx << "][LogicalOpIdx];\n";
+ }
+ OS << " }\n";
+ } else {
+ // No opted-in instructions: logical and MI operand indices coincide.
+ OS << " return LogicalOpIdx;\n";
+ }
+ OS << "}\n";
+
+ OS << "LLVM_READONLY static inline unsigned\n";
+ OS << "getLogicalOperandIdx(uint16_t Opcode, uint16_t LogicalOpIdx) {\n";
+ OS << " auto S = 0U;\n";
+ OS << " for (auto i = 0U; i < LogicalOpIdx; ++i)\n";
+ OS << " S += getLogicalOperandSize(Opcode, i);\n";
+ OS << " return S;\n";
+ OS << "}\n";
+
+ OS << "} // end namespace " << Namespace << "\n";
+ OS << "} // end namespace llvm\n";
+ OS << "#endif // GET_INSTRINFO_LOGICAL_OPERAND_SIZE_MAP\n\n";
+}
+
+/// Emit getLogicalOperandType() under the
+/// GET_INSTRINFO_LOGICAL_OPERAND_TYPE_MAP guard: for instructions opting in
+/// via UseLogicalOperandMappings, map (opcode, logical operand index) to an
+/// OpTypes enumerator, or -1 for anonymous/untyped operands. Identical type
+/// rows are deduplicated through LogicalOpTypeMap.
+void InstrInfoEmitter::emitLogicalOperandTypeMappings(
+ raw_ostream &OS, StringRef Namespace,
+ ArrayRef<const CodeGenInstruction *> NumberedInstructions) {
+ // Unique type-list row -> row index in the generated TypeMap table.
+ std::map<std::vector<std::string>, unsigned> LogicalOpTypeMap;
+
+ // Row index -> qualified instruction names sharing that row.
+ std::map<unsigned, std::vector<std::string>> InstMap;
+
+ size_t OpTypeListSize = 0U;
+ std::vector<std::string> LogicalOpTypeList;
+ for (const auto *Inst : NumberedInstructions) {
+ if (!Inst->TheDef->getValueAsBit("UseLogicalOperandMappings"))
+ continue;
+
+ LogicalOpTypeList.clear();
+ for (const auto &Op : Inst->Operands) {
+ auto *OpR = Op.Rec;
+ if ((OpR->isSubClassOf("Operand") ||
+ OpR->isSubClassOf("RegisterOperand") ||
+ OpR->isSubClassOf("RegisterClass")) &&
+ !OpR->isAnonymous()) {
+ LogicalOpTypeList.push_back(
+ (Namespace + "::OpTypes::" + Op.Rec->getName()).str());
+ } else {
+ // Anonymous or unrecognized operand class: no OpTypes enumerator.
+ LogicalOpTypeList.push_back("-1");
+ }
+ }
+ OpTypeListSize = std::max(LogicalOpTypeList.size(), OpTypeListSize);
+
+ auto I =
+ LogicalOpTypeMap.insert({LogicalOpTypeList, LogicalOpTypeMap.size()})
+ .first;
+ InstMap[I->second].push_back(
+ (Namespace + "::" + Inst->TheDef->getName()).str());
+ }
+
+ OS << "#ifdef GET_INSTRINFO_LOGICAL_OPERAND_TYPE_MAP\n";
+ OS << "#undef GET_INSTRINFO_LOGICAL_OPERAND_TYPE_MAP\n";
+ OS << "namespace llvm {\n";
+ OS << "namespace " << Namespace << " {\n";
+ OS << "LLVM_READONLY static int\n";
+ OS << "getLogicalOperandType(uint16_t Opcode, uint16_t LogicalOpIdx) {\n";
+ if (!InstMap.empty()) {
+ std::vector<const std::vector<std::string> *> LogicalOpTypeList(
+ LogicalOpTypeMap.size());
+ for (auto &P : LogicalOpTypeMap) {
+ LogicalOpTypeList[P.second] = &P.first;
+ }
+ OS << " static const int TypeMap[][" << OpTypeListSize << "] = {\n";
+ for (int r = 0, rs = LogicalOpTypeList.size(); r < rs; ++r) {
+ const auto &Row = *LogicalOpTypeList[r];
+ OS << " {";
+ int i, s = Row.size();
+ for (i = 0; i < s; ++i) {
+ if (i > 0)
+ OS << ", ";
+ OS << Row[i];
+ }
+ // Pad every row to the widest row with -1.
+ for (; i < static_cast<int>(OpTypeListSize); ++i) {
+ if (i > 0)
+ OS << ", ";
+ OS << "-1";
+ }
+ OS << "}";
+ if (r != rs - 1)
+ OS << ",";
+ OS << "\n";
+ }
+ OS << " };\n";
+
+ OS << " switch (Opcode) {\n";
+ OS << " default: return -1;\n";
+ for (auto &P : InstMap) {
+ auto OpMapIdx = P.first;
+ const auto &Insts = P.second;
+ for (const auto &Inst : Insts) {
+ OS << " case " << Inst << ":\n";
+ }
+ OS << " return TypeMap[" << OpMapIdx << "][LogicalOpIdx];\n";
+ }
+ OS << " }\n";
+ } else {
+ OS << " return -1;\n";
+ }
+ OS << "}\n";
+ OS << "} // end namespace " << Namespace << "\n";
+ OS << "} // end namespace llvm\n";
+ OS << "#endif // GET_INSTRINFO_LOGICAL_OPERAND_TYPE_MAP\n\n";
+}
+
+/// Emit MC-layer helpers for the target's TIIPredicate records:
+/// declarations under GET_INSTRINFO_MC_HELPER_DECLS and definitions under
+/// GET_INSTRINFO_MC_HELPERS, all inside the <Target>_MC namespace. Also
+/// declares verifyInstructionPredicates() (defined by emitFeatureVerifier).
+void InstrInfoEmitter::emitMCIIHelperMethods(raw_ostream &OS,
+ StringRef TargetName) {
+ RecVec TIIPredicates = Records.getAllDerivedDefinitions("TIIPredicate");
+
+ OS << "#ifdef GET_INSTRINFO_MC_HELPER_DECLS\n";
+ OS << "#undef GET_INSTRINFO_MC_HELPER_DECLS\n\n";
+
+ OS << "namespace llvm {\n";
+ OS << "class MCInst;\n";
+ OS << "class FeatureBitset;\n\n";
+
+ OS << "namespace " << TargetName << "_MC {\n\n";
+
+ // One bool predicate declaration per TIIPredicate record.
+ for (const Record *Rec : TIIPredicates) {
+ OS << "bool " << Rec->getValueAsString("FunctionName")
+ << "(const MCInst &MI);\n";
+ }
+
+ OS << "void verifyInstructionPredicates(unsigned Opcode, const FeatureBitset "
+ "&Features);\n";
+
+ OS << "\n} // end namespace " << TargetName << "_MC\n";
+ OS << "} // end namespace llvm\n\n";
+
+ OS << "#endif // GET_INSTRINFO_MC_HELPER_DECLS\n\n";
+
+ OS << "#ifdef GET_INSTRINFO_MC_HELPERS\n";
+ OS << "#undef GET_INSTRINFO_MC_HELPERS\n\n";
+
+ OS << "namespace llvm {\n";
+ OS << "namespace " << TargetName << "_MC {\n\n";
+
+ // Expand each predicate's TableGen 'Body' into MC-layer C++.
+ PredicateExpander PE(TargetName);
+ PE.setExpandForMC(true);
+
+ for (const Record *Rec : TIIPredicates) {
+ OS << "bool " << Rec->getValueAsString("FunctionName");
+ OS << "(const MCInst &MI) {\n";
+
+ OS.indent(PE.getIndentLevel() * 2);
+ PE.expandStatement(OS, Rec->getValueAsDef("Body"));
+ OS << "\n}\n\n";
+ }
+
+ OS << "} // end namespace " << TargetName << "_MC\n";
+ OS << "} // end namespace llvm\n\n";
+
+ // Fixed typo: the closing comment previously said GET_GENISTRINFO_MC_HELPERS,
+ // which did not match the GET_INSTRINFO_MC_HELPERS guard opened above.
+ OS << "#endif // GET_INSTRINFO_MC_HELPERS\n\n";
+}
+
+/// Build the enumerator name for a feature bitset: "CEFBS" followed by
+/// "_<FeatureName>" for each record, e.g. "CEFBS_HasSSE2_HasAVX". An empty
+/// list yields plain "CEFBS" (callers emit "CEFBS_None" for that case).
+static std::string
+getNameForFeatureBitset(const std::vector<Record *> &FeatureBitset) {
+ std::string Name = "CEFBS";
+ for (const auto &Feature : FeatureBitset)
+ Name += ("_" + Feature->getName()).str();
+ return Name;
+}
+
+/// Emit verifyInstructionPredicates() under ENABLE_INSTR_PREDICATE_VERIFIER:
+/// a debug-only check (active only when NDEBUG is not defined) that the
+/// subtarget features required by an opcode's Predicates are all present in
+/// the given feature set, reporting a fatal error listing missing features.
+void InstrInfoEmitter::emitFeatureVerifier(raw_ostream &OS,
+ const CodeGenTarget &Target) {
+ const auto &All = SubtargetFeatureInfo::getAll(Records);
+ std::map<Record *, SubtargetFeatureInfo, LessRecordByID> SubtargetFeatures;
+ SubtargetFeatures.insert(All.begin(), All.end());
+
+ // NOTE(review): <_llvm_sstream.h> looks like this repo's vendored rename of
+ // the standard <sstream> header (std::ostringstream is used below) — verify
+ // against the repo's header shims.
+ OS << "#ifdef ENABLE_INSTR_PREDICATE_VERIFIER\n"
+ << "#undef ENABLE_INSTR_PREDICATE_VERIFIER\n"
+ << "#include <_llvm_sstream.h>\n\n";
+
+ OS << "namespace llvm {\n";
+ OS << "namespace " << Target.getName() << "_MC {\n\n";
+
+ // Emit the subtarget feature enumeration.
+ SubtargetFeatureInfo::emitSubtargetFeatureBitEnumeration(SubtargetFeatures,
+ OS);
+
+ // Emit the name table for error messages.
+ OS << "#ifndef NDEBUG\n";
+ SubtargetFeatureInfo::emitNameTable(SubtargetFeatures, OS);
+ OS << "#endif // NDEBUG\n\n";
+
+ // Emit the available features compute function.
+ SubtargetFeatureInfo::emitComputeAssemblerAvailableFeatures(
+ Target.getName(), "", "computeAvailableFeatures", SubtargetFeatures, OS);
+
+ // Collect, per instruction, the subtarget-feature records named in its
+ // Predicates list.
+ std::vector<std::vector<Record *>> FeatureBitsets;
+ for (const CodeGenInstruction *Inst : Target.getInstructionsByEnumValue()) {
+ FeatureBitsets.emplace_back();
+ for (Record *Predicate : Inst->TheDef->getValueAsListOfDefs("Predicates")) {
+ const auto &I = SubtargetFeatures.find(Predicate);
+ if (I != SubtargetFeatures.end())
+ FeatureBitsets.back().push_back(I->second.TheDef);
+ }
+ }
+
+ // Sort by size then by feature names so duplicates become adjacent and can
+ // be removed with std::unique below.
+ llvm::sort(FeatureBitsets, [&](const std::vector<Record *> &A,
+ const std::vector<Record *> &B) {
+ if (A.size() < B.size())
+ return true;
+ if (A.size() > B.size())
+ return false;
+ for (auto Pair : zip(A, B)) {
+ if (std::get<0>(Pair)->getName() < std::get<1>(Pair)->getName())
+ return true;
+ if (std::get<0>(Pair)->getName() > std::get<1>(Pair)->getName())
+ return false;
+ }
+ return false;
+ });
+ FeatureBitsets.erase(
+ std::unique(FeatureBitsets.begin(), FeatureBitsets.end()),
+ FeatureBitsets.end());
+ OS << "#ifndef NDEBUG\n"
+ << "// Feature bitsets.\n"
+ << "enum : " << getMinimalTypeForRange(FeatureBitsets.size()) << " {\n"
+ << " CEFBS_None,\n";
+ for (const auto &FeatureBitset : FeatureBitsets) {
+ if (FeatureBitset.empty())
+ continue;
+ OS << " " << getNameForFeatureBitset(FeatureBitset) << ",\n";
+ }
+ OS << "};\n\n"
+ << "static constexpr FeatureBitset FeatureBitsets[] = {\n"
+ << " {}, // CEFBS_None\n";
+ for (const auto &FeatureBitset : FeatureBitsets) {
+ if (FeatureBitset.empty())
+ continue;
+ OS << " {";
+ for (const auto &Feature : FeatureBitset) {
+ const auto &I = SubtargetFeatures.find(Feature);
+ assert(I != SubtargetFeatures.end() && "Didn't import predicate?");
+ OS << I->second.getEnumBitName() << ", ";
+ }
+ OS << "},\n";
+ }
+ OS << "};\n"
+ << "#endif // NDEBUG\n\n";
+
+ // Emit the predicate verifier.
+ OS << "void verifyInstructionPredicates(\n"
+ << " unsigned Opcode, const FeatureBitset &Features) {\n"
+ << "#ifndef NDEBUG\n"
+ << " static " << getMinimalTypeForRange(FeatureBitsets.size())
+ << " RequiredFeaturesRefs[] = {\n";
+ unsigned InstIdx = 0;
+ // Per-opcode index into FeatureBitsets, reconstructed by the same name
+ // scheme used for the enum above.
+ for (const CodeGenInstruction *Inst : Target.getInstructionsByEnumValue()) {
+ OS << " CEFBS";
+ unsigned NumPredicates = 0;
+ for (Record *Predicate : Inst->TheDef->getValueAsListOfDefs("Predicates")) {
+ const auto &I = SubtargetFeatures.find(Predicate);
+ if (I != SubtargetFeatures.end()) {
+ OS << '_' << I->second.TheDef->getName();
+ NumPredicates++;
+ }
+ }
+ if (!NumPredicates)
+ OS << "_None";
+ OS << ", // " << Inst->TheDef->getName() << " = " << InstIdx << "\n";
+ InstIdx++;
+ }
+ OS << " };\n\n";
+ OS << " assert(Opcode < " << InstIdx << ");\n";
+ OS << " FeatureBitset AvailableFeatures = "
+ "computeAvailableFeatures(Features);\n";
+ OS << " const FeatureBitset &RequiredFeatures = "
+ "FeatureBitsets[RequiredFeaturesRefs[Opcode]];\n";
+ OS << " FeatureBitset MissingFeatures =\n"
+ << " (AvailableFeatures & RequiredFeatures) ^\n"
+ << " RequiredFeatures;\n"
+ << " if (MissingFeatures.any()) {\n"
+ << " std::ostringstream Msg;\n"
+ << " Msg << \"Attempting to emit \" << &" << Target.getName()
+ << "InstrNameData[" << Target.getName() << "InstrNameIndices[Opcode]]\n"
+ << " << \" instruction but the \";\n"
+ << " for (unsigned i = 0, e = MissingFeatures.size(); i != e; ++i)\n"
+ << " if (MissingFeatures.test(i))\n"
+ << " Msg << SubtargetFeatureNames[i] << \" \";\n"
+ << " Msg << \"predicate(s) are not met\";\n"
+ << " report_fatal_error(Msg.str().c_str());\n"
+ << " }\n"
+ << "#endif // NDEBUG\n";
+ OS << "}\n";
+ OS << "} // end namespace " << Target.getName() << "_MC\n";
+ OS << "} // end namespace llvm\n";
+ OS << "#endif // ENABLE_INSTR_PREDICATE_VERIFIER\n\n";
+}
+
+/// Emit TargetInstrInfo-layer helpers for TIIPredicate records. With
+/// ExpandDefinition == false, emits "static bool <Name>(const MachineInstr&);"
+/// declarations; with true, emits the out-of-line
+/// <Target>InstrInfo::<Name> definitions expanded from the record's 'Body'.
+void InstrInfoEmitter::emitTIIHelperMethods(raw_ostream &OS,
+ StringRef TargetName,
+ bool ExpandDefinition) {
+ RecVec TIIPredicates = Records.getAllDerivedDefinitions("TIIPredicate");
+ if (TIIPredicates.empty())
+ return;
+
+ PredicateExpander PE(TargetName);
+ PE.setExpandForMC(false);
+
+ for (const Record *Rec : TIIPredicates) {
+ OS << (ExpandDefinition ? "" : "static ") << "bool ";
+ if (ExpandDefinition)
+ OS << TargetName << "InstrInfo::";
+ OS << Rec->getValueAsString("FunctionName");
+ OS << "(const MachineInstr &MI)";
+ if (!ExpandDefinition) {
+ // Declaration only.
+ OS << ";\n";
+ continue;
+ }
+
+ OS << " {\n";
+ OS.indent(PE.getIndentLevel() * 2);
+ PE.expandStatement(OS, Rec->getValueAsDef("Body"));
+ OS << "\n}\n\n";
+ }
+}
+
+//===----------------------------------------------------------------------===//
+// Main Output.
+//===----------------------------------------------------------------------===//
+
+// run - Emit the main instruction description records for the target: the
+// opcode enums, the MCInstrDesc table with its name/deprecation side tables,
+// the TargetInstrInfo subclass boilerplate, and the operand-mapping, helper
+// and verifier sections, each behind its own GET_INSTRINFO_* guard.
+void InstrInfoEmitter::run(raw_ostream &OS) {
+ emitSourceFileHeader("Target Instruction Enum Values and Descriptors", OS);
+ emitEnums(OS);
+
+ OS << "#ifdef GET_INSTRINFO_MC_DESC\n";
+ OS << "#undef GET_INSTRINFO_MC_DESC\n";
+
+ OS << "namespace llvm {\n\n";
+
+ CodeGenTarget &Target = CDP.getTargetInfo();
+ const std::string &TargetName = std::string(Target.getName());
+ Record *InstrInfo = Target.getInstructionSet();
+
+ // Keep track of all of the def lists we have emitted already.
+ std::map<std::vector<Record*>, unsigned> EmittedLists;
+ unsigned ListNumber = 0;
+
+ // Emit all of the instruction's implicit uses and defs.
+ Records.startTimer("Emit uses/defs");
+ for (const CodeGenInstruction *II : Target.getInstructionsByEnumValue()) {
+ std::vector<Record *> ImplicitOps = II->ImplicitUses;
+ llvm::append_range(ImplicitOps, II->ImplicitDefs);
+ if (!ImplicitOps.empty()) {
+ // Deduplicate: identical implicit-op lists share one emitted table.
+ unsigned &IL = EmittedLists[ImplicitOps];
+ if (!IL) {
+ IL = ++ListNumber;
+ PrintDefList(ImplicitOps, IL, OS);
+ }
+ }
+ }
+
+ OperandInfoMapTy OperandInfoIDs;
+
+ // Emit all of the operand info records.
+ Records.startTimer("Emit operand info");
+ EmitOperandInfo(OS, OperandInfoIDs);
+
+ // Emit all of the MCInstrDesc records in reverse ENUM ordering.
+ Records.startTimer("Emit InstrDesc records");
+ OS << "\nextern const MCInstrDesc " << TargetName << "Insts[] = {\n";
+ ArrayRef<const CodeGenInstruction*> NumberedInstructions =
+ Target.getInstructionsByEnumValue();
+
+ SequenceToOffsetTable<std::string> InstrNames;
+ unsigned Num = NumberedInstructions.size();
+ for (const CodeGenInstruction *Inst : reverse(NumberedInstructions)) {
+ // Keep a list of the instruction names.
+ InstrNames.add(std::string(Inst->TheDef->getName()));
+ // Emit the record into the table.
+ emitRecord(*Inst, --Num, InstrInfo, EmittedLists, OperandInfoIDs, OS);
+ }
+ OS << "};\n\n";
+
+ // Emit the array of instruction names.
+ Records.startTimer("Emit instruction names");
+ InstrNames.layout();
+ InstrNames.emitStringLiteralDef(OS, Twine("extern const char ") + TargetName +
+ "InstrNameData[]");
+
+ OS << "extern const unsigned " << TargetName <<"InstrNameIndices[] = {";
+ Num = 0;
+ for (const CodeGenInstruction *Inst : NumberedInstructions) {
+ // Newline every eight entries.
+ if (Num % 8 == 0)
+ OS << "\n ";
+ OS << InstrNames.get(std::string(Inst->TheDef->getName())) << "U, ";
+ ++Num;
+ }
+ OS << "\n};\n\n";
+
+ // Optional table: per-opcode deprecation-by-feature index, or uint8_t(-1)
+ // when the instruction is not feature-deprecated.
+ bool HasDeprecationFeatures =
+ llvm::any_of(NumberedInstructions, [](const CodeGenInstruction *Inst) {
+ return !Inst->HasComplexDeprecationPredicate &&
+ !Inst->DeprecatedReason.empty();
+ });
+ if (HasDeprecationFeatures) {
+ OS << "extern const uint8_t " << TargetName
+ << "InstrDeprecationFeatures[] = {";
+ Num = 0;
+ for (const CodeGenInstruction *Inst : NumberedInstructions) {
+ if (Num % 8 == 0)
+ OS << "\n ";
+ if (!Inst->HasComplexDeprecationPredicate &&
+ !Inst->DeprecatedReason.empty())
+ OS << Target.getInstNamespace() << "::" << Inst->DeprecatedReason
+ << ", ";
+ else
+ OS << "uint8_t(-1), ";
+ ++Num;
+ }
+ OS << "\n};\n\n";
+ }
+
+ // Optional table: per-opcode complex-deprecation callbacks.
+ bool HasComplexDeprecationInfos =
+ llvm::any_of(NumberedInstructions, [](const CodeGenInstruction *Inst) {
+ return Inst->HasComplexDeprecationPredicate;
+ });
+ if (HasComplexDeprecationInfos) {
+ OS << "extern const MCInstrInfo::ComplexDeprecationPredicate " << TargetName
+ << "InstrComplexDeprecationInfos[] = {";
+ Num = 0;
+ for (const CodeGenInstruction *Inst : NumberedInstructions) {
+ if (Num % 8 == 0)
+ OS << "\n ";
+ if (Inst->HasComplexDeprecationPredicate)
+ // Emit a function pointer to the complex predicate method.
+ OS << "&get" << Inst->DeprecatedReason << "DeprecationInfo, ";
+ else
+ OS << "nullptr, ";
+ ++Num;
+ }
+ OS << "\n};\n\n";
+ }
+
+ // MCInstrInfo initialization routine.
+ Records.startTimer("Emit initialization routine");
+ OS << "static inline void Init" << TargetName
+ << "MCInstrInfo(MCInstrInfo *II) {\n";
+ OS << " II->InitMCInstrInfo(" << TargetName << "Insts, " << TargetName
+ << "InstrNameIndices, " << TargetName << "InstrNameData, ";
+ if (HasDeprecationFeatures)
+ OS << TargetName << "InstrDeprecationFeatures, ";
+ else
+ OS << "nullptr, ";
+ if (HasComplexDeprecationInfos)
+ OS << TargetName << "InstrComplexDeprecationInfos, ";
+ else
+ OS << "nullptr, ";
+ OS << NumberedInstructions.size() << ");\n}\n\n";
+
+ OS << "} // end namespace llvm\n";
+
+ OS << "#endif // GET_INSTRINFO_MC_DESC\n\n";
+
+ // Create a TargetInstrInfo subclass to hide the MC layer initialization.
+ OS << "#ifdef GET_INSTRINFO_HEADER\n";
+ OS << "#undef GET_INSTRINFO_HEADER\n";
+
+ std::string ClassName = TargetName + "GenInstrInfo";
+ OS << "namespace llvm {\n";
+ OS << "struct " << ClassName << " : public TargetInstrInfo {\n"
+ << " explicit " << ClassName
+ << "(unsigned CFSetupOpcode = ~0u, unsigned CFDestroyOpcode = ~0u, "
+ "unsigned CatchRetOpcode = ~0u, unsigned ReturnOpcode = ~0u);\n"
+ << " ~" << ClassName << "() override = default;\n";
+
+
+ OS << "\n};\n} // end namespace llvm\n";
+
+ OS << "#endif // GET_INSTRINFO_HEADER\n\n";
+
+ OS << "#ifdef GET_INSTRINFO_HELPER_DECLS\n";
+ OS << "#undef GET_INSTRINFO_HELPER_DECLS\n\n";
+ emitTIIHelperMethods(OS, TargetName, /* ExpandDefinition = */ false);
+ OS << "\n";
+ OS << "#endif // GET_INSTRINFO_HELPER_DECLS\n\n";
+
+ OS << "#ifdef GET_INSTRINFO_HELPERS\n";
+ OS << "#undef GET_INSTRINFO_HELPERS\n\n";
+ emitTIIHelperMethods(OS, TargetName, /* ExpandDefinition = */ true);
+ OS << "#endif // GET_INSTRINFO_HELPERS\n\n";
+
+ OS << "#ifdef GET_INSTRINFO_CTOR_DTOR\n";
+ OS << "#undef GET_INSTRINFO_CTOR_DTOR\n";
+
+ OS << "namespace llvm {\n";
+ OS << "extern const MCInstrDesc " << TargetName << "Insts[];\n";
+ OS << "extern const unsigned " << TargetName << "InstrNameIndices[];\n";
+ OS << "extern const char " << TargetName << "InstrNameData[];\n";
+ if (HasDeprecationFeatures)
+ OS << "extern const uint8_t " << TargetName
+ << "InstrDeprecationFeatures[];\n";
+ if (HasComplexDeprecationInfos)
+ OS << "extern const MCInstrInfo::ComplexDeprecationPredicate " << TargetName
+ << "InstrComplexDeprecationInfos[];\n";
+ OS << ClassName << "::" << ClassName
+ << "(unsigned CFSetupOpcode, unsigned CFDestroyOpcode, unsigned "
+ "CatchRetOpcode, unsigned ReturnOpcode)\n"
+ << " : TargetInstrInfo(CFSetupOpcode, CFDestroyOpcode, CatchRetOpcode, "
+ "ReturnOpcode) {\n"
+ << " InitMCInstrInfo(" << TargetName << "Insts, " << TargetName
+ << "InstrNameIndices, " << TargetName << "InstrNameData, ";
+ if (HasDeprecationFeatures)
+ OS << TargetName << "InstrDeprecationFeatures, ";
+ else
+ OS << "nullptr, ";
+ if (HasComplexDeprecationInfos)
+ OS << TargetName << "InstrComplexDeprecationInfos, ";
+ else
+ OS << "nullptr, ";
+ OS << NumberedInstructions.size() << ");\n}\n";
+ OS << "} // end namespace llvm\n";
+
+ OS << "#endif // GET_INSTRINFO_CTOR_DTOR\n\n";
+
+ Records.startTimer("Emit operand name mappings");
+ emitOperandNameMappings(OS, Target, NumberedInstructions);
+
+ Records.startTimer("Emit operand type mappings");
+ emitOperandTypeMappings(OS, Target, NumberedInstructions);
+
+ Records.startTimer("Emit logical operand size mappings");
+ emitLogicalOperandSizeMappings(OS, TargetName, NumberedInstructions);
+
+ Records.startTimer("Emit logical operand type mappings");
+ emitLogicalOperandTypeMappings(OS, TargetName, NumberedInstructions);
+
+ Records.startTimer("Emit helper methods");
+ emitMCIIHelperMethods(OS, TargetName);
+
+ Records.startTimer("Emit verifier methods");
+ emitFeatureVerifier(OS, Target);
+}
+
+// emitRecord - Emit one MCInstrDesc initializer for instruction 'Inst' with
+// enum value 'Num': operand counts, size, sched class, MCID flag bits, the
+// target-specific TSFlags word, and pointers into the previously emitted
+// implicit-op and operand-info tables.
+void InstrInfoEmitter::emitRecord(const CodeGenInstruction &Inst, unsigned Num,
+ Record *InstrInfo,
+ std::map<std::vector<Record*>, unsigned> &EmittedLists,
+ const OperandInfoMapTy &OpInfo,
+ raw_ostream &OS) {
+ int MinOperands = 0;
+ if (!Inst.Operands.empty())
+ // Each logical operand can be multiple MI operands.
+ MinOperands = Inst.Operands.back().MIOperandNo +
+ Inst.Operands.back().MINumOperands;
+
+ OS << " { ";
+ OS << Num << ",\t" << MinOperands << ",\t"
+ << Inst.Operands.NumDefs << ",\t"
+ << Inst.TheDef->getValueAsInt("Size") << ",\t"
+ << SchedModels.getSchedClassIdx(Inst) << ",\t"
+ << Inst.ImplicitUses.size() << ",\t"
+ << Inst.ImplicitDefs.size() << ",\t0";
+
+ CodeGenTarget &Target = CDP.getTargetInfo();
+
+ // Emit all of the target independent flags...
+ if (Inst.isPreISelOpcode) OS << "|(1ULL<<MCID::PreISelOpcode)";
+ if (Inst.isPseudo) OS << "|(1ULL<<MCID::Pseudo)";
+ if (Inst.isMeta) OS << "|(1ULL<<MCID::Meta)";
+ if (Inst.isReturn) OS << "|(1ULL<<MCID::Return)";
+ if (Inst.isEHScopeReturn) OS << "|(1ULL<<MCID::EHScopeReturn)";
+ if (Inst.isBranch) OS << "|(1ULL<<MCID::Branch)";
+ if (Inst.isIndirectBranch) OS << "|(1ULL<<MCID::IndirectBranch)";
+ if (Inst.isCompare) OS << "|(1ULL<<MCID::Compare)";
+ if (Inst.isMoveImm) OS << "|(1ULL<<MCID::MoveImm)";
+ if (Inst.isMoveReg) OS << "|(1ULL<<MCID::MoveReg)";
+ if (Inst.isBitcast) OS << "|(1ULL<<MCID::Bitcast)";
+ if (Inst.isAdd) OS << "|(1ULL<<MCID::Add)";
+ if (Inst.isTrap) OS << "|(1ULL<<MCID::Trap)";
+ if (Inst.isSelect) OS << "|(1ULL<<MCID::Select)";
+ if (Inst.isBarrier) OS << "|(1ULL<<MCID::Barrier)";
+ if (Inst.hasDelaySlot) OS << "|(1ULL<<MCID::DelaySlot)";
+ if (Inst.isCall) OS << "|(1ULL<<MCID::Call)";
+ if (Inst.canFoldAsLoad) OS << "|(1ULL<<MCID::FoldableAsLoad)";
+ if (Inst.mayLoad) OS << "|(1ULL<<MCID::MayLoad)";
+ if (Inst.mayStore) OS << "|(1ULL<<MCID::MayStore)";
+ if (Inst.mayRaiseFPException) OS << "|(1ULL<<MCID::MayRaiseFPException)";
+ if (Inst.isPredicable) OS << "|(1ULL<<MCID::Predicable)";
+ if (Inst.isConvertibleToThreeAddress) OS << "|(1ULL<<MCID::ConvertibleTo3Addr)";
+ if (Inst.isCommutable) OS << "|(1ULL<<MCID::Commutable)";
+ if (Inst.isTerminator) OS << "|(1ULL<<MCID::Terminator)";
+ if (Inst.isReMaterializable) OS << "|(1ULL<<MCID::Rematerializable)";
+ if (Inst.isNotDuplicable) OS << "|(1ULL<<MCID::NotDuplicable)";
+ if (Inst.Operands.hasOptionalDef) OS << "|(1ULL<<MCID::HasOptionalDef)";
+ if (Inst.usesCustomInserter) OS << "|(1ULL<<MCID::UsesCustomInserter)";
+ if (Inst.hasPostISelHook) OS << "|(1ULL<<MCID::HasPostISelHook)";
+ if (Inst.Operands.isVariadic)OS << "|(1ULL<<MCID::Variadic)";
+ if (Inst.hasSideEffects) OS << "|(1ULL<<MCID::UnmodeledSideEffects)";
+ if (Inst.isAsCheapAsAMove) OS << "|(1ULL<<MCID::CheapAsAMove)";
+ // When register renaming is disallowed target-wide, conservatively mark
+ // every instruction as having extra src/def reg-alloc requirements.
+ if (!Target.getAllowRegisterRenaming() || Inst.hasExtraSrcRegAllocReq)
+ OS << "|(1ULL<<MCID::ExtraSrcRegAllocReq)";
+ if (!Target.getAllowRegisterRenaming() || Inst.hasExtraDefRegAllocReq)
+ OS << "|(1ULL<<MCID::ExtraDefRegAllocReq)";
+ if (Inst.isRegSequence) OS << "|(1ULL<<MCID::RegSequence)";
+ if (Inst.isExtractSubreg) OS << "|(1ULL<<MCID::ExtractSubreg)";
+ if (Inst.isInsertSubreg) OS << "|(1ULL<<MCID::InsertSubreg)";
+ if (Inst.isConvergent) OS << "|(1ULL<<MCID::Convergent)";
+ if (Inst.variadicOpsAreDefs) OS << "|(1ULL<<MCID::VariadicOpsAreDefs)";
+ if (Inst.isAuthenticated) OS << "|(1ULL<<MCID::Authenticated)";
+
+ // Emit all of the target-specific flags...
+ BitsInit *TSF = Inst.TheDef->getValueAsBitsInit("TSFlags");
+ if (!TSF)
+ PrintFatalError(Inst.TheDef->getLoc(), "no TSFlags?");
+ uint64_t Value = 0;
+ // Pack the bits into a single uint64_t; every bit must be concrete.
+ for (unsigned i = 0, e = TSF->getNumBits(); i != e; ++i) {
+ if (const auto *Bit = dyn_cast<BitInit>(TSF->getBit(i)))
+ Value |= uint64_t(Bit->getValue()) << i;
+ else
+ PrintFatalError(Inst.TheDef->getLoc(),
+ "Invalid TSFlags bit in " + Inst.TheDef->getName());
+ }
+ OS << ", 0x";
+ OS.write_hex(Value);
+ OS << "ULL, ";
+
+ // Emit the implicit use/def list...
+ std::vector<Record *> ImplicitOps = Inst.ImplicitUses;
+ llvm::append_range(ImplicitOps, Inst.ImplicitDefs);
+ if (ImplicitOps.empty())
+ OS << "nullptr, ";
+ else
+ OS << "ImplicitList" << EmittedLists[ImplicitOps] << ", ";
+
+ // Emit the operand info.
+ std::vector<std::string> OperandInfo = GetOperandInfo(Inst);
+ if (OperandInfo.empty())
+ OS << "nullptr";
+ else
+ OS << "OperandInfo" << OpInfo.find(OperandInfo)->second;
+
+ OS << " }, // Inst #" << Num << " = " << Inst.TheDef->getName() << "\n";
+}
+
+// emitEnums - Print out enum values for all of the instructions
+// (GET_INSTRINFO_ENUM) and for the scheduling classes
+// (GET_INSTRINFO_SCHED_ENUM), both inside the target's instruction namespace.
+void InstrInfoEmitter::emitEnums(raw_ostream &OS) {
+ OS << "#ifdef GET_INSTRINFO_ENUM\n";
+ OS << "#undef GET_INSTRINFO_ENUM\n";
+
+ OS << "namespace llvm {\n\n";
+
+ const CodeGenTarget &Target = CDP.getTargetInfo();
+
+ // We must emit the PHI opcode first...
+ StringRef Namespace = Target.getInstNamespace();
+
+ if (Namespace.empty())
+ PrintFatalError("No instructions defined!");
+
+ OS << "namespace " << Namespace << " {\n";
+ OS << " enum {\n";
+ unsigned Num = 0;
+ for (const CodeGenInstruction *Inst : Target.getInstructionsByEnumValue())
+ OS << " " << Inst->TheDef->getName() << "\t= " << Num++ << ",\n";
+ OS << " INSTRUCTION_LIST_END = " << Num << "\n";
+ OS << " };\n\n";
+ OS << "} // end namespace " << Namespace << "\n";
+ OS << "} // end namespace llvm\n";
+ OS << "#endif // GET_INSTRINFO_ENUM\n\n";
+
+ OS << "#ifdef GET_INSTRINFO_SCHED_ENUM\n";
+ OS << "#undef GET_INSTRINFO_SCHED_ENUM\n";
+ OS << "namespace llvm {\n\n";
+ OS << "namespace " << Namespace << " {\n";
+ OS << "namespace Sched {\n";
+ OS << " enum {\n";
+ Num = 0;
+ // One enumerator per explicitly defined scheduling class.
+ for (const auto &Class : SchedModels.explicit_classes())
+ OS << " " << Class.Name << "\t= " << Num++ << ",\n";
+ OS << " SCHED_LIST_END = " << Num << "\n";
+ OS << " };\n";
+ OS << "} // end namespace Sched\n";
+ OS << "} // end namespace " << Namespace << "\n";
+ OS << "} // end namespace llvm\n";
+
+ OS << "#endif // GET_INSTRINFO_SCHED_ENUM\n\n";
+}
+
+namespace llvm {
+
+/// Entry point for the -gen-instr-info TableGen backend: runs the
+/// InstrInfoEmitter over the record keeper, then emits the instruction
+/// mapping tables.
+void EmitInstrInfo(RecordKeeper &RK, raw_ostream &OS) {
+ RK.startTimer("Analyze DAG patterns");
+ InstrInfoEmitter(RK).run(OS);
+ RK.startTimer("Emit map table");
+ EmitMapTable(RK, OS);
+}
+
+} // end namespace llvm
diff --git a/contrib/libs/llvm16/utils/TableGen/IntrinsicEmitter.cpp b/contrib/libs/llvm16/utils/TableGen/IntrinsicEmitter.cpp
new file mode 100644
index 0000000000..946a584175
--- /dev/null
+++ b/contrib/libs/llvm16/utils/TableGen/IntrinsicEmitter.cpp
@@ -0,0 +1,961 @@
+//===- IntrinsicEmitter.cpp - Generate intrinsic information --------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// This tablegen backend emits information about intrinsic functions.
+//
+//===----------------------------------------------------------------------===//
+
+#include "CodeGenIntrinsics.h"
+#include "CodeGenTarget.h"
+#include "SequenceToOffsetTable.h"
+#include "TableGenBackends.h"
+#include "llvm/ADT/StringExtras.h"
+#include "llvm/Support/CommandLine.h"
+#include "llvm/TableGen/Error.h"
+#include "llvm/TableGen/Record.h"
+#include "llvm/TableGen/StringToOffsetTable.h"
+#include "llvm/TableGen/TableGenBackend.h"
+#include <algorithm>
+using namespace llvm;
+
+cl::OptionCategory GenIntrinsicCat("Options for -gen-intrinsic-enums");
+cl::opt<std::string>
+ IntrinsicPrefix("intrinsic-prefix",
+ cl::desc("Generate intrinsics with this target prefix"),
+ cl::value_desc("target prefix"), cl::cat(GenIntrinsicCat));
+
+namespace {
+class IntrinsicEmitter {
+ RecordKeeper &Records;
+
+public:
+ IntrinsicEmitter(RecordKeeper &R) : Records(R) {}
+
+ void run(raw_ostream &OS, bool Enums);
+
+ void EmitEnumInfo(const CodeGenIntrinsicTable &Ints, raw_ostream &OS);
+ void EmitTargetInfo(const CodeGenIntrinsicTable &Ints, raw_ostream &OS);
+ void EmitIntrinsicToNameTable(const CodeGenIntrinsicTable &Ints,
+ raw_ostream &OS);
+ void EmitIntrinsicToOverloadTable(const CodeGenIntrinsicTable &Ints,
+ raw_ostream &OS);
+ void EmitGenerator(const CodeGenIntrinsicTable &Ints, raw_ostream &OS);
+ void EmitAttributes(const CodeGenIntrinsicTable &Ints, raw_ostream &OS);
+ void EmitIntrinsicToBuiltinMap(const CodeGenIntrinsicTable &Ints, bool IsClang,
+ raw_ostream &OS);
+};
+} // End anonymous namespace
+
+//===----------------------------------------------------------------------===//
+// IntrinsicEmitter Implementation
+//===----------------------------------------------------------------------===//
+
+void IntrinsicEmitter::run(raw_ostream &OS, bool Enums) {
+ emitSourceFileHeader("Intrinsic Function Source Fragment", OS);
+
+ CodeGenIntrinsicTable Ints(Records);
+
+ if (Enums) {
+ // Emit the enum information.
+ EmitEnumInfo(Ints, OS);
+ } else {
+ // Emit the target metadata.
+ EmitTargetInfo(Ints, OS);
+
+ // Emit the intrinsic ID -> name table.
+ EmitIntrinsicToNameTable(Ints, OS);
+
+ // Emit the intrinsic ID -> overload table.
+ EmitIntrinsicToOverloadTable(Ints, OS);
+
+ // Emit the intrinsic declaration generator.
+ EmitGenerator(Ints, OS);
+
+ // Emit the intrinsic parameter attributes.
+ EmitAttributes(Ints, OS);
+
+ // Emit code to translate GCC builtins into LLVM intrinsics.
+ EmitIntrinsicToBuiltinMap(Ints, true, OS);
+
+ // Emit code to translate MS builtins into LLVM intrinsics.
+ EmitIntrinsicToBuiltinMap(Ints, false, OS);
+ }
+}
+
+void IntrinsicEmitter::EmitEnumInfo(const CodeGenIntrinsicTable &Ints,
+ raw_ostream &OS) {
+ // Find the TargetSet for which to generate enums. There will be an initial
+ // set with an empty target prefix which will include target independent
+ // intrinsics like dbg.value.
+ const CodeGenIntrinsicTable::TargetSet *Set = nullptr;
+ for (const auto &Target : Ints.Targets) {
+ if (Target.Name == IntrinsicPrefix) {
+ Set = &Target;
+ break;
+ }
+ }
+ if (!Set) {
+ std::vector<std::string> KnownTargets;
+ for (const auto &Target : Ints.Targets)
+ if (!Target.Name.empty())
+ KnownTargets.push_back(Target.Name);
+ PrintFatalError("tried to generate intrinsics for unknown target " +
+ IntrinsicPrefix +
+ "\nKnown targets are: " + join(KnownTargets, ", ") + "\n");
+ }
+
+ // Generate a complete header for target specific intrinsics.
+ if (!IntrinsicPrefix.empty()) {
+ std::string UpperPrefix = StringRef(IntrinsicPrefix).upper();
+ OS << "#ifndef LLVM_IR_INTRINSIC_" << UpperPrefix << "_ENUMS_H\n";
+ OS << "#define LLVM_IR_INTRINSIC_" << UpperPrefix << "_ENUMS_H\n\n";
+ OS << "namespace llvm {\n";
+ OS << "namespace Intrinsic {\n";
+ OS << "enum " << UpperPrefix << "Intrinsics : unsigned {\n";
+ }
+
+ OS << "// Enum values for intrinsics\n";
+ for (unsigned i = Set->Offset, e = Set->Offset + Set->Count; i != e; ++i) {
+ OS << " " << Ints[i].EnumName;
+
+ // Assign a value to the first intrinsic in this target set so that all
+ // intrinsic ids are distinct.
+ if (i == Set->Offset)
+ OS << " = " << (Set->Offset + 1);
+
+ OS << ", ";
+ if (Ints[i].EnumName.size() < 40)
+ OS.indent(40 - Ints[i].EnumName.size());
+ OS << " // " << Ints[i].Name << "\n";
+ }
+
+ // Emit num_intrinsics into the target neutral enum.
+ if (IntrinsicPrefix.empty()) {
+ OS << " num_intrinsics = " << (Ints.size() + 1) << "\n";
+ } else {
+ OS << "}; // enum\n";
+ OS << "} // namespace Intrinsic\n";
+ OS << "} // namespace llvm\n\n";
+ OS << "#endif\n";
+ }
+}
+
+void IntrinsicEmitter::EmitTargetInfo(const CodeGenIntrinsicTable &Ints,
+ raw_ostream &OS) {
+ OS << "// Target mapping\n";
+ OS << "#ifdef GET_INTRINSIC_TARGET_DATA\n";
+ OS << "struct IntrinsicTargetInfo {\n"
+ << " llvm::StringLiteral Name;\n"
+ << " size_t Offset;\n"
+ << " size_t Count;\n"
+ << "};\n";
+ OS << "static constexpr IntrinsicTargetInfo TargetInfos[] = {\n";
+ for (auto Target : Ints.Targets)
+ OS << " {llvm::StringLiteral(\"" << Target.Name << "\"), " << Target.Offset
+ << ", " << Target.Count << "},\n";
+ OS << "};\n";
+ OS << "#endif\n\n";
+}
+
+void IntrinsicEmitter::EmitIntrinsicToNameTable(
+ const CodeGenIntrinsicTable &Ints, raw_ostream &OS) {
+ OS << "// Intrinsic ID to name table\n";
+ OS << "#ifdef GET_INTRINSIC_NAME_TABLE\n";
+ OS << " // Note that entry #0 is the invalid intrinsic!\n";
+ for (unsigned i = 0, e = Ints.size(); i != e; ++i)
+ OS << " \"" << Ints[i].Name << "\",\n";
+ OS << "#endif\n\n";
+}
+
+void IntrinsicEmitter::EmitIntrinsicToOverloadTable(
+ const CodeGenIntrinsicTable &Ints, raw_ostream &OS) {
+ OS << "// Intrinsic ID to overload bitset\n";
+ OS << "#ifdef GET_INTRINSIC_OVERLOAD_TABLE\n";
+ OS << "static const uint8_t OTable[] = {\n";
+ OS << " 0";
+ for (unsigned i = 0, e = Ints.size(); i != e; ++i) {
+ // Add one to the index so we emit a null bit for the invalid #0 intrinsic.
+ if ((i+1)%8 == 0)
+ OS << ",\n 0";
+ if (Ints[i].isOverloaded)
+ OS << " | (1<<" << (i+1)%8 << ')';
+ }
+ OS << "\n};\n\n";
+ // OTable contains a true bit at the position if the intrinsic is overloaded.
+ OS << "return (OTable[id/8] & (1 << (id%8))) != 0;\n";
+ OS << "#endif\n\n";
+}
+
+
+// NOTE: This must be kept in synch with the copy in lib/IR/Function.cpp!
+enum IIT_Info {
+ // Common values should be encoded with 0-15.
+ IIT_Done = 0,
+ IIT_I1 = 1,
+ IIT_I8 = 2,
+ IIT_I16 = 3,
+ IIT_I32 = 4,
+ IIT_I64 = 5,
+ IIT_F16 = 6,
+ IIT_F32 = 7,
+ IIT_F64 = 8,
+ IIT_V2 = 9,
+ IIT_V4 = 10,
+ IIT_V8 = 11,
+ IIT_V16 = 12,
+ IIT_V32 = 13,
+ IIT_PTR = 14,
+ IIT_ARG = 15,
+
+ // Values from 16+ are only encodable with the inefficient encoding.
+ IIT_V64 = 16,
+ IIT_MMX = 17,
+ IIT_TOKEN = 18,
+ IIT_METADATA = 19,
+ IIT_EMPTYSTRUCT = 20,
+ IIT_STRUCT2 = 21,
+ IIT_STRUCT3 = 22,
+ IIT_STRUCT4 = 23,
+ IIT_STRUCT5 = 24,
+ IIT_EXTEND_ARG = 25,
+ IIT_TRUNC_ARG = 26,
+ IIT_ANYPTR = 27,
+ IIT_V1 = 28,
+ IIT_VARARG = 29,
+ IIT_HALF_VEC_ARG = 30,
+ IIT_SAME_VEC_WIDTH_ARG = 31,
+ IIT_PTR_TO_ARG = 32,
+ IIT_PTR_TO_ELT = 33,
+ IIT_VEC_OF_ANYPTRS_TO_ELT = 34,
+ IIT_I128 = 35,
+ IIT_V512 = 36,
+ IIT_V1024 = 37,
+ IIT_STRUCT6 = 38,
+ IIT_STRUCT7 = 39,
+ IIT_STRUCT8 = 40,
+ IIT_F128 = 41,
+ IIT_VEC_ELEMENT = 42,
+ IIT_SCALABLE_VEC = 43,
+ IIT_SUBDIVIDE2_ARG = 44,
+ IIT_SUBDIVIDE4_ARG = 45,
+ IIT_VEC_OF_BITCASTS_TO_INT = 46,
+ IIT_V128 = 47,
+ IIT_BF16 = 48,
+ IIT_STRUCT9 = 49,
+ IIT_V256 = 50,
+ IIT_AMX = 51,
+ IIT_PPCF128 = 52,
+ IIT_V3 = 53,
+ IIT_EXTERNREF = 54,
+ IIT_FUNCREF = 55,
+ IIT_ANYPTR_TO_ELT = 56,
+ IIT_I2 = 57,
+ IIT_I4 = 58,
+};
+
+static void EncodeFixedValueType(MVT::SimpleValueType VT,
+ std::vector<unsigned char> &Sig) {
+ // clang-format off
+ if (MVT(VT).isInteger()) {
+ unsigned BitWidth = MVT(VT).getFixedSizeInBits();
+ switch (BitWidth) {
+ default: PrintFatalError("unhandled integer type width in intrinsic!");
+ case 1: return Sig.push_back(IIT_I1);
+ case 2: return Sig.push_back(IIT_I2);
+ case 4: return Sig.push_back(IIT_I4);
+ case 8: return Sig.push_back(IIT_I8);
+ case 16: return Sig.push_back(IIT_I16);
+ case 32: return Sig.push_back(IIT_I32);
+ case 64: return Sig.push_back(IIT_I64);
+ case 128: return Sig.push_back(IIT_I128);
+ }
+ }
+
+ switch (VT) {
+ default: PrintFatalError("unhandled MVT in intrinsic!");
+ case MVT::f16: return Sig.push_back(IIT_F16);
+ case MVT::bf16: return Sig.push_back(IIT_BF16);
+ case MVT::f32: return Sig.push_back(IIT_F32);
+ case MVT::f64: return Sig.push_back(IIT_F64);
+ case MVT::f128: return Sig.push_back(IIT_F128);
+ case MVT::ppcf128: return Sig.push_back(IIT_PPCF128);
+ case MVT::token: return Sig.push_back(IIT_TOKEN);
+ case MVT::Metadata: return Sig.push_back(IIT_METADATA);
+ case MVT::x86mmx: return Sig.push_back(IIT_MMX);
+ case MVT::x86amx: return Sig.push_back(IIT_AMX);
+ // MVT::OtherVT is used to mean the empty struct type here.
+ case MVT::Other: return Sig.push_back(IIT_EMPTYSTRUCT);
+ // MVT::isVoid is used to represent varargs here.
+ case MVT::isVoid: return Sig.push_back(IIT_VARARG);
+ case MVT::externref:
+ return Sig.push_back(IIT_EXTERNREF);
+ case MVT::funcref:
+ return Sig.push_back(IIT_FUNCREF);
+ }
+ // clang-format on
+}
+
+#if defined(_MSC_VER) && !defined(__clang__)
+#pragma optimize("",off) // MSVC 2015 optimizer can't deal with this function.
+#endif
+
+static void EncodeFixedType(Record *R, std::vector<unsigned char> &ArgCodes,
+ unsigned &NextArgCode,
+ std::vector<unsigned char> &Sig,
+ ArrayRef<unsigned char> Mapping) {
+
+ if (R->isSubClassOf("LLVMMatchType")) {
+ unsigned Number = Mapping[R->getValueAsInt("Number")];
+ assert(Number < ArgCodes.size() && "Invalid matching number!");
+ if (R->isSubClassOf("LLVMExtendedType"))
+ Sig.push_back(IIT_EXTEND_ARG);
+ else if (R->isSubClassOf("LLVMTruncatedType"))
+ Sig.push_back(IIT_TRUNC_ARG);
+ else if (R->isSubClassOf("LLVMHalfElementsVectorType"))
+ Sig.push_back(IIT_HALF_VEC_ARG);
+ else if (R->isSubClassOf("LLVMScalarOrSameVectorWidth")) {
+ Sig.push_back(IIT_SAME_VEC_WIDTH_ARG);
+ Sig.push_back((Number << 3) | ArgCodes[Number]);
+ MVT::SimpleValueType VT = getValueType(R->getValueAsDef("ElTy"));
+ EncodeFixedValueType(VT, Sig);
+ return;
+ }
+ else if (R->isSubClassOf("LLVMPointerTo"))
+ Sig.push_back(IIT_PTR_TO_ARG);
+ else if (R->isSubClassOf("LLVMVectorOfAnyPointersToElt")) {
+ Sig.push_back(IIT_VEC_OF_ANYPTRS_TO_ELT);
+ // Encode overloaded ArgNo
+ Sig.push_back(NextArgCode++);
+ // Encode LLVMMatchType<Number> ArgNo
+ Sig.push_back(Number);
+ return;
+ } else if (R->isSubClassOf("LLVMAnyPointerToElt")) {
+ Sig.push_back(IIT_ANYPTR_TO_ELT);
+ // Encode overloaded ArgNo
+ Sig.push_back(NextArgCode++);
+ // Encode LLVMMatchType<Number> ArgNo
+ Sig.push_back(Number);
+ return;
+ } else if (R->isSubClassOf("LLVMPointerToElt"))
+ Sig.push_back(IIT_PTR_TO_ELT);
+ else if (R->isSubClassOf("LLVMVectorElementType"))
+ Sig.push_back(IIT_VEC_ELEMENT);
+ else if (R->isSubClassOf("LLVMSubdivide2VectorType"))
+ Sig.push_back(IIT_SUBDIVIDE2_ARG);
+ else if (R->isSubClassOf("LLVMSubdivide4VectorType"))
+ Sig.push_back(IIT_SUBDIVIDE4_ARG);
+ else if (R->isSubClassOf("LLVMVectorOfBitcastsToInt"))
+ Sig.push_back(IIT_VEC_OF_BITCASTS_TO_INT);
+ else
+ Sig.push_back(IIT_ARG);
+ return Sig.push_back((Number << 3) | 7 /*IITDescriptor::AK_MatchType*/);
+ }
+
+ MVT::SimpleValueType VT = getValueType(R->getValueAsDef("VT"));
+
+ unsigned Tmp = 0;
+ switch (VT) {
+ default: break;
+ case MVT::iPTRAny: ++Tmp; [[fallthrough]];
+ case MVT::vAny: ++Tmp; [[fallthrough]];
+ case MVT::fAny: ++Tmp; [[fallthrough]];
+ case MVT::iAny: ++Tmp; [[fallthrough]];
+ case MVT::Any: {
+ // If this is an "any" valuetype, then the type is the type of the next
+ // type in the list specified to getIntrinsic().
+ Sig.push_back(IIT_ARG);
+
+ // Figure out what arg # this is consuming, and remember what kind it was.
+ assert(NextArgCode < ArgCodes.size() && ArgCodes[NextArgCode] == Tmp &&
+ "Invalid or no ArgCode associated with overloaded VT!");
+ unsigned ArgNo = NextArgCode++;
+
+ // Encode what sort of argument it must be in the low 3 bits of the ArgNo.
+ return Sig.push_back((ArgNo << 3) | Tmp);
+ }
+
+ case MVT::iPTR: {
+ unsigned AddrSpace = 0;
+ if (R->isSubClassOf("LLVMQualPointerType")) {
+ AddrSpace = R->getValueAsInt("AddrSpace");
+ assert(AddrSpace < 256 && "Address space exceeds 255");
+ }
+ if (AddrSpace) {
+ Sig.push_back(IIT_ANYPTR);
+ Sig.push_back(AddrSpace);
+ } else {
+ Sig.push_back(IIT_PTR);
+ }
+ return EncodeFixedType(R->getValueAsDef("ElTy"), ArgCodes, NextArgCode, Sig,
+ Mapping);
+ }
+ }
+
+ if (MVT(VT).isVector()) {
+ MVT VVT = VT;
+ if (VVT.isScalableVector())
+ Sig.push_back(IIT_SCALABLE_VEC);
+ switch (VVT.getVectorMinNumElements()) {
+ default: PrintFatalError("unhandled vector type width in intrinsic!");
+ case 1: Sig.push_back(IIT_V1); break;
+ case 2: Sig.push_back(IIT_V2); break;
+ case 3: Sig.push_back(IIT_V3); break;
+ case 4: Sig.push_back(IIT_V4); break;
+ case 8: Sig.push_back(IIT_V8); break;
+ case 16: Sig.push_back(IIT_V16); break;
+ case 32: Sig.push_back(IIT_V32); break;
+ case 64: Sig.push_back(IIT_V64); break;
+ case 128: Sig.push_back(IIT_V128); break;
+ case 256: Sig.push_back(IIT_V256); break;
+ case 512: Sig.push_back(IIT_V512); break;
+ case 1024: Sig.push_back(IIT_V1024); break;
+ }
+
+ return EncodeFixedValueType(VVT.getVectorElementType().SimpleTy, Sig);
+ }
+
+ EncodeFixedValueType(VT, Sig);
+}
+
+static void UpdateArgCodes(Record *R, std::vector<unsigned char> &ArgCodes,
+ unsigned int &NumInserted,
+ SmallVectorImpl<unsigned char> &Mapping) {
+ if (R->isSubClassOf("LLVMMatchType")) {
+ if (R->isSubClassOf("LLVMVectorOfAnyPointersToElt")) {
+ ArgCodes.push_back(3 /*vAny*/);
+ ++NumInserted;
+ } else if (R->isSubClassOf("LLVMAnyPointerToElt")) {
+ ArgCodes.push_back(4 /*iPTRAny*/);
+ ++NumInserted;
+ }
+ return;
+ }
+
+ unsigned Tmp = 0;
+ switch (getValueType(R->getValueAsDef("VT"))) {
+ default: break;
+ case MVT::iPTR:
+ UpdateArgCodes(R->getValueAsDef("ElTy"), ArgCodes, NumInserted, Mapping);
+ break;
+ case MVT::iPTRAny:
+ ++Tmp;
+ [[fallthrough]];
+ case MVT::vAny:
+ ++Tmp;
+ [[fallthrough]];
+ case MVT::fAny:
+ ++Tmp;
+ [[fallthrough]];
+ case MVT::iAny:
+ ++Tmp;
+ [[fallthrough]];
+ case MVT::Any:
+ unsigned OriginalIdx = ArgCodes.size() - NumInserted;
+ assert(OriginalIdx >= Mapping.size());
+ Mapping.resize(OriginalIdx+1);
+ Mapping[OriginalIdx] = ArgCodes.size();
+ ArgCodes.push_back(Tmp);
+ break;
+ }
+}
+
+#if defined(_MSC_VER) && !defined(__clang__)
+#pragma optimize("",on)
+#endif
+
+/// ComputeFixedEncoding - If we can encode the type signature for this
+/// intrinsic into 32 bits, return it. If not, return ~0U.
+static void ComputeFixedEncoding(const CodeGenIntrinsic &Int,
+ std::vector<unsigned char> &TypeSig) {
+ std::vector<unsigned char> ArgCodes;
+
+ // Add codes for any overloaded result VTs.
+ unsigned int NumInserted = 0;
+ SmallVector<unsigned char, 8> ArgMapping;
+ for (unsigned i = 0, e = Int.IS.RetVTs.size(); i != e; ++i)
+ UpdateArgCodes(Int.IS.RetTypeDefs[i], ArgCodes, NumInserted, ArgMapping);
+
+ // Add codes for any overloaded operand VTs.
+ for (unsigned i = 0, e = Int.IS.ParamTypeDefs.size(); i != e; ++i)
+ UpdateArgCodes(Int.IS.ParamTypeDefs[i], ArgCodes, NumInserted, ArgMapping);
+
+ unsigned NextArgCode = 0;
+ if (Int.IS.RetVTs.empty())
+ TypeSig.push_back(IIT_Done);
+ else if (Int.IS.RetVTs.size() == 1 &&
+ Int.IS.RetVTs[0] == MVT::isVoid)
+ TypeSig.push_back(IIT_Done);
+ else {
+ switch (Int.IS.RetVTs.size()) {
+ case 1: break;
+ case 2: TypeSig.push_back(IIT_STRUCT2); break;
+ case 3: TypeSig.push_back(IIT_STRUCT3); break;
+ case 4: TypeSig.push_back(IIT_STRUCT4); break;
+ case 5: TypeSig.push_back(IIT_STRUCT5); break;
+ case 6: TypeSig.push_back(IIT_STRUCT6); break;
+ case 7: TypeSig.push_back(IIT_STRUCT7); break;
+ case 8: TypeSig.push_back(IIT_STRUCT8); break;
+ case 9: TypeSig.push_back(IIT_STRUCT9); break;
+ default: llvm_unreachable("Unhandled case in struct");
+ }
+
+ for (unsigned i = 0, e = Int.IS.RetVTs.size(); i != e; ++i)
+ EncodeFixedType(Int.IS.RetTypeDefs[i], ArgCodes, NextArgCode, TypeSig,
+ ArgMapping);
+ }
+
+ for (unsigned i = 0, e = Int.IS.ParamTypeDefs.size(); i != e; ++i)
+ EncodeFixedType(Int.IS.ParamTypeDefs[i], ArgCodes, NextArgCode, TypeSig,
+ ArgMapping);
+}
+
+static void printIITEntry(raw_ostream &OS, unsigned char X) {
+ OS << (unsigned)X;
+}
+
+void IntrinsicEmitter::EmitGenerator(const CodeGenIntrinsicTable &Ints,
+ raw_ostream &OS) {
+ // If we can compute a 32-bit fixed encoding for this intrinsic, do so and
+ // capture it in this vector, otherwise store a ~0U.
+ std::vector<unsigned> FixedEncodings;
+
+ SequenceToOffsetTable<std::vector<unsigned char> > LongEncodingTable;
+
+ std::vector<unsigned char> TypeSig;
+
+ // Compute the unique argument type info.
+ for (unsigned i = 0, e = Ints.size(); i != e; ++i) {
+ // Get the signature for the intrinsic.
+ TypeSig.clear();
+ ComputeFixedEncoding(Ints[i], TypeSig);
+
+ // Check to see if we can encode it into a 32-bit word. We can only encode
+ // 8 nibbles into a 32-bit word.
+ if (TypeSig.size() <= 8) {
+ bool Failed = false;
+ unsigned Result = 0;
+ for (unsigned i = 0, e = TypeSig.size(); i != e; ++i) {
+ // If we had an unencodable argument, bail out.
+ if (TypeSig[i] > 15) {
+ Failed = true;
+ break;
+ }
+ Result = (Result << 4) | TypeSig[e-i-1];
+ }
+
+ // If this could be encoded into a 31-bit word, return it.
+ if (!Failed && (Result >> 31) == 0) {
+ FixedEncodings.push_back(Result);
+ continue;
+ }
+ }
+
+ // Otherwise, we're going to unique the sequence into the
+ // LongEncodingTable, and use its offset in the 32-bit table instead.
+ LongEncodingTable.add(TypeSig);
+
+ // This is a placehold that we'll replace after the table is laid out.
+ FixedEncodings.push_back(~0U);
+ }
+
+ LongEncodingTable.layout();
+
+ OS << "// Global intrinsic function declaration type table.\n";
+ OS << "#ifdef GET_INTRINSIC_GENERATOR_GLOBAL\n";
+
+ OS << "static const unsigned IIT_Table[] = {\n ";
+
+ for (unsigned i = 0, e = FixedEncodings.size(); i != e; ++i) {
+ if ((i & 7) == 7)
+ OS << "\n ";
+
+ // If the entry fit in the table, just emit it.
+ if (FixedEncodings[i] != ~0U) {
+ OS << "0x" << Twine::utohexstr(FixedEncodings[i]) << ", ";
+ continue;
+ }
+
+ TypeSig.clear();
+ ComputeFixedEncoding(Ints[i], TypeSig);
+
+
+ // Otherwise, emit the offset into the long encoding table. We emit it this
+ // way so that it is easier to read the offset in the .def file.
+ OS << "(1U<<31) | " << LongEncodingTable.get(TypeSig) << ", ";
+ }
+
+ OS << "0\n};\n\n";
+
+ // Emit the shared table of register lists.
+ OS << "static const unsigned char IIT_LongEncodingTable[] = {\n";
+ if (!LongEncodingTable.empty())
+ LongEncodingTable.emit(OS, printIITEntry);
+ OS << " 255\n};\n\n";
+
+ OS << "#endif\n\n"; // End of GET_INTRINSIC_GENERATOR_GLOBAL
+}
+
+namespace {
+std::optional<bool> compareFnAttributes(const CodeGenIntrinsic *L,
+ const CodeGenIntrinsic *R) {
+ // Sort throwing intrinsics after non-throwing intrinsics.
+ if (L->canThrow != R->canThrow)
+ return R->canThrow;
+
+ if (L->isNoDuplicate != R->isNoDuplicate)
+ return R->isNoDuplicate;
+
+ if (L->isNoMerge != R->isNoMerge)
+ return R->isNoMerge;
+
+ if (L->isNoReturn != R->isNoReturn)
+ return R->isNoReturn;
+
+ if (L->isNoCallback != R->isNoCallback)
+ return R->isNoCallback;
+
+ if (L->isNoSync != R->isNoSync)
+ return R->isNoSync;
+
+ if (L->isNoFree != R->isNoFree)
+ return R->isNoFree;
+
+ if (L->isWillReturn != R->isWillReturn)
+ return R->isWillReturn;
+
+ if (L->isCold != R->isCold)
+ return R->isCold;
+
+ if (L->isConvergent != R->isConvergent)
+ return R->isConvergent;
+
+ if (L->isSpeculatable != R->isSpeculatable)
+ return R->isSpeculatable;
+
+ if (L->hasSideEffects != R->hasSideEffects)
+ return R->hasSideEffects;
+
+ // Try to order by readonly/readnone attribute.
+ uint32_t LK = L->ME.toIntValue();
+ uint32_t RK = R->ME.toIntValue();
+ if (LK != RK) return (LK > RK);
+
+ return std::nullopt;
+}
+
+struct FnAttributeComparator {
+ bool operator()(const CodeGenIntrinsic *L, const CodeGenIntrinsic *R) const {
+ return compareFnAttributes(L, R).value_or(false);
+ }
+};
+
+struct AttributeComparator {
+ bool operator()(const CodeGenIntrinsic *L, const CodeGenIntrinsic *R) const {
+ if (std::optional<bool> Res = compareFnAttributes(L, R))
+ return *Res;
+
+ // Order by argument attributes.
+ // This is reliable because each side is already sorted internally.
+ return (L->ArgumentAttributes < R->ArgumentAttributes);
+ }
+};
+} // End anonymous namespace
+
+/// EmitAttributes - This emits the Intrinsic::getAttributes method.
+void IntrinsicEmitter::EmitAttributes(const CodeGenIntrinsicTable &Ints,
+ raw_ostream &OS) {
+ OS << "// Add parameter attributes that are not common to all intrinsics.\n";
+ OS << "#ifdef GET_INTRINSIC_ATTRIBUTES\n";
+
+ // Compute unique argument attribute sets.
+ std::map<SmallVector<CodeGenIntrinsic::ArgAttribute, 0>, unsigned>
+ UniqArgAttributes;
+ OS << "static AttributeSet getIntrinsicArgAttributeSet("
+ << "LLVMContext &C, unsigned ID) {\n"
+ << " switch (ID) {\n"
+ << " default: llvm_unreachable(\"Invalid attribute set number\");\n";
+ for (const CodeGenIntrinsic &Int : Ints) {
+ for (auto &Attrs : Int.ArgumentAttributes) {
+ if (Attrs.empty())
+ continue;
+
+ unsigned ID = UniqArgAttributes.size();
+ if (!UniqArgAttributes.try_emplace(Attrs, ID).second)
+ continue;
+
+ assert(is_sorted(Attrs) &&
+ "Argument attributes are not sorted");
+
+ OS << " case " << ID << ":\n";
+ OS << " return AttributeSet::get(C, {\n";
+ for (const CodeGenIntrinsic::ArgAttribute &Attr : Attrs) {
+ switch (Attr.Kind) {
+ case CodeGenIntrinsic::NoCapture:
+ OS << " Attribute::get(C, Attribute::NoCapture),\n";
+ break;
+ case CodeGenIntrinsic::NoAlias:
+ OS << " Attribute::get(C, Attribute::NoAlias),\n";
+ break;
+ case CodeGenIntrinsic::NoUndef:
+ OS << " Attribute::get(C, Attribute::NoUndef),\n";
+ break;
+ case CodeGenIntrinsic::NonNull:
+ OS << " Attribute::get(C, Attribute::NonNull),\n";
+ break;
+ case CodeGenIntrinsic::Returned:
+ OS << " Attribute::get(C, Attribute::Returned),\n";
+ break;
+ case CodeGenIntrinsic::ReadOnly:
+ OS << " Attribute::get(C, Attribute::ReadOnly),\n";
+ break;
+ case CodeGenIntrinsic::WriteOnly:
+ OS << " Attribute::get(C, Attribute::WriteOnly),\n";
+ break;
+ case CodeGenIntrinsic::ReadNone:
+ OS << " Attribute::get(C, Attribute::ReadNone),\n";
+ break;
+ case CodeGenIntrinsic::ImmArg:
+ OS << " Attribute::get(C, Attribute::ImmArg),\n";
+ break;
+ case CodeGenIntrinsic::Alignment:
+ OS << " Attribute::get(C, Attribute::Alignment, "
+ << Attr.Value << "),\n";
+ break;
+ }
+ }
+ OS << " });\n";
+ }
+ }
+ OS << " }\n";
+ OS << "}\n\n";
+
+ // Compute unique function attribute sets.
+ std::map<const CodeGenIntrinsic*, unsigned, FnAttributeComparator>
+ UniqFnAttributes;
+ OS << "static AttributeSet getIntrinsicFnAttributeSet("
+ << "LLVMContext &C, unsigned ID) {\n"
+ << " switch (ID) {\n"
+ << " default: llvm_unreachable(\"Invalid attribute set number\");\n";
+ for (const CodeGenIntrinsic &Intrinsic : Ints) {
+ unsigned ID = UniqFnAttributes.size();
+ if (!UniqFnAttributes.try_emplace(&Intrinsic, ID).second)
+ continue;
+
+ OS << " case " << ID << ":\n"
+ << " return AttributeSet::get(C, {\n";
+ if (!Intrinsic.canThrow)
+ OS << " Attribute::get(C, Attribute::NoUnwind),\n";
+ if (Intrinsic.isNoReturn)
+ OS << " Attribute::get(C, Attribute::NoReturn),\n";
+ if (Intrinsic.isNoCallback)
+ OS << " Attribute::get(C, Attribute::NoCallback),\n";
+ if (Intrinsic.isNoSync)
+ OS << " Attribute::get(C, Attribute::NoSync),\n";
+ if (Intrinsic.isNoFree)
+ OS << " Attribute::get(C, Attribute::NoFree),\n";
+ if (Intrinsic.isWillReturn)
+ OS << " Attribute::get(C, Attribute::WillReturn),\n";
+ if (Intrinsic.isCold)
+ OS << " Attribute::get(C, Attribute::Cold),\n";
+ if (Intrinsic.isNoDuplicate)
+ OS << " Attribute::get(C, Attribute::NoDuplicate),\n";
+ if (Intrinsic.isNoMerge)
+ OS << " Attribute::get(C, Attribute::NoMerge),\n";
+ if (Intrinsic.isConvergent)
+ OS << " Attribute::get(C, Attribute::Convergent),\n";
+ if (Intrinsic.isSpeculatable)
+ OS << " Attribute::get(C, Attribute::Speculatable),\n";
+
+ MemoryEffects ME = Intrinsic.ME;
+ // TODO: IntrHasSideEffects should affect not only readnone intrinsics.
+ if (ME.doesNotAccessMemory() && Intrinsic.hasSideEffects)
+ ME = MemoryEffects::unknown();
+ if (ME != MemoryEffects::unknown()) {
+ OS << " Attribute::getWithMemoryEffects(C, "
+ << "MemoryEffects::createFromIntValue(" << ME.toIntValue() << ")),\n";
+ }
+ OS << " });\n";
+ }
+ OS << " }\n";
+ OS << "}\n\n";
+ OS << "AttributeList Intrinsic::getAttributes(LLVMContext &C, ID id) {\n";
+
+ // Compute the maximum number of attribute arguments and the map
+ typedef std::map<const CodeGenIntrinsic*, unsigned,
+ AttributeComparator> UniqAttrMapTy;
+ UniqAttrMapTy UniqAttributes;
+ unsigned maxArgAttrs = 0;
+ unsigned AttrNum = 0;
+ for (unsigned i = 0, e = Ints.size(); i != e; ++i) {
+ const CodeGenIntrinsic &intrinsic = Ints[i];
+ maxArgAttrs =
+ std::max(maxArgAttrs, unsigned(intrinsic.ArgumentAttributes.size()));
+ unsigned &N = UniqAttributes[&intrinsic];
+ if (N) continue;
+ N = ++AttrNum;
+ assert(N < 65536 && "Too many unique attributes for table!");
+ }
+
+ // Emit an array of AttributeList. Most intrinsics will have at least one
+ // entry, for the function itself (index ~1), which is usually nounwind.
+ OS << " static const uint16_t IntrinsicsToAttributesMap[] = {\n";
+
+ for (unsigned i = 0, e = Ints.size(); i != e; ++i) {
+ const CodeGenIntrinsic &intrinsic = Ints[i];
+
+ OS << " " << UniqAttributes[&intrinsic] << ", // "
+ << intrinsic.Name << "\n";
+ }
+ OS << " };\n\n";
+
+ OS << " std::pair<unsigned, AttributeSet> AS[" << maxArgAttrs + 1 << "];\n";
+ OS << " unsigned NumAttrs = 0;\n";
+ OS << " if (id != 0) {\n";
+ OS << " switch(IntrinsicsToAttributesMap[id - 1]) {\n";
+ OS << " default: llvm_unreachable(\"Invalid attribute number\");\n";
+ for (auto UniqAttribute : UniqAttributes) {
+ OS << " case " << UniqAttribute.second << ": {\n";
+
+ const CodeGenIntrinsic &Intrinsic = *(UniqAttribute.first);
+
+ // Keep track of the number of attributes we're writing out.
+ unsigned numAttrs = 0;
+
+ for (const auto &[AttrIdx, Attrs] :
+ enumerate(Intrinsic.ArgumentAttributes)) {
+ if (Attrs.empty())
+ continue;
+
+ unsigned ID = UniqArgAttributes.find(Attrs)->second;
+ OS << " AS[" << numAttrs++ << "] = {" << AttrIdx
+ << ", getIntrinsicArgAttributeSet(C, " << ID << ")};\n";
+ }
+
+ if (!Intrinsic.canThrow ||
+ (Intrinsic.ME != MemoryEffects::unknown() &&
+ !Intrinsic.hasSideEffects) ||
+ Intrinsic.isNoReturn || Intrinsic.isNoCallback || Intrinsic.isNoSync ||
+ Intrinsic.isNoFree || Intrinsic.isWillReturn || Intrinsic.isCold ||
+ Intrinsic.isNoDuplicate || Intrinsic.isNoMerge ||
+ Intrinsic.isConvergent || Intrinsic.isSpeculatable) {
+ unsigned ID = UniqFnAttributes.find(&Intrinsic)->second;
+ OS << " AS[" << numAttrs++ << "] = {AttributeList::FunctionIndex, "
+ << "getIntrinsicFnAttributeSet(C, " << ID << ")};\n";
+ }
+
+ if (numAttrs) {
+ OS << " NumAttrs = " << numAttrs << ";\n";
+ OS << " break;\n";
+ OS << " }\n";
+ } else {
+ OS << " return AttributeList();\n";
+ OS << " }\n";
+ }
+ }
+
+ OS << " }\n";
+ OS << " }\n";
+ OS << " return AttributeList::get(C, ArrayRef(AS, NumAttrs));\n";
+ OS << "}\n";
+ OS << "#endif // GET_INTRINSIC_ATTRIBUTES\n\n";
+}
+
+void IntrinsicEmitter::EmitIntrinsicToBuiltinMap(
+ const CodeGenIntrinsicTable &Ints, bool IsClang, raw_ostream &OS) {
+ StringRef CompilerName = (IsClang ? "Clang" : "MS");
+ StringRef UpperCompilerName = (IsClang ? "CLANG" : "MS");
+ typedef std::map<std::string, std::map<std::string, std::string>> BIMTy;
+ BIMTy BuiltinMap;
+ StringToOffsetTable Table;
+ for (unsigned i = 0, e = Ints.size(); i != e; ++i) {
+ const std::string &BuiltinName =
+ IsClang ? Ints[i].ClangBuiltinName : Ints[i].MSBuiltinName;
+ if (!BuiltinName.empty()) {
+ // Get the map for this target prefix.
+ std::map<std::string, std::string> &BIM =
+ BuiltinMap[Ints[i].TargetPrefix];
+
+ if (!BIM.insert(std::make_pair(BuiltinName, Ints[i].EnumName)).second)
+ PrintFatalError(Ints[i].TheDef->getLoc(),
+ "Intrinsic '" + Ints[i].TheDef->getName() +
+ "': duplicate " + CompilerName + " builtin name!");
+ Table.GetOrAddStringOffset(BuiltinName);
+ }
+ }
+
+ OS << "// Get the LLVM intrinsic that corresponds to a builtin.\n";
+ OS << "// This is used by the C front-end. The builtin name is passed\n";
+ OS << "// in as BuiltinName, and a target prefix (e.g. 'ppc') is passed\n";
+ OS << "// in as TargetPrefix. The result is assigned to 'IntrinsicID'.\n";
+ OS << "#ifdef GET_LLVM_INTRINSIC_FOR_" << UpperCompilerName << "_BUILTIN\n";
+
+ OS << "Intrinsic::ID Intrinsic::getIntrinsicFor" << CompilerName
+ << "Builtin(const char "
+ << "*TargetPrefixStr, StringRef BuiltinNameStr) {\n";
+
+ if (Table.Empty()) {
+ OS << " return Intrinsic::not_intrinsic;\n";
+ OS << "}\n";
+ OS << "#endif\n\n";
+ return;
+ }
+
+ OS << " static const char BuiltinNames[] = {\n";
+ Table.EmitCharArray(OS);
+ OS << " };\n\n";
+
+ OS << " struct BuiltinEntry {\n";
+ OS << " Intrinsic::ID IntrinID;\n";
+ OS << " unsigned StrTabOffset;\n";
+ OS << " const char *getName() const {\n";
+ OS << " return &BuiltinNames[StrTabOffset];\n";
+ OS << " }\n";
+ OS << " bool operator<(StringRef RHS) const {\n";
+ OS << " return strncmp(getName(), RHS.data(), RHS.size()) < 0;\n";
+ OS << " }\n";
+ OS << " };\n";
+
+ OS << " StringRef TargetPrefix(TargetPrefixStr);\n\n";
+
+ // Note: this could emit significantly better code if we cared.
+ for (auto &I : BuiltinMap) {
+ OS << " ";
+ if (!I.first.empty())
+ OS << "if (TargetPrefix == \"" << I.first << "\") ";
+ else
+ OS << "/* Target Independent Builtins */ ";
+ OS << "{\n";
+
+ // Emit the comparisons for this target prefix.
+ OS << " static const BuiltinEntry " << I.first << "Names[] = {\n";
+ for (const auto &P : I.second) {
+ OS << " {Intrinsic::" << P.second << ", "
+ << Table.GetOrAddStringOffset(P.first) << "}, // " << P.first << "\n";
+ }
+ OS << " };\n";
+ OS << " auto I = std::lower_bound(std::begin(" << I.first << "Names),\n";
+ OS << " std::end(" << I.first << "Names),\n";
+ OS << " BuiltinNameStr);\n";
+ OS << " if (I != std::end(" << I.first << "Names) &&\n";
+ OS << " I->getName() == BuiltinNameStr)\n";
+ OS << " return I->IntrinID;\n";
+ OS << " }\n";
+ }
+ OS << " return ";
+ OS << "Intrinsic::not_intrinsic;\n";
+ OS << "}\n";
+ OS << "#endif\n\n";
+}
+
+void llvm::EmitIntrinsicEnums(RecordKeeper &RK, raw_ostream &OS) {
+ IntrinsicEmitter(RK).run(OS, /*Enums=*/true);
+}
+
+void llvm::EmitIntrinsicImpl(RecordKeeper &RK, raw_ostream &OS) {
+ IntrinsicEmitter(RK).run(OS, /*Enums=*/false);
+}
diff --git a/contrib/libs/llvm16/utils/TableGen/OptEmitter.cpp b/contrib/libs/llvm16/utils/TableGen/OptEmitter.cpp
new file mode 100644
index 0000000000..7fcf3074e0
--- /dev/null
+++ b/contrib/libs/llvm16/utils/TableGen/OptEmitter.cpp
@@ -0,0 +1,84 @@
+//===- OptEmitter.cpp - Helper for emitting options.----------- -----------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+#include "OptEmitter.h"
+#include "llvm/ADT/Twine.h"
+#include "llvm/TableGen/Error.h"
+#include "llvm/TableGen/Record.h"
+#include <cctype>
+#include <cstring>
+
+namespace llvm {
+
// Ordering on Info. The logic should match with the consumer-side function in
// llvm/Option/OptTable.h.
// FIXME: Make this take StringRefs instead of null terminated strings to
// simplify callers.
static int StrCmpOptionName(const char *A, const char *B) {
  const char *PA = A, *PB = B;
  for (;;) {
    char CA = tolower(*PA), CB = tolower(*PB);
    if (CA != CB) {
      // One string ran out first: it is a case-insensitive prefix of the
      // other, and a prefix orders *after* the longer string.
      if (CA == '\0')
        return 1;
      if (CB == '\0')
        return -1;
      // Otherwise plain lexicographic order on the folded characters.
      return (CA < CB) ? -1 : 1;
    }
    // Equal ignoring case all the way to the terminator: break the tie with
    // a case-sensitive comparison so the ordering stays total.
    if (CA == '\0')
      return strcmp(A, B);
    ++PA;
    ++PB;
  }
}
+
+int CompareOptionRecords(Record *const *Av, Record *const *Bv) {
+ const Record *A = *Av;
+ const Record *B = *Bv;
+
+ // Sentinel options precede all others and are only ordered by precedence.
+ bool ASent = A->getValueAsDef("Kind")->getValueAsBit("Sentinel");
+ bool BSent = B->getValueAsDef("Kind")->getValueAsBit("Sentinel");
+ if (ASent != BSent)
+ return ASent ? -1 : 1;
+
+ // Compare options by name, unless they are sentinels.
+ if (!ASent)
+ if (int Cmp = StrCmpOptionName(A->getValueAsString("Name").str().c_str(),
+ B->getValueAsString("Name").str().c_str()))
+ return Cmp;
+
+ if (!ASent) {
+ std::vector<StringRef> APrefixes = A->getValueAsListOfStrings("Prefixes");
+ std::vector<StringRef> BPrefixes = B->getValueAsListOfStrings("Prefixes");
+
+ for (std::vector<StringRef>::const_iterator APre = APrefixes.begin(),
+ AEPre = APrefixes.end(),
+ BPre = BPrefixes.begin(),
+ BEPre = BPrefixes.end();
+ APre != AEPre && BPre != BEPre; ++APre, ++BPre) {
+ if (int Cmp = StrCmpOptionName(APre->str().c_str(), BPre->str().c_str()))
+ return Cmp;
+ }
+ }
+
+ // Then by the kind precedence;
+ int APrec = A->getValueAsDef("Kind")->getValueAsInt("Precedence");
+ int BPrec = B->getValueAsDef("Kind")->getValueAsInt("Precedence");
+ if (APrec == BPrec && A->getValueAsListOfStrings("Prefixes") ==
+ B->getValueAsListOfStrings("Prefixes")) {
+ PrintError(A->getLoc(), Twine("Option is equivalent to"));
+ PrintError(B->getLoc(), Twine("Other defined here"));
+ PrintFatalError("Equivalent Options found.");
+ }
+ return APrec < BPrec ? -1 : 1;
+}
+
+} // namespace llvm
diff --git a/contrib/libs/llvm16/utils/TableGen/OptEmitter.h b/contrib/libs/llvm16/utils/TableGen/OptEmitter.h
new file mode 100644
index 0000000000..c8f9246ef1
--- /dev/null
+++ b/contrib/libs/llvm16/utils/TableGen/OptEmitter.h
@@ -0,0 +1,16 @@
+//===- OptEmitter.h - Helper for emitting options. --------------*- C++ -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
#ifndef LLVM_UTILS_TABLEGEN_OPTEMITTER_H
#define LLVM_UTILS_TABLEGEN_OPTEMITTER_H

namespace llvm {
class Record;
// Comparator suitable for array_pod_sort: orders option records by sentinel
// status, case-insensitive name, prefixes, then kind precedence; fatal error
// on two indistinguishable options. Defined in OptEmitter.cpp.
int CompareOptionRecords(Record *const *Av, Record *const *Bv);
} // namespace llvm
#endif
diff --git a/contrib/libs/llvm16/utils/TableGen/OptParserEmitter.cpp b/contrib/libs/llvm16/utils/TableGen/OptParserEmitter.cpp
new file mode 100644
index 0000000000..d363191bd9
--- /dev/null
+++ b/contrib/libs/llvm16/utils/TableGen/OptParserEmitter.cpp
@@ -0,0 +1,502 @@
+//===- OptParserEmitter.cpp - Table Driven Command Line Parsing -----------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+#include "OptEmitter.h"
+#include "llvm/ADT/STLExtras.h"
+#include "llvm/ADT/SmallString.h"
+#include "llvm/ADT/Twine.h"
+#include "llvm/Support/raw_ostream.h"
+#include "llvm/TableGen/Record.h"
+#include "llvm/TableGen/TableGenBackend.h"
+#include <cstring>
+#include <map>
+#include <memory>
+
+using namespace llvm;
+
+static std::string getOptionName(const Record &R) {
+ // Use the record name unless EnumName is defined.
+ if (isa<UnsetInit>(R.getValueInit("EnumName")))
+ return std::string(R.getName());
+
+ return std::string(R.getValueAsString("EnumName"));
+}
+
+static raw_ostream &write_cstring(raw_ostream &OS, llvm::StringRef Str) {
+ OS << '"';
+ OS.write_escaped(Str);
+ OS << '"';
+ return OS;
+}
+
+static std::string getOptionSpelling(const Record &R, size_t &PrefixLength) {
+ std::vector<StringRef> Prefixes = R.getValueAsListOfStrings("Prefixes");
+ StringRef Name = R.getValueAsString("Name");
+
+ if (Prefixes.empty()) {
+ PrefixLength = 0;
+ return Name.str();
+ }
+
+ PrefixLength = Prefixes[0].size();
+ return (Twine(Prefixes[0]) + Twine(Name)).str();
+}
+
+static std::string getOptionSpelling(const Record &R) {
+ size_t PrefixLength;
+ return getOptionSpelling(R, PrefixLength);
+}
+
+static void emitNameUsingSpelling(raw_ostream &OS, const Record &R) {
+ size_t PrefixLength;
+ OS << "llvm::StringLiteral(";
+ write_cstring(
+ OS, StringRef(getOptionSpelling(R, PrefixLength)).substr(PrefixLength));
+ OS << ")";
+}
+
+class MarshallingInfo {
+public:
+ static constexpr const char *MacroName = "OPTION_WITH_MARSHALLING";
+ const Record &R;
+ bool ShouldAlwaysEmit;
+ StringRef MacroPrefix;
+ StringRef KeyPath;
+ StringRef DefaultValue;
+ StringRef NormalizedValuesScope;
+ StringRef ImpliedCheck;
+ StringRef ImpliedValue;
+ StringRef ShouldParse;
+ StringRef Normalizer;
+ StringRef Denormalizer;
+ StringRef ValueMerger;
+ StringRef ValueExtractor;
+ int TableIndex = -1;
+ std::vector<StringRef> Values;
+ std::vector<StringRef> NormalizedValues;
+ std::string ValueTableName;
+
+ static size_t NextTableIndex;
+
+ static constexpr const char *ValueTablePreamble = R"(
+struct SimpleEnumValue {
+ const char *Name;
+ unsigned Value;
+};
+
+struct SimpleEnumValueTable {
+ const SimpleEnumValue *Table;
+ unsigned Size;
+};
+)";
+
+ static constexpr const char *ValueTablesDecl =
+ "static const SimpleEnumValueTable SimpleEnumValueTables[] = ";
+
+ MarshallingInfo(const Record &R) : R(R) {}
+
+ std::string getMacroName() const {
+ return (MacroPrefix + MarshallingInfo::MacroName).str();
+ }
+
+ void emit(raw_ostream &OS) const {
+ write_cstring(OS, StringRef(getOptionSpelling(R)));
+ OS << ", ";
+ OS << ShouldParse;
+ OS << ", ";
+ OS << ShouldAlwaysEmit;
+ OS << ", ";
+ OS << KeyPath;
+ OS << ", ";
+ emitScopedNormalizedValue(OS, DefaultValue);
+ OS << ", ";
+ OS << ImpliedCheck;
+ OS << ", ";
+ emitScopedNormalizedValue(OS, ImpliedValue);
+ OS << ", ";
+ OS << Normalizer;
+ OS << ", ";
+ OS << Denormalizer;
+ OS << ", ";
+ OS << ValueMerger;
+ OS << ", ";
+ OS << ValueExtractor;
+ OS << ", ";
+ OS << TableIndex;
+ }
+
+ std::optional<StringRef> emitValueTable(raw_ostream &OS) const {
+ if (TableIndex == -1)
+ return {};
+ OS << "static const SimpleEnumValue " << ValueTableName << "[] = {\n";
+ for (unsigned I = 0, E = Values.size(); I != E; ++I) {
+ OS << "{";
+ write_cstring(OS, Values[I]);
+ OS << ",";
+ OS << "static_cast<unsigned>(";
+ emitScopedNormalizedValue(OS, NormalizedValues[I]);
+ OS << ")},";
+ }
+ OS << "};\n";
+ return StringRef(ValueTableName);
+ }
+
+private:
+ void emitScopedNormalizedValue(raw_ostream &OS,
+ StringRef NormalizedValue) const {
+ if (!NormalizedValuesScope.empty())
+ OS << NormalizedValuesScope << "::";
+ OS << NormalizedValue;
+ }
+};
+
+size_t MarshallingInfo::NextTableIndex = 0;
+
+static MarshallingInfo createMarshallingInfo(const Record &R) {
+ assert(!isa<UnsetInit>(R.getValueInit("KeyPath")) &&
+ !isa<UnsetInit>(R.getValueInit("DefaultValue")) &&
+ !isa<UnsetInit>(R.getValueInit("ValueMerger")) &&
+ "MarshallingInfo must have a provide a keypath, default value and a "
+ "value merger");
+
+ MarshallingInfo Ret(R);
+
+ Ret.ShouldAlwaysEmit = R.getValueAsBit("ShouldAlwaysEmit");
+ Ret.MacroPrefix = R.getValueAsString("MacroPrefix");
+ Ret.KeyPath = R.getValueAsString("KeyPath");
+ Ret.DefaultValue = R.getValueAsString("DefaultValue");
+ Ret.NormalizedValuesScope = R.getValueAsString("NormalizedValuesScope");
+ Ret.ImpliedCheck = R.getValueAsString("ImpliedCheck");
+ Ret.ImpliedValue =
+ R.getValueAsOptionalString("ImpliedValue").value_or(Ret.DefaultValue);
+
+ Ret.ShouldParse = R.getValueAsString("ShouldParse");
+ Ret.Normalizer = R.getValueAsString("Normalizer");
+ Ret.Denormalizer = R.getValueAsString("Denormalizer");
+ Ret.ValueMerger = R.getValueAsString("ValueMerger");
+ Ret.ValueExtractor = R.getValueAsString("ValueExtractor");
+
+ if (!isa<UnsetInit>(R.getValueInit("NormalizedValues"))) {
+ assert(!isa<UnsetInit>(R.getValueInit("Values")) &&
+ "Cannot provide normalized values for value-less options");
+ Ret.TableIndex = MarshallingInfo::NextTableIndex++;
+ Ret.NormalizedValues = R.getValueAsListOfStrings("NormalizedValues");
+ Ret.Values.reserve(Ret.NormalizedValues.size());
+ Ret.ValueTableName = getOptionName(R) + "ValueTable";
+
+ StringRef ValuesStr = R.getValueAsString("Values");
+ for (;;) {
+ size_t Idx = ValuesStr.find(',');
+ if (Idx == StringRef::npos)
+ break;
+ if (Idx > 0)
+ Ret.Values.push_back(ValuesStr.slice(0, Idx));
+ ValuesStr = ValuesStr.slice(Idx + 1, StringRef::npos);
+ }
+ if (!ValuesStr.empty())
+ Ret.Values.push_back(ValuesStr);
+
+ assert(Ret.Values.size() == Ret.NormalizedValues.size() &&
+ "The number of normalized values doesn't match the number of "
+ "values");
+ }
+
+ return Ret;
+}
+
/// OptParserEmitter - This tablegen backend takes an input .td file
/// describing a list of options and emits a data structure for parsing and
/// working with those options when given an input command line.
namespace llvm {
void EmitOptParser(RecordKeeper &Records, raw_ostream &OS) {
  // Get the option groups and options.
  const std::vector<Record*> &Groups =
      Records.getAllDerivedDefinitions("OptionGroup");
  std::vector<Record*> Opts = Records.getAllDerivedDefinitions("Option");

  emitSourceFileHeader("Option Parsing Definitions", OS);

  // Order options the way the OptTable consumer expects them (see
  // CompareOptionRecords in OptEmitter.cpp).
  array_pod_sort(Opts.begin(), Opts.end(), CompareOptionRecords);
  // Generate prefix groups: assign a "prefix_N" identifier to each distinct
  // set of prefixes used by at least one option.
  typedef SmallVector<SmallString<2>, 2> PrefixKeyT;
  typedef std::map<PrefixKeyT, std::string> PrefixesT;
  PrefixesT Prefixes;
  Prefixes.insert(std::make_pair(PrefixKeyT(), "prefix_0"));
  unsigned CurPrefix = 0;
  for (const Record &R : llvm::make_pointee_range(Opts)) {
    std::vector<StringRef> RPrefixes = R.getValueAsListOfStrings("Prefixes");
    PrefixKeyT PrefixKey(RPrefixes.begin(), RPrefixes.end());
    unsigned NewPrefix = CurPrefix + 1;
    std::string Prefix = (Twine("prefix_") + Twine(NewPrefix)).str();
    // Only consume the new number if this prefix set was not seen before.
    if (Prefixes.insert(std::make_pair(PrefixKey, Prefix)).second)
      CurPrefix = NewPrefix;
  }

  // Compute the deduplicated, sorted union of all prefix strings.
  DenseSet<StringRef> PrefixesUnionSet;
  for (const auto &Prefix : Prefixes)
    PrefixesUnionSet.insert(Prefix.first.begin(), Prefix.first.end());
  SmallVector<StringRef> PrefixesUnion(PrefixesUnionSet.begin(),
                                       PrefixesUnionSet.end());
  array_pod_sort(PrefixesUnion.begin(), PrefixesUnion.end());

  // Dump prefixes.
  OS << "/////////\n";
  OS << "// Prefixes\n\n";
  OS << "#ifdef PREFIX\n";
  OS << "#define COMMA ,\n";
  for (const auto &Prefix : Prefixes) {
    OS << "PREFIX(";

    // Prefix name.
    OS << Prefix.second;

    // Prefix values.
    OS << ", {";
    for (const auto &PrefixKey : Prefix.first)
      OS << "llvm::StringLiteral(\"" << PrefixKey << "\") COMMA ";
    // Append an empty element to avoid ending up with an empty array.
    OS << "llvm::StringLiteral(\"\")})\n";
  }
  OS << "#undef COMMA\n";
  OS << "#endif // PREFIX\n\n";

  // Dump prefix unions.
  OS << "/////////\n";
  OS << "// Prefix Union\n\n";
  OS << "#ifdef PREFIX_UNION\n";
  OS << "#define COMMA ,\n";
  OS << "PREFIX_UNION({\n";
  for (const auto &Prefix : PrefixesUnion) {
    OS << "llvm::StringLiteral(\"" << Prefix << "\") COMMA ";
  }
  OS << "llvm::StringLiteral(\"\")})\n";
  OS << "#undef COMMA\n";
  OS << "#endif // PREFIX_UNION\n\n";

  // Dump the per-option ValuesCode blocks, each wrapped in a VALUES_CODE
  // macro naming the variable the code should define.
  OS << "/////////\n";
  OS << "// ValuesCode\n\n";
  OS << "#ifdef OPTTABLE_VALUES_CODE\n";
  for (const Record &R : llvm::make_pointee_range(Opts)) {
    // The option values, if any;
    if (!isa<UnsetInit>(R.getValueInit("ValuesCode"))) {
      assert(isa<UnsetInit>(R.getValueInit("Values")) &&
             "Cannot choose between Values and ValuesCode");
      OS << "#define VALUES_CODE " << getOptionName(R) << "_Values\n";
      OS << R.getValueAsString("ValuesCode") << "\n";
      OS << "#undef VALUES_CODE\n";
    }
  }
  OS << "#endif\n";

  // Dump groups. Groups share the OPTION macro but use the Group kind and
  // leave most fields unused.
  OS << "/////////\n";
  OS << "// Groups\n\n";
  OS << "#ifdef OPTION\n";
  for (const Record &R : llvm::make_pointee_range(Groups)) {
    // Start a single option entry.
    OS << "OPTION(";

    // The option prefix;
    OS << "llvm::ArrayRef<llvm::StringLiteral>()";

    // The option string.
    OS << ", \"" << R.getValueAsString("Name") << '"';

    // The option identifier name.
    OS << ", " << getOptionName(R);

    // The option kind.
    OS << ", Group";

    // The containing option group (if any).
    OS << ", ";
    if (const DefInit *DI = dyn_cast<DefInit>(R.getValueInit("Group")))
      OS << getOptionName(*DI->getDef());
    else
      OS << "INVALID";

    // The other option arguments (unused for groups).
    OS << ", INVALID, nullptr, 0, 0";

    // The option help text.
    if (!isa<UnsetInit>(R.getValueInit("HelpText"))) {
      OS << ",\n";
      OS << "       ";
      write_cstring(OS, R.getValueAsString("HelpText"));
    } else
      OS << ", nullptr";

    // The option meta-variable name (unused).
    OS << ", nullptr";

    // The option Values (unused for groups).
    OS << ", nullptr)\n";
  }
  OS << "\n";

  OS << "//////////\n";
  OS << "// Options\n\n";

  // Emit the fields of one option record in OPTION-macro argument order.
  // Shared between the plain OPTION table and the marshalling macros below.
  auto WriteOptRecordFields = [&](raw_ostream &OS, const Record &R) {
    // The option prefix;
    std::vector<StringRef> RPrefixes = R.getValueAsListOfStrings("Prefixes");
    OS << Prefixes[PrefixKeyT(RPrefixes.begin(), RPrefixes.end())] << ", ";

    // The option string.
    emitNameUsingSpelling(OS, R);

    // The option identifier name.
    OS << ", " << getOptionName(R);

    // The option kind.
    OS << ", " << R.getValueAsDef("Kind")->getValueAsString("Name");

    // The containing option group (if any).
    OS << ", ";
    const ListInit *GroupFlags = nullptr;
    if (const DefInit *DI = dyn_cast<DefInit>(R.getValueInit("Group"))) {
      GroupFlags = DI->getDef()->getValueAsListInit("Flags");
      OS << getOptionName(*DI->getDef());
    } else
      OS << "INVALID";

    // The option alias (if any).
    OS << ", ";
    if (const DefInit *DI = dyn_cast<DefInit>(R.getValueInit("Alias")))
      OS << getOptionName(*DI->getDef());
    else
      OS << "INVALID";

    // The option alias arguments (if any).
    // Emitted as a \0 separated list in a string, e.g. ["foo", "bar"]
    // would become "foo\0bar\0". Note that the compiler adds an implicit
    // terminating \0 at the end.
    OS << ", ";
    std::vector<StringRef> AliasArgs = R.getValueAsListOfStrings("AliasArgs");
    if (AliasArgs.size() == 0) {
      OS << "nullptr";
    } else {
      OS << "\"";
      for (StringRef AliasArg : AliasArgs)
        OS << AliasArg << "\\0";
      OS << "\"";
    }

    // The option flags: the option's own flags OR'd with its group's.
    OS << ", ";
    int NumFlags = 0;
    const ListInit *LI = R.getValueAsListInit("Flags");
    for (Init *I : *LI)
      OS << (NumFlags++ ? " | " : "") << cast<DefInit>(I)->getDef()->getName();
    if (GroupFlags) {
      for (Init *I : *GroupFlags)
        OS << (NumFlags++ ? " | " : "")
           << cast<DefInit>(I)->getDef()->getName();
    }
    if (NumFlags == 0)
      OS << '0';

    // The option parameter field.
    OS << ", " << R.getValueAsInt("NumArgs");

    // The option help text.
    if (!isa<UnsetInit>(R.getValueInit("HelpText"))) {
      OS << ",\n";
      OS << "       ";
      write_cstring(OS, R.getValueAsString("HelpText"));
    } else
      OS << ", nullptr";

    // The option meta-variable name.
    OS << ", ";
    if (!isa<UnsetInit>(R.getValueInit("MetaVarName")))
      write_cstring(OS, R.getValueAsString("MetaVarName"));
    else
      OS << "nullptr";

    // The option Values. Used for shell autocompletion.
    OS << ", ";
    if (!isa<UnsetInit>(R.getValueInit("Values")))
      write_cstring(OS, R.getValueAsString("Values"));
    else if (!isa<UnsetInit>(R.getValueInit("ValuesCode"))) {
      OS << getOptionName(R) << "_Values";
    }
    else
      OS << "nullptr";
  };

  // An option participates in marshalling iff it has a non-empty KeyPath.
  auto IsMarshallingOption = [](const Record &R) {
    return !isa<UnsetInit>(R.getValueInit("KeyPath")) &&
           !R.getValueAsString("KeyPath").empty();
  };

  std::vector<const Record *> OptsWithMarshalling;
  for (const Record &R : llvm::make_pointee_range(Opts)) {
    // Start a single option entry.
    OS << "OPTION(";
    WriteOptRecordFields(OS, R);
    OS << ")\n";
    if (IsMarshallingOption(R))
      OptsWithMarshalling.push_back(&R);
  }
  OS << "#endif // OPTION\n";

  // Order marshalling options by record ID, i.e. by .td definition order.
  auto CmpMarshallingOpts = [](const Record *const *A, const Record *const *B) {
    unsigned AID = (*A)->getID();
    unsigned BID = (*B)->getID();

    if (AID < BID)
      return -1;
    if (AID > BID)
      return 1;
    return 0;
  };
  // The RecordKeeper stores records (options) in lexicographical order, and we
  // have reordered the options again when generating prefix groups. We need to
  // restore the original definition order of options with marshalling to honor
  // the topology of the dependency graph implied by `DefaultAnyOf`.
  array_pod_sort(OptsWithMarshalling.begin(), OptsWithMarshalling.end(),
                 CmpMarshallingOpts);

  std::vector<MarshallingInfo> MarshallingInfos;
  MarshallingInfos.reserve(OptsWithMarshalling.size());
  for (const auto *R : OptsWithMarshalling)
    MarshallingInfos.push_back(createMarshallingInfo(*R));

  // Emit one OPTION_WITH_MARSHALLING invocation per marshalling option:
  // the plain OPTION fields followed by the marshalling-specific ones.
  for (const auto &MI : MarshallingInfos) {
    OS << "#ifdef " << MI.getMacroName() << "\n";
    OS << MI.getMacroName() << "(";
    WriteOptRecordFields(OS, MI.R);
    OS << ", ";
    MI.emit(OS);
    OS << ")\n";
    OS << "#endif // " << MI.getMacroName() << "\n";
  }

  // Emit the value tables for options with normalized values, plus the
  // master table indexing them by MarshallingInfo::TableIndex.
  OS << "\n";
  OS << "#ifdef SIMPLE_ENUM_VALUE_TABLE";
  OS << "\n";
  OS << MarshallingInfo::ValueTablePreamble;
  std::vector<StringRef> ValueTableNames;
  for (const auto &MI : MarshallingInfos)
    if (auto MaybeValueTableName = MI.emitValueTable(OS))
      ValueTableNames.push_back(*MaybeValueTableName);

  OS << MarshallingInfo::ValueTablesDecl << "{";
  for (auto ValueTableName : ValueTableNames)
    OS << "{" << ValueTableName << ", std::size(" << ValueTableName << ")},\n";
  OS << "};\n";
  OS << "static const unsigned SimpleEnumValueTablesSize = "
        "std::size(SimpleEnumValueTables);\n";

  OS << "#endif // SIMPLE_ENUM_VALUE_TABLE\n";
  OS << "\n";

  OS << "\n";
}
} // end namespace llvm
diff --git a/contrib/libs/llvm16/utils/TableGen/OptRSTEmitter.cpp b/contrib/libs/llvm16/utils/TableGen/OptRSTEmitter.cpp
new file mode 100644
index 0000000000..03c7326e81
--- /dev/null
+++ b/contrib/libs/llvm16/utils/TableGen/OptRSTEmitter.cpp
@@ -0,0 +1,105 @@
+//===- OptParserEmitter.cpp - Table Driven Command Line Parsing -----------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+#include "OptEmitter.h"
+#include "llvm/ADT/STLExtras.h"
+#include "llvm/ADT/StringMap.h"
+#include "llvm/TableGen/Record.h"
+
+using namespace llvm;
+
+/// OptParserEmitter - This tablegen backend takes an input .td file
+/// describing a list of options and emits a RST man page.
+namespace llvm {
+void EmitOptRST(RecordKeeper &Records, raw_ostream &OS) {
+ llvm::StringMap<std::vector<Record *>> OptionsByGroup;
+ std::vector<Record *> OptionsWithoutGroup;
+
+ // Get the options.
+ std::vector<Record *> Opts = Records.getAllDerivedDefinitions("Option");
+ array_pod_sort(Opts.begin(), Opts.end(), CompareOptionRecords);
+
+ // Get the option groups.
+ const std::vector<Record *> &Groups =
+ Records.getAllDerivedDefinitions("OptionGroup");
+ for (unsigned i = 0, e = Groups.size(); i != e; ++i) {
+ const Record &R = *Groups[i];
+ OptionsByGroup.try_emplace(R.getValueAsString("Name"));
+ }
+
+ // Map options to their group.
+ for (unsigned i = 0, e = Opts.size(); i != e; ++i) {
+ const Record &R = *Opts[i];
+ if (const DefInit *DI = dyn_cast<DefInit>(R.getValueInit("Group"))) {
+ OptionsByGroup[DI->getDef()->getValueAsString("Name")].push_back(Opts[i]);
+ } else {
+ OptionsByGroup["options"].push_back(Opts[i]);
+ }
+ }
+
+ // Print options under their group.
+ for (const auto &KV : OptionsByGroup) {
+ std::string GroupName = KV.getKey().upper();
+ OS << GroupName << '\n';
+ OS << std::string(GroupName.size(), '-') << '\n';
+ OS << '\n';
+
+ for (Record *R : KV.getValue()) {
+ OS << ".. option:: ";
+
+ // Print the prefix.
+ std::vector<StringRef> Prefixes = R->getValueAsListOfStrings("Prefixes");
+ if (!Prefixes.empty())
+ OS << Prefixes[0];
+
+ // Print the option name.
+ OS << R->getValueAsString("Name");
+
+ StringRef MetaVarName;
+ // Print the meta-variable.
+ if (!isa<UnsetInit>(R->getValueInit("MetaVarName"))) {
+ MetaVarName = R->getValueAsString("MetaVarName");
+ } else if (!isa<UnsetInit>(R->getValueInit("Values")))
+ MetaVarName = "<value>";
+
+ if (!MetaVarName.empty()) {
+ OS << '=';
+ OS.write_escaped(MetaVarName);
+ }
+
+ OS << "\n\n";
+
+ std::string HelpText;
+ // The option help text.
+ if (!isa<UnsetInit>(R->getValueInit("HelpText"))) {
+ HelpText = R->getValueAsString("HelpText").trim().str();
+ if (!HelpText.empty() && HelpText.back() != '.')
+ HelpText.push_back('.');
+ }
+
+ if (!isa<UnsetInit>(R->getValueInit("Values"))) {
+ SmallVector<StringRef> Values;
+ SplitString(R->getValueAsString("Values"), Values, ",");
+ HelpText += (" " + MetaVarName + " must be '").str();
+
+ if (Values.size() > 1) {
+ HelpText += join(Values.begin(), Values.end() - 1, "', '");
+ HelpText += "' or '";
+ }
+ HelpText += (Values.front() + "'.").str();
+ }
+
+ if (!HelpText.empty()) {
+ OS << ' ';
+ OS.write_escaped(HelpText);
+ OS << "\n\n";
+ }
+ }
+ }
+}
+} // end namespace llvm
diff --git a/contrib/libs/llvm16/utils/TableGen/PredicateExpander.cpp b/contrib/libs/llvm16/utils/TableGen/PredicateExpander.cpp
new file mode 100644
index 0000000000..b129401461
--- /dev/null
+++ b/contrib/libs/llvm16/utils/TableGen/PredicateExpander.cpp
@@ -0,0 +1,547 @@
+//===--------------------- PredicateExpander.cpp --------------------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+/// \file
+/// Functionalities used by the Tablegen backends to expand machine predicates.
+//
+//===----------------------------------------------------------------------===//
+
+#include "PredicateExpander.h"
+#include "CodeGenSchedule.h" // Definition of STIPredicateFunction.
+
+namespace llvm {
+
// Emit the literal for an always-true predicate.
void PredicateExpander::expandTrue(raw_ostream &OS) { OS << "true"; }
// Emit the literal for an always-false predicate.
void PredicateExpander::expandFalse(raw_ostream &OS) { OS << "false"; }
+
+void PredicateExpander::expandCheckImmOperand(raw_ostream &OS, int OpIndex,
+ int ImmVal,
+ StringRef FunctionMapper) {
+ if (!FunctionMapper.empty())
+ OS << FunctionMapper << "(";
+ OS << "MI" << (isByRef() ? "." : "->") << "getOperand(" << OpIndex
+ << ").getImm()";
+ if (!FunctionMapper.empty())
+ OS << ")";
+ OS << (shouldNegate() ? " != " : " == ") << ImmVal;
+}
+
+void PredicateExpander::expandCheckImmOperand(raw_ostream &OS, int OpIndex,
+ StringRef ImmVal,
+ StringRef FunctionMapper) {
+ if (ImmVal.empty())
+ expandCheckImmOperandSimple(OS, OpIndex, FunctionMapper);
+
+ if (!FunctionMapper.empty())
+ OS << FunctionMapper << "(";
+ OS << "MI" << (isByRef() ? "." : "->") << "getOperand(" << OpIndex
+ << ").getImm()";
+ if (!FunctionMapper.empty())
+ OS << ")";
+ OS << (shouldNegate() ? " != " : " == ") << ImmVal;
+}
+
+void PredicateExpander::expandCheckImmOperandSimple(raw_ostream &OS,
+ int OpIndex,
+ StringRef FunctionMapper) {
+ if (shouldNegate())
+ OS << "!";
+ if (!FunctionMapper.empty())
+ OS << FunctionMapper << "(";
+ OS << "MI" << (isByRef() ? "." : "->") << "getOperand(" << OpIndex
+ << ").getImm()";
+ if (!FunctionMapper.empty())
+ OS << ")";
+}
+
+void PredicateExpander::expandCheckRegOperand(raw_ostream &OS, int OpIndex,
+ const Record *Reg,
+ StringRef FunctionMapper) {
+ assert(Reg->isSubClassOf("Register") && "Expected a register Record!");
+
+ if (!FunctionMapper.empty())
+ OS << FunctionMapper << "(";
+ OS << "MI" << (isByRef() ? "." : "->") << "getOperand(" << OpIndex
+ << ").getReg()";
+ if (!FunctionMapper.empty())
+ OS << ")";
+ OS << (shouldNegate() ? " != " : " == ");
+ const StringRef Str = Reg->getValueAsString("Namespace");
+ if (!Str.empty())
+ OS << Str << "::";
+ OS << Reg->getName();
+}
+
+
+void PredicateExpander::expandCheckRegOperandSimple(raw_ostream &OS,
+ int OpIndex,
+ StringRef FunctionMapper) {
+ if (shouldNegate())
+ OS << "!";
+ if (!FunctionMapper.empty())
+ OS << FunctionMapper << "(";
+ OS << "MI" << (isByRef() ? "." : "->") << "getOperand(" << OpIndex
+ << ").getReg()";
+ if (!FunctionMapper.empty())
+ OS << ")";
+}
+
+void PredicateExpander::expandCheckInvalidRegOperand(raw_ostream &OS,
+ int OpIndex) {
+ OS << "MI" << (isByRef() ? "." : "->") << "getOperand(" << OpIndex
+ << ").getReg() " << (shouldNegate() ? "!= " : "== ") << "0";
+}
+
+void PredicateExpander::expandCheckSameRegOperand(raw_ostream &OS, int First,
+ int Second) {
+ OS << "MI" << (isByRef() ? "." : "->") << "getOperand(" << First
+ << ").getReg() " << (shouldNegate() ? "!=" : "==") << " MI"
+ << (isByRef() ? "." : "->") << "getOperand(" << Second << ").getReg()";
+}
+
+void PredicateExpander::expandCheckNumOperands(raw_ostream &OS, int NumOps) {
+ OS << "MI" << (isByRef() ? "." : "->") << "getNumOperands() "
+ << (shouldNegate() ? "!= " : "== ") << NumOps;
+}
+
+void PredicateExpander::expandCheckOpcode(raw_ostream &OS, const Record *Inst) {
+ OS << "MI" << (isByRef() ? "." : "->") << "getOpcode() "
+ << (shouldNegate() ? "!= " : "== ") << Inst->getValueAsString("Namespace")
+ << "::" << Inst->getName();
+}
+
+void PredicateExpander::expandCheckOpcode(raw_ostream &OS,
+ const RecVec &Opcodes) {
+ assert(!Opcodes.empty() && "Expected at least one opcode to check!");
+ bool First = true;
+
+ if (Opcodes.size() == 1) {
+ OS << "( ";
+ expandCheckOpcode(OS, Opcodes[0]);
+ OS << " )";
+ return;
+ }
+
+ OS << '(';
+ increaseIndentLevel();
+ for (const Record *Rec : Opcodes) {
+ OS << '\n';
+ OS.indent(getIndentLevel() * 2);
+ if (!First)
+ OS << (shouldNegate() ? "&& " : "|| ");
+
+ expandCheckOpcode(OS, Rec);
+ First = false;
+ }
+
+ OS << '\n';
+ decreaseIndentLevel();
+ OS.indent(getIndentLevel() * 2);
+ OS << ')';
+}
+
+void PredicateExpander::expandCheckPseudo(raw_ostream &OS,
+ const RecVec &Opcodes) {
+ if (shouldExpandForMC())
+ expandFalse(OS);
+ else
+ expandCheckOpcode(OS, Opcodes);
+}
+
+void PredicateExpander::expandPredicateSequence(raw_ostream &OS,
+ const RecVec &Sequence,
+ bool IsCheckAll) {
+ assert(!Sequence.empty() && "Found an invalid empty predicate set!");
+ if (Sequence.size() == 1)
+ return expandPredicate(OS, Sequence[0]);
+
+ // Okay, there is more than one predicate in the set.
+ bool First = true;
+ OS << (shouldNegate() ? "!(" : "(");
+ increaseIndentLevel();
+
+ bool OldValue = shouldNegate();
+ setNegatePredicate(false);
+ for (const Record *Rec : Sequence) {
+ OS << '\n';
+ OS.indent(getIndentLevel() * 2);
+ if (!First)
+ OS << (IsCheckAll ? "&& " : "|| ");
+ expandPredicate(OS, Rec);
+ First = false;
+ }
+ OS << '\n';
+ decreaseIndentLevel();
+ OS.indent(getIndentLevel() * 2);
+ OS << ')';
+ setNegatePredicate(OldValue);
+}
+
+void PredicateExpander::expandTIIFunctionCall(raw_ostream &OS,
+ StringRef MethodName) {
+ OS << (shouldNegate() ? "!" : "");
+ OS << TargetName << (shouldExpandForMC() ? "_MC::" : "InstrInfo::");
+ OS << MethodName << (isByRef() ? "(MI)" : "(*MI)");
+}
+
+void PredicateExpander::expandCheckIsRegOperand(raw_ostream &OS, int OpIndex) {
+ OS << (shouldNegate() ? "!" : "") << "MI" << (isByRef() ? "." : "->")
+ << "getOperand(" << OpIndex << ").isReg() ";
+}
+
+void PredicateExpander::expandCheckIsImmOperand(raw_ostream &OS, int OpIndex) {
+ OS << (shouldNegate() ? "!" : "") << "MI" << (isByRef() ? "." : "->")
+ << "getOperand(" << OpIndex << ").isImm() ";
+}
+
+void PredicateExpander::expandCheckFunctionPredicateWithTII(
+ raw_ostream &OS, StringRef MCInstFn, StringRef MachineInstrFn,
+ StringRef TIIPtr) {
+ if (!shouldExpandForMC()) {
+ OS << (TIIPtr.empty() ? "TII" : TIIPtr) << "->" << MachineInstrFn;
+ OS << (isByRef() ? "(MI)" : "(*MI)");
+ return;
+ }
+
+ OS << MCInstFn << (isByRef() ? "(MI" : "(*MI") << ", MCII)";
+}
+
+void PredicateExpander::expandCheckFunctionPredicate(raw_ostream &OS,
+ StringRef MCInstFn,
+ StringRef MachineInstrFn) {
+ OS << (shouldExpandForMC() ? MCInstFn : MachineInstrFn)
+ << (isByRef() ? "(MI)" : "(*MI)");
+}
+
+void PredicateExpander::expandCheckNonPortable(raw_ostream &OS,
+ StringRef Code) {
+ if (shouldExpandForMC())
+ return expandFalse(OS);
+
+ OS << '(' << Code << ')';
+}
+
+void PredicateExpander::expandReturnStatement(raw_ostream &OS,
+ const Record *Rec) {
+ std::string Buffer;
+ raw_string_ostream SS(Buffer);
+
+ SS << "return ";
+ expandPredicate(SS, Rec);
+ SS << ";";
+ OS << Buffer;
+}
+
+void PredicateExpander::expandOpcodeSwitchCase(raw_ostream &OS,
+ const Record *Rec) {
+ const RecVec &Opcodes = Rec->getValueAsListOfDefs("Opcodes");
+ for (const Record *Opcode : Opcodes) {
+ OS.indent(getIndentLevel() * 2);
+ OS << "case " << Opcode->getValueAsString("Namespace")
+ << "::" << Opcode->getName() << ":\n";
+ }
+
+ increaseIndentLevel();
+ OS.indent(getIndentLevel() * 2);
+ expandStatement(OS, Rec->getValueAsDef("CaseStmt"));
+ decreaseIndentLevel();
+}
+
+void PredicateExpander::expandOpcodeSwitchStatement(raw_ostream &OS,
+ const RecVec &Cases,
+ const Record *Default) {
+ std::string Buffer;
+ raw_string_ostream SS(Buffer);
+
+ SS << "switch(MI" << (isByRef() ? "." : "->") << "getOpcode()) {\n";
+ for (const Record *Rec : Cases) {
+ expandOpcodeSwitchCase(SS, Rec);
+ SS << '\n';
+ }
+
+ // Expand the default case.
+ SS.indent(getIndentLevel() * 2);
+ SS << "default:\n";
+
+ increaseIndentLevel();
+ SS.indent(getIndentLevel() * 2);
+ expandStatement(SS, Default);
+ decreaseIndentLevel();
+ SS << '\n';
+
+ SS.indent(getIndentLevel() * 2);
+ SS << "} // end of switch-stmt";
+ OS << Buffer;
+}
+
+void PredicateExpander::expandStatement(raw_ostream &OS, const Record *Rec) {
+ // Assume that padding has been added by the caller.
+ if (Rec->isSubClassOf("MCOpcodeSwitchStatement")) {
+ expandOpcodeSwitchStatement(OS, Rec->getValueAsListOfDefs("Cases"),
+ Rec->getValueAsDef("DefaultCase"));
+ return;
+ }
+
+ if (Rec->isSubClassOf("MCReturnStatement")) {
+ expandReturnStatement(OS, Rec->getValueAsDef("Pred"));
+ return;
+ }
+
+ llvm_unreachable("No known rules to expand this MCStatement");
+}
+
+// Expand an MCInstPredicate record into a C++ boolean expression by
+// dispatching on the record's TableGen class. Unknown predicate classes are
+// a fatal error.
+void PredicateExpander::expandPredicate(raw_ostream &OS, const Record *Rec) {
+  // Assume that padding has been added by the caller.
+  if (Rec->isSubClassOf("MCTrue")) {
+    if (shouldNegate())
+      return expandFalse(OS);
+    return expandTrue(OS);
+  }
+
+  if (Rec->isSubClassOf("MCFalse")) {
+    if (shouldNegate())
+      return expandTrue(OS);
+    return expandFalse(OS);
+  }
+
+  if (Rec->isSubClassOf("CheckNot")) {
+    // Negation is implemented by toggling the negate flag around the
+    // expansion of the inner predicate.
+    flipNegatePredicate();
+    expandPredicate(OS, Rec->getValueAsDef("Pred"));
+    flipNegatePredicate();
+    return;
+  }
+
+  if (Rec->isSubClassOf("CheckIsRegOperand"))
+    return expandCheckIsRegOperand(OS, Rec->getValueAsInt("OpIndex"));
+
+  if (Rec->isSubClassOf("CheckIsImmOperand"))
+    return expandCheckIsImmOperand(OS, Rec->getValueAsInt("OpIndex"));
+
+  if (Rec->isSubClassOf("CheckRegOperand"))
+    return expandCheckRegOperand(OS, Rec->getValueAsInt("OpIndex"),
+                                 Rec->getValueAsDef("Reg"),
+                                 Rec->getValueAsString("FunctionMapper"));
+
+  if (Rec->isSubClassOf("CheckRegOperandSimple"))
+    return expandCheckRegOperandSimple(OS, Rec->getValueAsInt("OpIndex"),
+                                       Rec->getValueAsString("FunctionMapper"));
+
+  if (Rec->isSubClassOf("CheckInvalidRegOperand"))
+    return expandCheckInvalidRegOperand(OS, Rec->getValueAsInt("OpIndex"));
+
+  if (Rec->isSubClassOf("CheckImmOperand"))
+    return expandCheckImmOperand(OS, Rec->getValueAsInt("OpIndex"),
+                                 Rec->getValueAsInt("ImmVal"),
+                                 Rec->getValueAsString("FunctionMapper"));
+
+  if (Rec->isSubClassOf("CheckImmOperand_s"))
+    return expandCheckImmOperand(OS, Rec->getValueAsInt("OpIndex"),
+                                 Rec->getValueAsString("ImmVal"),
+                                 Rec->getValueAsString("FunctionMapper"));
+
+  if (Rec->isSubClassOf("CheckImmOperandSimple"))
+    return expandCheckImmOperandSimple(OS, Rec->getValueAsInt("OpIndex"),
+                                       Rec->getValueAsString("FunctionMapper"));
+
+  if (Rec->isSubClassOf("CheckSameRegOperand"))
+    return expandCheckSameRegOperand(OS, Rec->getValueAsInt("FirstIndex"),
+                                     Rec->getValueAsInt("SecondIndex"));
+
+  if (Rec->isSubClassOf("CheckNumOperands"))
+    return expandCheckNumOperands(OS, Rec->getValueAsInt("NumOps"));
+
+  if (Rec->isSubClassOf("CheckPseudo"))
+    return expandCheckPseudo(OS, Rec->getValueAsListOfDefs("ValidOpcodes"));
+
+  if (Rec->isSubClassOf("CheckOpcode"))
+    return expandCheckOpcode(OS, Rec->getValueAsListOfDefs("ValidOpcodes"));
+
+  if (Rec->isSubClassOf("CheckAll"))
+    return expandPredicateSequence(OS, Rec->getValueAsListOfDefs("Predicates"),
+                                   /* AllOf */ true);
+
+  if (Rec->isSubClassOf("CheckAny"))
+    return expandPredicateSequence(OS, Rec->getValueAsListOfDefs("Predicates"),
+                                   /* AllOf */ false);
+
+  if (Rec->isSubClassOf("CheckFunctionPredicate")) {
+    return expandCheckFunctionPredicate(
+        OS, Rec->getValueAsString("MCInstFnName"),
+        Rec->getValueAsString("MachineInstrFnName"));
+  }
+
+  if (Rec->isSubClassOf("CheckFunctionPredicateWithTII")) {
+    return expandCheckFunctionPredicateWithTII(
+        OS, Rec->getValueAsString("MCInstFnName"),
+        Rec->getValueAsString("MachineInstrFnName"),
+        Rec->getValueAsString("TIIPtrName"));
+  }
+
+  if (Rec->isSubClassOf("CheckNonPortable"))
+    return expandCheckNonPortable(OS, Rec->getValueAsString("CodeBlock"));
+
+  if (Rec->isSubClassOf("TIIPredicate"))
+    return expandTIIFunctionCall(OS, Rec->getValueAsString("FunctionName"));
+
+  llvm_unreachable("No known rules to expand this MCInstPredicate");
+}
+
+// Emit the signature of an STI predicate function: either a declaration
+// (possibly marked `override`), or — when shouldExpandDefinition() is true —
+// a class-qualified definition header ending with the opening brace.
+void STIPredicateExpander::expandHeader(raw_ostream &OS,
+                                        const STIPredicateFunction &Fn) {
+  const Record *Rec = Fn.getDeclaration();
+  StringRef FunctionName = Rec->getValueAsString("Name");
+
+  OS.indent(getIndentLevel() * 2);
+  OS << "bool ";
+  if (shouldExpandDefinition())
+    OS << getClassPrefix() << "::";
+  OS << FunctionName << "(";
+  // MI is taken by reference or pointer depending on the by-ref setting.
+  if (shouldExpandForMC())
+    OS << "const MCInst " << (isByRef() ? "&" : "*") << "MI";
+  else
+    OS << "const MachineInstr " << (isByRef() ? "&" : "*") << "MI";
+  if (Rec->getValueAsBit("UpdatesOpcodeMask"))
+    OS << ", APInt &Mask";
+  // Only the MC variant takes ProcessorID as a parameter; the CodeGen
+  // variant computes it in the prologue.
+  OS << (shouldExpandForMC() ? ", unsigned ProcessorID) const " : ") const ");
+  if (shouldExpandDefinition()) {
+    OS << "{\n";
+    return;
+  }
+
+  if (Rec->getValueAsBit("OverridesBaseClassMember"))
+    OS << "override";
+  OS << ";\n";
+}
+
+// Emit the function prologue: early-return delegation calls to each record in
+// "Delegates", then (for the MachineInstr variant only) derive ProcessorID
+// from the scheduling model.
+void STIPredicateExpander::expandPrologue(raw_ostream &OS,
+                                          const STIPredicateFunction &Fn) {
+  RecVec Delegates = Fn.getDeclaration()->getValueAsListOfDefs("Delegates");
+  bool UpdatesOpcodeMask =
+      Fn.getDeclaration()->getValueAsBit("UpdatesOpcodeMask");
+
+  increaseIndentLevel();
+  unsigned IndentLevel = getIndentLevel();
+  for (const Record *Delegate : Delegates) {
+    OS.indent(IndentLevel * 2);
+    OS << "if (" << Delegate->getValueAsString("Name") << "(MI";
+    if (UpdatesOpcodeMask)
+      OS << ", Mask";
+    if (shouldExpandForMC())
+      OS << ", ProcessorID";
+    OS << "))\n";
+    OS.indent((1 + IndentLevel) * 2);
+    OS << "return true;\n\n";
+  }
+
+  // The MC variant receives ProcessorID as a parameter (see expandHeader).
+  if (shouldExpandForMC())
+    return;
+
+  OS.indent(IndentLevel * 2);
+  OS << "unsigned ProcessorID = getSchedModel().getProcessorID();\n";
+}
+
+// Emit the per-processor guarded bodies for one group of opcodes: for each
+// predicate, an "if (ProcessorID == ...)" chain over the set bits of its
+// processor-model mask, followed by an optional operand-mask update and a
+// return of the expanded predicate.
+void STIPredicateExpander::expandOpcodeGroup(raw_ostream &OS, const OpcodeGroup &Group,
+                                             bool ShouldUpdateOpcodeMask) {
+  const OpcodeInfo &OI = Group.getOpcodeInfo();
+  for (const PredicateInfo &PI : OI.getPredicates()) {
+    const APInt &ProcModelMask = PI.ProcModelMask;
+    bool FirstProcID = true;
+    // NOTE(review): assumes ProcModelMask has at least one set bit; if it
+    // were all-zero, the "if (" prefix below would never be emitted and the
+    // following ") {" would be unmatched — confirm upstream invariant.
+    for (unsigned I = 0, E = ProcModelMask.getActiveBits(); I < E; ++I) {
+      if (!ProcModelMask[I])
+        continue;
+
+      if (FirstProcID) {
+        OS.indent(getIndentLevel() * 2);
+        OS << "if (ProcessorID == " << I;
+      } else {
+        OS << " || ProcessorID == " << I;
+      }
+      FirstProcID = false;
+    }
+
+    OS << ") {\n";
+
+    increaseIndentLevel();
+    OS.indent(getIndentLevel() * 2);
+    if (ShouldUpdateOpcodeMask) {
+      // An all-zero operand mask is emitted as a clearAllBits() call.
+      if (PI.OperandMask.isZero())
+        OS << "Mask.clearAllBits();\n";
+      else
+        OS << "Mask = " << PI.OperandMask << ";\n";
+      OS.indent(getIndentLevel() * 2);
+    }
+    OS << "return ";
+    expandPredicate(OS, PI.Predicate);
+    OS << ";\n";
+    decreaseIndentLevel();
+    OS.indent(getIndentLevel() * 2);
+    OS << "}\n";
+  }
+}
+
+// Emit the function body: a switch on the opcode with one run of case labels
+// per opcode group, each followed by that group's per-processor predicate
+// logic and a break.
+void STIPredicateExpander::expandBody(raw_ostream &OS,
+                                      const STIPredicateFunction &Fn) {
+  bool UpdatesOpcodeMask =
+      Fn.getDeclaration()->getValueAsBit("UpdatesOpcodeMask");
+
+  unsigned IndentLevel = getIndentLevel();
+  OS.indent(IndentLevel * 2);
+  OS << "switch(MI" << (isByRef() ? "." : "->") << "getOpcode()) {\n";
+  OS.indent(IndentLevel * 2);
+  OS << "default:\n";
+  OS.indent(IndentLevel * 2);
+  OS << " break;";
+
+  for (const OpcodeGroup &Group : Fn.getGroups()) {
+    // Opcode case labels are qualified with the target namespace.
+    for (const Record *Opcode : Group.getOpcodes()) {
+      OS << '\n';
+      OS.indent(IndentLevel * 2);
+      OS << "case " << getTargetName() << "::" << Opcode->getName() << ":";
+    }
+
+    OS << '\n';
+    increaseIndentLevel();
+    expandOpcodeGroup(OS, Group, UpdatesOpcodeMask);
+
+    OS.indent(getIndentLevel() * 2);
+    OS << "break;\n";
+    decreaseIndentLevel();
+  }
+
+  OS.indent(IndentLevel * 2);
+  OS << "}\n";
+}
+
+// Emit the final fallthrough return (the function's default predicate) and
+// close the definition with an annotated brace.
+void STIPredicateExpander::expandEpilogue(raw_ostream &OS,
+                                          const STIPredicateFunction &Fn) {
+  OS << '\n';
+  OS.indent(getIndentLevel() * 2);
+  OS << "return ";
+  expandPredicate(OS, Fn.getDefaultReturnPredicate());
+  OS << ";\n";
+
+  // Undo the indent added by expandPrologue before closing the function.
+  decreaseIndentLevel();
+  OS.indent(getIndentLevel() * 2);
+  StringRef FunctionName = Fn.getDeclaration()->getValueAsString("Name");
+  OS << "} // " << ClassPrefix << "::" << FunctionName << "\n\n";
+}
+
+// Expand a full STI predicate: the header is always emitted; the prologue,
+// body and epilogue only when emitting a definition. Records that opt out of
+// MC expansion (ExpandForMC == 0) are skipped entirely in MC mode.
+void STIPredicateExpander::expandSTIPredicate(raw_ostream &OS,
+                                              const STIPredicateFunction &Fn) {
+  const Record *Rec = Fn.getDeclaration();
+  if (shouldExpandForMC() && !Rec->getValueAsBit("ExpandForMC"))
+    return;
+
+  expandHeader(OS, Fn);
+  if (shouldExpandDefinition()) {
+    expandPrologue(OS, Fn);
+    expandBody(OS, Fn);
+    expandEpilogue(OS, Fn);
+  }
+}
+
+} // namespace llvm
diff --git a/contrib/libs/llvm16/utils/TableGen/PredicateExpander.h b/contrib/libs/llvm16/utils/TableGen/PredicateExpander.h
new file mode 100644
index 0000000000..27f049a715
--- /dev/null
+++ b/contrib/libs/llvm16/utils/TableGen/PredicateExpander.h
@@ -0,0 +1,126 @@
+//===--------------------- PredicateExpander.h ----------------------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+/// \file
+/// Functionalities used by the Tablegen backends to expand machine predicates.
+///
+/// See file llvm/Target/TargetInstrPredicate.td for a full list and description
+/// of all the supported MCInstPredicate classes.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_UTILS_TABLEGEN_PREDICATEEXPANDER_H
+#define LLVM_UTILS_TABLEGEN_PREDICATEEXPANDER_H
+
+#include "llvm/ADT/StringRef.h"
+#include <vector>
+
+namespace llvm {
+
+class raw_ostream;
+class Record;
+
+/// Expands TableGen MCInstPredicate records into C++ boolean expressions and
+/// statements, for both the MC layer (MCInst) and CodeGen (MachineInstr).
+class PredicateExpander {
+  bool EmitCallsByRef;  // Access MI with '.' when true, with '->' when false.
+  bool NegatePredicate; // If set, expand the logical negation of predicates.
+  bool ExpandForMC;     // Expand against MCInst rather than MachineInstr.
+  unsigned IndentLevel; // Current indent level of the generated code.
+  StringRef TargetName; // Target name, used to qualify opcode names.
+
+  PredicateExpander(const PredicateExpander &) = delete;
+  PredicateExpander &operator=(const PredicateExpander &) = delete;
+
+public:
+  PredicateExpander(StringRef Target)
+      : EmitCallsByRef(true), NegatePredicate(false), ExpandForMC(false),
+        IndentLevel(1U), TargetName(Target) {}
+  bool isByRef() const { return EmitCallsByRef; }
+  bool shouldNegate() const { return NegatePredicate; }
+  bool shouldExpandForMC() const { return ExpandForMC; }
+  unsigned getIndentLevel() const { return IndentLevel; }
+  StringRef getTargetName() const { return TargetName; }
+
+  void setByRef(bool Value) { EmitCallsByRef = Value; }
+  void flipNegatePredicate() { NegatePredicate = !NegatePredicate; }
+  void setNegatePredicate(bool Value) { NegatePredicate = Value; }
+  void setExpandForMC(bool Value) { ExpandForMC = Value; }
+  void setIndentLevel(unsigned Level) { IndentLevel = Level; }
+  void increaseIndentLevel() { ++IndentLevel; }
+  void decreaseIndentLevel() { --IndentLevel; }
+
+  using RecVec = std::vector<Record *>;
+  void expandTrue(raw_ostream &OS);
+  void expandFalse(raw_ostream &OS);
+  void expandCheckImmOperand(raw_ostream &OS, int OpIndex, int ImmVal,
+                             StringRef FunctionMapper);
+  void expandCheckImmOperand(raw_ostream &OS, int OpIndex, StringRef ImmVal,
+                             StringRef FunctionMapperer);
+  void expandCheckImmOperandSimple(raw_ostream &OS, int OpIndex,
+                                   StringRef FunctionMapper);
+  void expandCheckRegOperand(raw_ostream &OS, int OpIndex, const Record *Reg,
+                             StringRef FunctionMapper);
+  void expandCheckRegOperandSimple(raw_ostream &OS, int OpIndex,
+                                   StringRef FunctionMapper);
+  void expandCheckSameRegOperand(raw_ostream &OS, int First, int Second);
+  void expandCheckNumOperands(raw_ostream &OS, int NumOps);
+  void expandCheckOpcode(raw_ostream &OS, const Record *Inst);
+
+  void expandCheckPseudo(raw_ostream &OS, const RecVec &Opcodes);
+  void expandCheckOpcode(raw_ostream &OS, const RecVec &Opcodes);
+  void expandPredicateSequence(raw_ostream &OS, const RecVec &Sequence,
+                               bool IsCheckAll);
+  void expandTIIFunctionCall(raw_ostream &OS, StringRef MethodName);
+  void expandCheckIsRegOperand(raw_ostream &OS, int OpIndex);
+  void expandCheckIsImmOperand(raw_ostream &OS, int OpIndex);
+  void expandCheckInvalidRegOperand(raw_ostream &OS, int OpIndex);
+  void expandCheckFunctionPredicate(raw_ostream &OS, StringRef MCInstFn,
+                                    StringRef MachineInstrFn);
+  void expandCheckFunctionPredicateWithTII(raw_ostream &OS, StringRef MCInstFn,
+                                           StringRef MachineInstrFn,
+                                           StringRef TIIPtr);
+  void expandCheckNonPortable(raw_ostream &OS, StringRef CodeBlock);
+  void expandPredicate(raw_ostream &OS, const Record *Rec);
+  void expandReturnStatement(raw_ostream &OS, const Record *Rec);
+  void expandOpcodeSwitchCase(raw_ostream &OS, const Record *Rec);
+  void expandOpcodeSwitchStatement(raw_ostream &OS, const RecVec &Cases,
+                                   const Record *Default);
+  void expandStatement(raw_ostream &OS, const Record *Rec);
+};
+
+// Forward declarations.
+class STIPredicateFunction;
+class OpcodeGroup;
+
+/// Expands STIPredicate functions into subtarget predicate declarations or
+/// class-qualified member definitions.
+class STIPredicateExpander : public PredicateExpander {
+  StringRef ClassPrefix;  // Class name used to qualify emitted definitions.
+  bool ExpandDefinition;  // Emit full definitions instead of declarations.
+
+  // NOTE(review): these deleted members take the base type; copying is
+  // already prevented by PredicateExpander's deleted copy operations.
+  STIPredicateExpander(const PredicateExpander &) = delete;
+  STIPredicateExpander &operator=(const PredicateExpander &) = delete;
+
+  void expandHeader(raw_ostream &OS, const STIPredicateFunction &Fn);
+  void expandPrologue(raw_ostream &OS, const STIPredicateFunction &Fn);
+  void expandOpcodeGroup(raw_ostream &OS, const OpcodeGroup &Group,
+                         bool ShouldUpdateOpcodeMask);
+  void expandBody(raw_ostream &OS, const STIPredicateFunction &Fn);
+  void expandEpilogue(raw_ostream &OS, const STIPredicateFunction &Fn);
+
+public:
+  STIPredicateExpander(StringRef Target)
+      : PredicateExpander(Target), ExpandDefinition(false) {}
+
+  bool shouldExpandDefinition() const { return ExpandDefinition; }
+  StringRef getClassPrefix() const { return ClassPrefix; }
+  void setClassPrefix(StringRef S) { ClassPrefix = S; }
+  void setExpandDefinition(bool Value) { ExpandDefinition = Value; }
+
+  void expandSTIPredicate(raw_ostream &OS, const STIPredicateFunction &Fn);
+};
+
+} // namespace llvm
+
+#endif
diff --git a/contrib/libs/llvm16/utils/TableGen/PseudoLoweringEmitter.cpp b/contrib/libs/llvm16/utils/TableGen/PseudoLoweringEmitter.cpp
new file mode 100644
index 0000000000..6a1e1332d7
--- /dev/null
+++ b/contrib/libs/llvm16/utils/TableGen/PseudoLoweringEmitter.cpp
@@ -0,0 +1,322 @@
+//===- PseudoLoweringEmitter.cpp - PseudoLowering Generator -----*- C++ -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+#include "CodeGenInstruction.h"
+#include "CodeGenTarget.h"
+#include "llvm/ADT/IndexedMap.h"
+#include "llvm/ADT/SmallVector.h"
+#include "llvm/ADT/StringMap.h"
+#include "llvm/Support/Debug.h"
+#include "llvm/Support/ErrorHandling.h"
+#include "llvm/TableGen/Error.h"
+#include "llvm/TableGen/Record.h"
+#include "llvm/TableGen/TableGenBackend.h"
+#include <vector>
+using namespace llvm;
+
+#define DEBUG_TYPE "pseudo-lowering"
+
+namespace {
+class PseudoLoweringEmitter {
+  // Describes how a single MCInst operand of the expansion is produced:
+  // copied from a source operand, a literal immediate, or a fixed register.
+  struct OpData {
+    enum MapKind { Operand, Imm, Reg };
+    MapKind Kind;
+    union {
+      unsigned Operand; // Operand number mapped to.
+      uint64_t Imm; // Integer immediate value.
+      Record *Reg; // Physical register.
+    } Data;
+  };
+  // A validated (pseudo, real instruction, operand map) triple.
+  struct PseudoExpansion {
+    CodeGenInstruction Source; // The source pseudo instruction definition.
+    CodeGenInstruction Dest; // The destination instruction to lower to.
+    IndexedMap<OpData> OperandMap;
+
+    PseudoExpansion(CodeGenInstruction &s, CodeGenInstruction &d,
+                    IndexedMap<OpData> &m) :
+      Source(s), Dest(d), OperandMap(m) {}
+  };
+
+  RecordKeeper &Records;
+
+  // It's overkill to have an instance of the full CodeGenTarget object,
+  // but it loads everything on demand, not in the constructor, so it's
+  // lightweight in performance, so it works out OK.
+  CodeGenTarget Target;
+
+  SmallVector<PseudoExpansion, 64> Expansions;
+
+  unsigned addDagOperandMapping(Record *Rec, DagInit *Dag,
+                                CodeGenInstruction &Insn,
+                                IndexedMap<OpData> &OperandMap,
+                                unsigned BaseIdx);
+  void evaluateExpansion(Record *Pseudo);
+  void emitLoweringEmitter(raw_ostream &o);
+public:
+  PseudoLoweringEmitter(RecordKeeper &R) : Records(R), Target(R) {}
+
+  /// run - Output the pseudo-lowerings.
+  void run(raw_ostream &o);
+};
+} // End anonymous namespace
+
+// FIXME: This pass currently can only expand a pseudo to a single instruction.
+// The pseudo expansion really should take a list of dags, not just
+// a single dag, so we can do fancier things.
+
+// Populate OperandMap (starting at BaseIdx) with one OpData entry per
+// MachineInstr operand produced by the arguments of the expansion DAG.
+// Recurses into sub-DAGs for complex operands. Returns the number of
+// operand entries added.
+unsigned PseudoLoweringEmitter::
+addDagOperandMapping(Record *Rec, DagInit *Dag, CodeGenInstruction &Insn,
+                     IndexedMap<OpData> &OperandMap, unsigned BaseIdx) {
+  unsigned OpsAdded = 0;
+  for (unsigned i = 0, e = Dag->getNumArgs(); i != e; ++i) {
+    if (DefInit *DI = dyn_cast<DefInit>(Dag->getArg(i))) {
+      // Physical register reference. Explicit check for the special case
+      // "zero_reg" definition.
+      if (DI->getDef()->isSubClassOf("Register") ||
+          DI->getDef()->getName() == "zero_reg") {
+        OperandMap[BaseIdx + i].Kind = OpData::Reg;
+        OperandMap[BaseIdx + i].Data.Reg = DI->getDef();
+        ++OpsAdded;
+        continue;
+      }
+
+      // Normal operands should always have the same type, or we have a
+      // problem.
+      // FIXME: We probably shouldn't ever get a non-zero BaseIdx here.
+      assert(BaseIdx == 0 && "Named subargument in pseudo expansion?!");
+      // FIXME: Are the message operand types backward?
+      if (DI->getDef() != Insn.Operands[BaseIdx + i].Rec) {
+        PrintError(Rec, "In pseudo instruction '" + Rec->getName() +
+                            "', operand type '" + DI->getDef()->getName() +
+                            "' does not match expansion operand type '" +
+                            Insn.Operands[BaseIdx + i].Rec->getName() + "'");
+        PrintFatalNote(DI->getDef(),
+                       "Value was assigned at the following location:");
+      }
+      // Source operand maps to destination operand. The Data element
+      // will be filled in later, just set the Kind for now. Do it
+      // for each corresponding MachineInstr operand, not just the first.
+      for (unsigned I = 0, E = Insn.Operands[i].MINumOperands; I != E; ++I)
+        OperandMap[BaseIdx + i + I].Kind = OpData::Operand;
+      OpsAdded += Insn.Operands[i].MINumOperands;
+    } else if (IntInit *II = dyn_cast<IntInit>(Dag->getArg(i))) {
+      OperandMap[BaseIdx + i].Kind = OpData::Imm;
+      OperandMap[BaseIdx + i].Data.Imm = II->getValue();
+      ++OpsAdded;
+    } else if (auto *BI = dyn_cast<BitsInit>(Dag->getArg(i))) {
+      // A bits<> initializer is converted to a plain integer immediate.
+      auto *II =
+          cast<IntInit>(BI->convertInitializerTo(IntRecTy::get(Records)));
+      OperandMap[BaseIdx + i].Kind = OpData::Imm;
+      OperandMap[BaseIdx + i].Data.Imm = II->getValue();
+      ++OpsAdded;
+    } else if (DagInit *SubDag = dyn_cast<DagInit>(Dag->getArg(i))) {
+      // Just add the operands recursively. This is almost certainly
+      // a constant value for a complex operand (> 1 MI operand).
+      unsigned NewOps =
+          addDagOperandMapping(Rec, SubDag, Insn, OperandMap, BaseIdx + i);
+      OpsAdded += NewOps;
+      // Since we added more than one, we also need to adjust the base.
+      BaseIdx += NewOps - 1;
+    } else
+      llvm_unreachable("Unhandled pseudo-expansion argument type!");
+  }
+  return OpsAdded;
+}
+
+// Validate a single PseudoInstExpansion record and, on success, append the
+// (source pseudo, destination instruction, operand map) triple to Expansions.
+// All validation failures are fatal TableGen errors.
+void PseudoLoweringEmitter::evaluateExpansion(Record *Rec) {
+  LLVM_DEBUG(dbgs() << "Pseudo definition: " << Rec->getName() << "\n");
+
+  // Validate that the result pattern has the correct number and types
+  // of arguments for the instruction it references.
+  DagInit *Dag = Rec->getValueAsDag("ResultInst");
+  assert(Dag && "Missing result instruction in pseudo expansion!");
+  LLVM_DEBUG(dbgs() << "  Result: " << *Dag << "\n");
+
+  DefInit *OpDef = dyn_cast<DefInit>(Dag->getOperator());
+  if (!OpDef) {
+    PrintError(Rec, "In pseudo instruction '" + Rec->getName() +
+                        "', result operator is not a record");
+    PrintFatalNote(Rec->getValue("ResultInst"),
+                   "Result was assigned at the following location:");
+  }
+  Record *Operator = OpDef->getDef();
+  if (!Operator->isSubClassOf("Instruction")) {
+    PrintError(Rec, "In pseudo instruction '" + Rec->getName() +
+                        "', result operator '" + Operator->getName() +
+                        "' is not an instruction");
+    PrintFatalNote(Rec->getValue("ResultInst"),
+                   "Result was assigned at the following location:");
+  }
+
+  CodeGenInstruction Insn(Operator);
+
+  // The expansion target must be a real instruction, not another pseudo.
+  if (Insn.isCodeGenOnly || Insn.isPseudo) {
+    PrintError(Rec, "In pseudo instruction '" + Rec->getName() +
+                        "', result operator '" + Operator->getName() +
+                        "' cannot be a pseudo instruction");
+    PrintFatalNote(Rec->getValue("ResultInst"),
+                   "Result was assigned at the following location:");
+  }
+
+  if (Insn.Operands.size() != Dag->getNumArgs()) {
+    PrintError(Rec, "In pseudo instruction '" + Rec->getName() +
+                        "', result operator '" + Operator->getName() +
+                        "' has the wrong number of operands");
+    PrintFatalNote(Rec->getValue("ResultInst"),
+                   "Result was assigned at the following location:");
+  }
+
+  unsigned NumMIOperands = 0;
+  for (unsigned i = 0, e = Insn.Operands.size(); i != e; ++i)
+    NumMIOperands += Insn.Operands[i].MINumOperands;
+  IndexedMap<OpData> OperandMap;
+  OperandMap.grow(NumMIOperands);
+
+  addDagOperandMapping(Rec, Dag, Insn, OperandMap, 0);
+
+  // If there are more operands that weren't in the DAG, they have to
+  // be operands that have default values, or we have an error. Currently,
+  // Operands that are a subclass of OperandWithDefaultOp have default values.
+
+  // Validate that each result pattern argument has a matching (by name)
+  // argument in the source instruction, in either the (outs) or (ins) list.
+  // Also check that the type of the arguments match.
+  //
+  // Record the mapping of the source to result arguments for use by
+  // the lowering emitter.
+  CodeGenInstruction SourceInsn(Rec);
+  StringMap<unsigned> SourceOperands;
+  for (unsigned i = 0, e = SourceInsn.Operands.size(); i != e; ++i)
+    SourceOperands[SourceInsn.Operands[i].Name] = i;
+
+  LLVM_DEBUG(dbgs() << "  Operand mapping:\n");
+  for (unsigned i = 0, e = Insn.Operands.size(); i != e; ++i) {
+    // We've already handled constant values. Just map instruction operands
+    // here.
+    if (OperandMap[Insn.Operands[i].MIOperandNo].Kind != OpData::Operand)
+      continue;
+    StringMap<unsigned>::iterator SourceOp =
+        SourceOperands.find(Dag->getArgNameStr(i));
+    if (SourceOp == SourceOperands.end()) {
+      PrintError(Rec, "In pseudo instruction '" + Rec->getName() +
+                          "', output operand '" + Dag->getArgNameStr(i) +
+                          "' has no matching source operand");
+      PrintFatalNote(Rec->getValue("ResultInst"),
+                     "Value was assigned at the following location:");
+    }
+    // Map the source operand to the destination operand index for each
+    // MachineInstr operand.
+    for (unsigned I = 0, E = Insn.Operands[i].MINumOperands; I != E; ++I)
+      OperandMap[Insn.Operands[i].MIOperandNo + I].Data.Operand =
+          SourceOp->getValue();
+
+    LLVM_DEBUG(dbgs() << "    " << SourceOp->getValue() << " ==> " << i
+                      << "\n");
+  }
+
+  Expansions.push_back(PseudoExpansion(SourceInsn, Insn, OperandMap));
+}
+
+// Emit the generated emitPseudoExpansionLowering() member of the target's
+// AsmPrinter: a switch over pseudo opcodes that builds the corresponding real
+// MCInst (copying, materializing, or fixing each operand per the operand map)
+// and hands it to the streamer. Returns false when no expansion matched.
+void PseudoLoweringEmitter::emitLoweringEmitter(raw_ostream &o) {
+  // Emit file header.
+  emitSourceFileHeader("Pseudo-instruction MC lowering Source Fragment", o);
+
+  o << "bool " << Target.getName() + "AsmPrinter" << "::\n"
+    << "emitPseudoExpansionLowering(MCStreamer &OutStreamer,\n"
+    << "                            const MachineInstr *MI) {\n";
+
+  if (!Expansions.empty()) {
+    o << "  switch (MI->getOpcode()) {\n"
+      << "  default: return false;\n";
+    for (auto &Expansion : Expansions) {
+      CodeGenInstruction &Source = Expansion.Source;
+      CodeGenInstruction &Dest = Expansion.Dest;
+      o << "  case " << Source.Namespace << "::"
+        << Source.TheDef->getName() << ": {\n"
+        << "    MCInst TmpInst;\n"
+        << "    MCOperand MCOp;\n"
+        << "    TmpInst.setOpcode(" << Dest.Namespace << "::"
+        << Dest.TheDef->getName() << ");\n";
+
+      // Copy the operands from the source instruction.
+      // FIXME: Instruction operands with defaults values (predicates and cc_out
+      //        in ARM, for example shouldn't need explicit values in the
+      //        expansion DAG.
+      unsigned MIOpNo = 0;
+      for (const auto &DestOperand : Dest.Operands) {
+        o << "    // Operand: " << DestOperand.Name << "\n";
+        for (unsigned i = 0, e = DestOperand.MINumOperands; i != e; ++i) {
+          switch (Expansion.OperandMap[MIOpNo + i].Kind) {
+          case OpData::Operand:
+            o << "    lowerOperand(MI->getOperand("
+              << Source.Operands[Expansion.OperandMap[MIOpNo].Data
+              .Operand].MIOperandNo + i
+              << "), MCOp);\n"
+              << "    TmpInst.addOperand(MCOp);\n";
+            break;
+          case OpData::Imm:
+            o << "    TmpInst.addOperand(MCOperand::createImm("
+              << Expansion.OperandMap[MIOpNo + i].Data.Imm << "));\n";
+            break;
+          case OpData::Reg: {
+            Record *Reg = Expansion.OperandMap[MIOpNo + i].Data.Reg;
+            o << "    TmpInst.addOperand(MCOperand::createReg(";
+            // "zero_reg" is special.
+            if (Reg->getName() == "zero_reg")
+              o << "0";
+            else
+              o << Reg->getValueAsString("Namespace") << "::"
+                << Reg->getName();
+            o << "));\n";
+            break;
+          }
+          }
+        }
+        MIOpNo += DestOperand.MINumOperands;
+      }
+      // Variadic sources forward any extra operands straight through.
+      if (Dest.Operands.isVariadic) {
+        MIOpNo = Source.Operands.size() + 1;
+        o << "    // variable_ops\n";
+        o << "    for (unsigned i = " << MIOpNo
+          << ", e = MI->getNumOperands(); i != e; ++i)\n"
+          << "      if (lowerOperand(MI->getOperand(i), MCOp))\n"
+          << "        TmpInst.addOperand(MCOp);\n";
+      }
+      o << "    EmitToStreamer(OutStreamer, TmpInst);\n"
+        << "    break;\n"
+        << "  }\n";
+    }
+    o << "  }\n  return true;";
+  } else
+    o << "  return false;";
+
+  o << "\n}\n\n";
+}
+
+// Backend driver: gather all records deriving from both PseudoInstExpansion
+// and Instruction, validate each expansion, then emit the lowering code.
+void PseudoLoweringEmitter::run(raw_ostream &o) {
+  StringRef Classes[] = {"PseudoInstExpansion", "Instruction"};
+  std::vector<Record *> Insts = Records.getAllDerivedDefinitions(Classes);
+
+  // Process the pseudo expansion definitions, validating them as we do so.
+  Records.startTimer("Process definitions");
+  for (unsigned i = 0, e = Insts.size(); i != e; ++i)
+    evaluateExpansion(Insts[i]);
+
+  // Generate expansion code to lower the pseudo to an MCInst of the real
+  // instruction.
+  Records.startTimer("Emit expansion code");
+  emitLoweringEmitter(o);
+}
+
+namespace llvm {
+
+// TableGen backend entry point for the pseudo-lowering generator.
+void EmitPseudoLowering(RecordKeeper &RK, raw_ostream &OS) {
+  PseudoLoweringEmitter(RK).run(OS);
+}
+
+} // End llvm namespace
diff --git a/contrib/libs/llvm16/utils/TableGen/RISCVTargetDefEmitter.cpp b/contrib/libs/llvm16/utils/TableGen/RISCVTargetDefEmitter.cpp
new file mode 100644
index 0000000000..fa6508cbfc
--- /dev/null
+++ b/contrib/libs/llvm16/utils/TableGen/RISCVTargetDefEmitter.cpp
@@ -0,0 +1,82 @@
+//===- RISCVTargetDefEmitter.cpp - Generate lists of RISCV CPUs -----------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// This tablegen backend emits the include file needed by the target
+// parser to parse the RISC-V CPUs.
+//
+//===----------------------------------------------------------------------===//
+
+#include "TableGenBackends.h"
+#include "llvm/Support/RISCVISAInfo.h"
+#include "llvm/TableGen/Record.h"
+
+using namespace llvm;
+
+using ISAInfoTy = llvm::Expected<std::unique_ptr<RISCVISAInfo>>;
+
+// We can generate march string from target features as what has been described
+// in RISCV ISA specification (version 20191213) 'Chapter 27. ISA Extension
+// Naming Conventions'.
+//
+// This is almost the same as RISCVFeatures::parseFeatureBits, except that we
+// get feature name from feature records instead of feature bits.
+static std::string getMArch(const Record &Rec) {
+  std::vector<std::string> FeatureVector;
+  int XLen = 32; // Assume RV32 unless the "64bit" feature is listed.
+
+  // Convert features to FeatureVector.
+  for (auto *Feature : Rec.getValueAsListOfDefs("Features")) {
+    StringRef FeatureName = Feature->getValueAsString("Name");
+    if (llvm::RISCVISAInfo::isSupportedExtensionFeature(FeatureName))
+      FeatureVector.push_back((Twine("+") + FeatureName).str());
+    else if (FeatureName == "64bit")
+      XLen = 64;
+  }
+
+  ISAInfoTy ISAInfo = llvm::RISCVISAInfo::parseFeatures(XLen, FeatureVector);
+  if (!ISAInfo)
+    report_fatal_error("Invalid features");
+
+  // RISCVISAInfo::toString will generate a march string with all the extensions
+  // we have added to it.
+  return (*ISAInfo)->toString();
+}
+
+// Emit PROC(...) and TUNE_PROC(...) X-macro lists for every RISC-V processor
+// and tune-processor model known to TableGen, for use by the target parser.
+void llvm::EmitRISCVTargetDef(const RecordKeeper &RK, raw_ostream &OS) {
+  OS << "#ifndef PROC\n"
+     << "#define PROC(ENUM, NAME, DEFAULT_MARCH)\n"
+     << "#endif\n\n";
+
+  OS << "PROC(INVALID, {\"invalid\"}, {\"\"})\n";
+  // Iterate on all definition records.
+  for (const Record *Rec : RK.getAllDerivedDefinitions("RISCVProcessorModel")) {
+    std::string MArch = Rec->getValueAsString("DefaultMarch").str();
+
+    // Compute MArch from features if we don't specify it.
+    if (MArch.empty())
+      MArch = getMArch(*Rec);
+
+    OS << "PROC(" << Rec->getName() << ", "
+       << "{\"" << Rec->getValueAsString("Name") << "\"}, "
+       << "{\"" << MArch << "\"})\n";
+  }
+  OS << "\n#undef PROC\n";
+  OS << "\n";
+  OS << "#ifndef TUNE_PROC\n"
+     << "#define TUNE_PROC(ENUM, NAME)\n"
+     << "#endif\n\n";
+  OS << "TUNE_PROC(GENERIC, \"generic\")\n";
+
+  for (const Record *Rec :
+       RK.getAllDerivedDefinitions("RISCVTuneProcessorModel")) {
+    OS << "TUNE_PROC(" << Rec->getName() << ", "
+       << "\"" << Rec->getValueAsString("Name") << "\")\n";
+  }
+
+  OS << "\n#undef TUNE_PROC\n";
+}
diff --git a/contrib/libs/llvm16/utils/TableGen/RegisterBankEmitter.cpp b/contrib/libs/llvm16/utils/TableGen/RegisterBankEmitter.cpp
new file mode 100644
index 0000000000..e6689b211a
--- /dev/null
+++ b/contrib/libs/llvm16/utils/TableGen/RegisterBankEmitter.cpp
@@ -0,0 +1,336 @@
+//===- RegisterBankEmitter.cpp - Generate a Register Bank Desc. -*- C++ -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// This tablegen backend is responsible for emitting a description of a target
+// register bank for a code generator.
+//
+//===----------------------------------------------------------------------===//
+
+#include "llvm/ADT/BitVector.h"
+#include "llvm/Support/Debug.h"
+#include "llvm/TableGen/Error.h"
+#include "llvm/TableGen/Record.h"
+#include "llvm/TableGen/TableGenBackend.h"
+
+#include "CodeGenRegisters.h"
+#include "CodeGenTarget.h"
+
+#define DEBUG_TYPE "register-bank-emitter"
+
+using namespace llvm;
+
+namespace {
+/// Helper describing a single RegisterBank record together with the set of
+/// register classes this emitter computes to be covered by it.
+class RegisterBank {
+
+  /// A vector of register classes that are included in the register bank.
+  typedef std::vector<const CodeGenRegisterClass *> RegisterClassesTy;
+
+private:
+  const Record &TheDef;
+
+  /// The register classes that are covered by the register bank.
+  RegisterClassesTy RCs;
+
+  /// The register class with the largest register size.
+  const CodeGenRegisterClass *RCWithLargestRegsSize;
+
+public:
+  RegisterBank(const Record &TheDef)
+      : TheDef(TheDef), RCWithLargestRegsSize(nullptr) {}
+
+  /// Get the human-readable name for the bank.
+  StringRef getName() const { return TheDef.getValueAsString("Name"); }
+  /// Get the name of the enumerator in the ID enumeration.
+  std::string getEnumeratorName() const { return (TheDef.getName() + "ID").str(); }
+
+  /// Get the name of the array holding the register class coverage data;
+  std::string getCoverageArrayName() const {
+    return (TheDef.getName() + "CoverageData").str();
+  }
+
+  /// Get the name of the global instance variable.
+  StringRef getInstanceVarName() const { return TheDef.getName(); }
+
+  const Record &getDef() const { return TheDef; }
+
+  /// Get the register classes listed in the RegisterBank.RegisterClasses field.
+  std::vector<const CodeGenRegisterClass *>
+  getExplicitlySpecifiedRegisterClasses(
+      const CodeGenRegBank &RegisterClassHierarchy) const {
+    std::vector<const CodeGenRegisterClass *> RCs;
+    for (const auto *RCDef : getDef().getValueAsListOfDefs("RegisterClasses"))
+      RCs.push_back(RegisterClassHierarchy.getRegClass(RCDef));
+    return RCs;
+  }
+
+  /// Add a register class to the bank without duplicates.
+  /// Also tracks the class with the largest spill size seen so far.
+  void addRegisterClass(const CodeGenRegisterClass *RC) {
+    if (llvm::is_contained(RCs, RC))
+      return;
+
+    // FIXME? We really want the register size rather than the spill size
+    //        since the spill size may be bigger on some targets with
+    //        limited load/store instructions. However, we don't store the
+    //        register size anywhere (we could sum the sizes of the subregisters
+    //        but there may be additional bits too) and we can't derive it from
+    //        the VT's reliably due to Untyped.
+    if (RCWithLargestRegsSize == nullptr)
+      RCWithLargestRegsSize = RC;
+    else if (RCWithLargestRegsSize->RSI.get(DefaultMode).SpillSize <
+             RC->RSI.get(DefaultMode).SpillSize)
+      RCWithLargestRegsSize = RC;
+    assert(RCWithLargestRegsSize && "RC was nullptr?");
+
+    RCs.emplace_back(RC);
+  }
+
+  /// The class with the largest spill size added so far (used as a proxy for
+  /// the bank's size; see the FIXME in addRegisterClass).
+  const CodeGenRegisterClass *getRCWithLargestRegsSize() const {
+    return RCWithLargestRegsSize;
+  }
+
+  /// Iterate over every register class added to this bank.
+  iterator_range<typename RegisterClassesTy::const_iterator>
+  register_classes() const {
+    return llvm::make_range(RCs.begin(), RCs.end());
+  }
+};
+
+/// TableGen backend that emits the <Target>GenRegisterBankInfo declarations
+/// and definitions, guarded by the GET_REGBANK_* preprocessor macros.
+class RegisterBankEmitter {
+private:
+  CodeGenTarget Target;
+  RecordKeeper &Records;
+
+  // Emit the bank ID enumeration and extern instance declarations.
+  void emitHeader(raw_ostream &OS, const StringRef TargetName,
+                  const std::vector<RegisterBank> &Banks);
+  // Emit the member declarations of the <Target>GenRegisterBankInfo class.
+  void emitBaseClassDefinition(raw_ostream &OS, const StringRef TargetName,
+                               const std::vector<RegisterBank> &Banks);
+  // Emit coverage tables, bank instances, and the base-class constructor.
+  void emitBaseClassImplementation(raw_ostream &OS, const StringRef TargetName,
+                                   std::vector<RegisterBank> &Banks);
+
+public:
+  RegisterBankEmitter(RecordKeeper &R) : Target(R), Records(R) {}
+
+  void run(raw_ostream &OS);
+};
+
+} // end anonymous namespace
+
+/// Emit code to declare the ID enumeration and external global instance
+/// variables.
+void RegisterBankEmitter::emitHeader(raw_ostream &OS,
+                                     const StringRef TargetName,
+                                     const std::vector<RegisterBank> &Banks) {
+  // <Target>RegisterBankInfo.h
+  OS << "namespace llvm {\n"
+     << "namespace " << TargetName << " {\n"
+     << "enum : unsigned {\n";
+
+  OS << "  InvalidRegBankID = ~0u,\n";
+  // Enumerators get consecutive IDs starting at 0; the generated constructor
+  // asserts that each bank's ID matches its index in the RegBanks array.
+  unsigned ID = 0;
+  for (const auto &Bank : Banks)
+    OS << "  " << Bank.getEnumeratorName() << " = " << ID++ << ",\n";
+  OS << "  NumRegisterBanks,\n"
+     << "};\n"
+     << "} // end namespace " << TargetName << "\n"
+     << "} // end namespace llvm\n";
+}
+
+/// Emit declarations of the <Target>GenRegisterBankInfo class.
+/// This fragment is spliced into the target's RegisterBankInfo subclass body
+/// via the GET_TARGET_REGBANK_CLASS guard.
+void RegisterBankEmitter::emitBaseClassDefinition(
+    raw_ostream &OS, const StringRef TargetName,
+    const std::vector<RegisterBank> &Banks) {
+  OS << "private:\n"
+     << "  static RegisterBank *RegBanks[];\n\n"
+     << "protected:\n"
+     << "  " << TargetName << "GenRegisterBankInfo();\n"
+     << "\n";
+}
+
+/// Visit each register class belonging to the given register bank.
+///
+/// A class belongs to the bank iff any of these apply:
+/// * It is explicitly specified
+/// * It is a subclass of a class that is a member.
+/// * It is a class containing subregisters of the registers of a class that
+///   is a member. This is known as a subreg-class.
+///
+/// This function must be called for each explicitly specified register class.
+///
+/// \param RC The register class to search.
+/// \param Kind A debug string containing the path the visitor took to reach RC.
+/// \param VisitFn The action to take for each class visited. It may be called
+///                multiple times for a given class if there are multiple paths
+///                to the class.
+static void visitRegisterBankClasses(
+    const CodeGenRegBank &RegisterClassHierarchy,
+    const CodeGenRegisterClass *RC, const Twine &Kind,
+    std::function<void(const CodeGenRegisterClass *, StringRef)> VisitFn,
+    SmallPtrSetImpl<const CodeGenRegisterClass *> &VisitedRCs) {
+
+  // Make sure we only visit each class once to avoid infinite loops.
+  if (!VisitedRCs.insert(RC).second)
+    return;
+
+  // Visit each explicitly named class.
+  VisitFn(RC, Kind.str());
+
+  for (const auto &PossibleSubclass : RegisterClassHierarchy.getRegClasses()) {
+    std::string TmpKind =
+        (Kind + " (" + PossibleSubclass.getName() + ")").str();
+
+    // Visit each subclass of an explicitly named class.
+    if (RC != &PossibleSubclass && RC->hasSubClass(&PossibleSubclass))
+      visitRegisterBankClasses(RegisterClassHierarchy, &PossibleSubclass,
+                               TmpKind + " " + RC->getName() + " subclass",
+                               VisitFn, VisitedRCs);
+
+    // Visit each class that contains only subregisters of RC with a common
+    // subregister-index.
+    //
+    // More precisely, PossibleSubclass is a subreg-class iff Reg:SubIdx is in
+    // PossibleSubclass for all registers Reg from RC using any
+    // subregister-index SubReg
+    for (const auto &SubIdx : RegisterClassHierarchy.getSubRegIndices()) {
+      BitVector BV(RegisterClassHierarchy.getRegClasses().size());
+      PossibleSubclass.getSuperRegClasses(&SubIdx, BV);
+      if (BV.test(RC->EnumValue)) {
+        std::string TmpKind2 = (Twine(TmpKind) + " " + RC->getName() +
+                                " class-with-subregs: " + RC->getName())
+                                   .str();
+        VisitFn(&PossibleSubclass, TmpKind2);
+      }
+    }
+  }
+}
+
+/// Emit the out-of-line pieces: one 32-bit-word coverage bitmask array per
+/// bank, one global RegisterBank instance per bank, the RegBanks pointer
+/// array, and the <Target>GenRegisterBankInfo constructor.
+void RegisterBankEmitter::emitBaseClassImplementation(
+    raw_ostream &OS, StringRef TargetName,
+    std::vector<RegisterBank> &Banks) {
+  const CodeGenRegBank &RegisterClassHierarchy = Target.getRegBank();
+
+  OS << "namespace llvm {\n"
+     << "namespace " << TargetName << " {\n";
+  for (const auto &Bank : Banks) {
+    // Bucket the bank's classes by which 32-bit word of the bitmask their
+    // register-class enum value lands in.
+    std::vector<std::vector<const CodeGenRegisterClass *>> RCsGroupedByWord(
+        (RegisterClassHierarchy.getRegClasses().size() + 31) / 32);
+
+    for (const auto &RC : Bank.register_classes())
+      RCsGroupedByWord[RC->EnumValue / 32].push_back(RC);
+
+    OS << "const uint32_t " << Bank.getCoverageArrayName() << "[] = {\n";
+    unsigned LowestIdxInWord = 0;
+    for (const auto &RCs : RCsGroupedByWord) {
+      OS << "    // " << LowestIdxInWord << "-" << (LowestIdxInWord + 31) << "\n";
+      for (const auto &RC : RCs) {
+        std::string QualifiedRegClassID =
+            (Twine(RC->Namespace) + "::" + RC->getName() + "RegClassID").str();
+        OS << "    (1u << (" << QualifiedRegClassID << " - "
+           << LowestIdxInWord << ")) |\n";
+      }
+      // Trailing 0 terminates the |-chain for this word.
+      OS << "    0,\n";
+      LowestIdxInWord += 32;
+    }
+    OS << "};\n";
+  }
+  OS << "\n";
+
+  for (const auto &Bank : Banks) {
+    std::string QualifiedBankID =
+        (TargetName + "::" + Bank.getEnumeratorName()).str();
+    const CodeGenRegisterClass &RC = *Bank.getRCWithLargestRegsSize();
+    unsigned Size = RC.RSI.get(DefaultMode).SpillSize;
+    OS << "RegisterBank " << Bank.getInstanceVarName() << "(/* ID */ "
+       << QualifiedBankID << ", /* Name */ \"" << Bank.getName()
+       << "\", /* Size */ " << Size << ", "
+       << "/* CoveredRegClasses */ " << Bank.getCoverageArrayName()
+       << ", /* NumRegClasses */ "
+       << RegisterClassHierarchy.getRegClasses().size() << ");\n";
+  }
+  OS << "} // end namespace " << TargetName << "\n"
+     << "\n";
+
+  OS << "RegisterBank *" << TargetName
+     << "GenRegisterBankInfo::RegBanks[] = {\n";
+  for (const auto &Bank : Banks)
+    OS << "  &" << TargetName << "::" << Bank.getInstanceVarName() << ",\n";
+  OS << "};\n\n";
+
+  OS << TargetName << "GenRegisterBankInfo::" << TargetName
+     << "GenRegisterBankInfo()\n"
+     << "    : RegisterBankInfo(RegBanks, " << TargetName
+     << "::NumRegisterBanks) {\n"
+     << "  // Assert that RegBank indices match their ID's\n"
+     << "#ifndef NDEBUG\n"
+     << "  for (auto RB : enumerate(RegBanks))\n"
+     << "    assert(RB.index() == RB.value()->getID() && \"Index != ID\");\n"
+     << "#endif // NDEBUG\n"
+     << "}\n"
+     << "} // end namespace llvm\n";
+}
+
+/// Entry point: analyze all RegisterBank records, warn about register
+/// bank/class name clashes, then emit the three guarded output fragments.
+void RegisterBankEmitter::run(raw_ostream &OS) {
+  StringRef TargetName = Target.getName();
+  const CodeGenRegBank &RegisterClassHierarchy = Target.getRegBank();
+
+  Records.startTimer("Analyze records");
+  std::vector<RegisterBank> Banks;
+  for (const auto &V : Records.getAllDerivedDefinitions("RegisterBank")) {
+    SmallPtrSet<const CodeGenRegisterClass *, 8> VisitedRCs;
+    RegisterBank Bank(*V);
+
+    // Expand the explicit class list to the full transitive coverage set
+    // (subclasses and subreg-classes included).
+    for (const CodeGenRegisterClass *RC :
+         Bank.getExplicitlySpecifiedRegisterClasses(RegisterClassHierarchy)) {
+      visitRegisterBankClasses(
+          RegisterClassHierarchy, RC, "explicit",
+          [&Bank](const CodeGenRegisterClass *RC, StringRef Kind) {
+            LLVM_DEBUG(dbgs()
+                       << "Added " << RC->getName() << "(" << Kind << ")\n");
+            Bank.addRegisterClass(RC);
+          },
+          VisitedRCs);
+    }
+
+    Banks.push_back(Bank);
+  }
+
+  // Warn about ambiguous MIR caused by register bank/class name clashes.
+  Records.startTimer("Warn ambiguous");
+  for (const auto &Class : RegisterClassHierarchy.getRegClasses()) {
+    for (const auto &Bank : Banks) {
+      // Case-insensitive comparison: MIR parsing is what becomes ambiguous.
+      if (Bank.getName().lower() == StringRef(Class.getName()).lower()) {
+        PrintWarning(Bank.getDef().getLoc(), "Register bank names should be "
+                                             "distinct from register classes "
+                                             "to avoid ambiguous MIR");
+        PrintNote(Bank.getDef().getLoc(), "RegisterBank was declared here");
+        PrintNote(Class.getDef()->getLoc(), "RegisterClass was declared here");
+      }
+    }
+  }
+
+  Records.startTimer("Emit output");
+  emitSourceFileHeader("Register Bank Source Fragments", OS);
+  OS << "#ifdef GET_REGBANK_DECLARATIONS\n"
+     << "#undef GET_REGBANK_DECLARATIONS\n";
+  emitHeader(OS, TargetName, Banks);
+  OS << "#endif // GET_REGBANK_DECLARATIONS\n\n"
+     << "#ifdef GET_TARGET_REGBANK_CLASS\n"
+     << "#undef GET_TARGET_REGBANK_CLASS\n";
+  emitBaseClassDefinition(OS, TargetName, Banks);
+  OS << "#endif // GET_TARGET_REGBANK_CLASS\n\n"
+     << "#ifdef GET_TARGET_REGBANK_IMPL\n"
+     << "#undef GET_TARGET_REGBANK_IMPL\n";
+  emitBaseClassImplementation(OS, TargetName, Banks);
+  OS << "#endif // GET_TARGET_REGBANK_IMPL\n";
+}
+
+namespace llvm {
+
+// Public TableGen backend entry point for -gen-register-bank.
+void EmitRegisterBank(RecordKeeper &RK, raw_ostream &OS) {
+  RegisterBankEmitter(RK).run(OS);
+}
+
+} // end namespace llvm
diff --git a/contrib/libs/llvm16/utils/TableGen/RegisterInfoEmitter.cpp b/contrib/libs/llvm16/utils/TableGen/RegisterInfoEmitter.cpp
new file mode 100644
index 0000000000..113cebf8a0
--- /dev/null
+++ b/contrib/libs/llvm16/utils/TableGen/RegisterInfoEmitter.cpp
@@ -0,0 +1,1915 @@
+//===- RegisterInfoEmitter.cpp - Generate a Register File Desc. -*- C++ -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// This tablegen backend is responsible for emitting a description of a target
+// register file for a code generator. It uses instances of the Register,
+// RegisterAliases, and RegisterClass classes to gather this information.
+//
+//===----------------------------------------------------------------------===//
+
+#include "CodeGenRegisters.h"
+#include "CodeGenTarget.h"
+#include "SequenceToOffsetTable.h"
+#include "Types.h"
+#include "llvm/ADT/ArrayRef.h"
+#include "llvm/ADT/BitVector.h"
+#include "llvm/ADT/STLExtras.h"
+#include "llvm/ADT/SetVector.h"
+#include "llvm/ADT/SmallVector.h"
+#include "llvm/ADT/SparseBitVector.h"
+#include "llvm/ADT/Twine.h"
+#include "llvm/Support/Casting.h"
+#include "llvm/Support/CommandLine.h"
+#include "llvm/Support/Format.h"
+#include "llvm/Support/MachineValueType.h"
+#include "llvm/Support/raw_ostream.h"
+#include "llvm/TableGen/Error.h"
+#include "llvm/TableGen/Record.h"
+#include "llvm/TableGen/SetTheory.h"
+#include "llvm/TableGen/TableGenBackend.h"
+#include <algorithm>
+#include <cassert>
+#include <cstddef>
+#include <cstdint>
+#include <deque>
+#include <iterator>
+#include <set>
+#include <string>
+#include <vector>
+
+using namespace llvm;
+
+// Command-line category and flag controlling the optional debug dump of the
+// computed register information (see RegisterInfoEmitter::debugDump).
+cl::OptionCategory RegisterInfoCat("Options for -gen-register-info");
+
+static cl::opt<bool>
+    RegisterInfoDebug("register-info-debug", cl::init(false),
+                      cl::desc("Dump register information to help debugging"),
+                      cl::cat(RegisterInfoCat));
+
+namespace {
+
+/// TableGen backend that emits the target's register file description:
+/// enums, MC-layer tables, and the TargetRegisterInfo subclass fragments.
+class RegisterInfoEmitter {
+  CodeGenTarget Target;
+  RecordKeeper &Records;
+
+public:
+  RegisterInfoEmitter(RecordKeeper &R) : Target(R), Records(R) {
+    CodeGenRegBank &RegBank = Target.getRegBank();
+    RegBank.computeDerivedInfo();
+  }
+
+  // runEnums - Print out enum values for all of the registers.
+  void runEnums(raw_ostream &o, CodeGenTarget &Target, CodeGenRegBank &Bank);
+
+  // runMCDesc - Print out MC register descriptions.
+  void runMCDesc(raw_ostream &o, CodeGenTarget &Target, CodeGenRegBank &Bank);
+
+  // runTargetHeader - Emit a header fragment for the register info emitter.
+  void runTargetHeader(raw_ostream &o, CodeGenTarget &Target,
+                       CodeGenRegBank &Bank);
+
+  // runTargetDesc - Output the target register and register file descriptions.
+  void runTargetDesc(raw_ostream &o, CodeGenTarget &Target,
+                     CodeGenRegBank &Bank);
+
+  // run - Output the register file description.
+  void run(raw_ostream &o);
+
+  // Dump the analyzed register information (enabled by -register-info-debug).
+  void debugDump(raw_ostream &OS);
+
+private:
+  // Emit the DwarfFlavour/EHFlavour mapping setup calls (ctor or RI-> form).
+  void EmitRegMapping(raw_ostream &o, const std::deque<CodeGenRegister> &Regs,
+                      bool isCtor);
+  // Emit the Dwarf<->LLVM register mapping tables themselves.
+  void EmitRegMappingTables(raw_ostream &o,
+                            const std::deque<CodeGenRegister> &Regs,
+                            bool isCtor);
+  void EmitRegUnitPressure(raw_ostream &OS, const CodeGenRegBank &RegBank,
+                           const std::string &ClassName);
+  void emitComposeSubRegIndices(raw_ostream &OS, CodeGenRegBank &RegBank,
+                                const std::string &ClassName);
+  void emitComposeSubRegIndexLaneMask(raw_ostream &OS, CodeGenRegBank &RegBank,
+                                      const std::string &ClassName);
+};
+
+} // end anonymous namespace
+
+// runEnums - Print out enum values for all of the registers.
+// Emits the GET_REGINFO_ENUM fragment: register enum, register-class enum,
+// alternate-name indices, subregister indices, and pressure-set enum.
+void RegisterInfoEmitter::runEnums(raw_ostream &OS,
+                                   CodeGenTarget &Target, CodeGenRegBank &Bank) {
+  const auto &Registers = Bank.getRegisters();
+
+  // Register enums are stored as uint16_t in the tables. Make sure we'll fit.
+  assert(Registers.size() <= 0xffff && "Too many regs to fit in tables");
+
+  StringRef Namespace = Registers.front().TheDef->getValueAsString("Namespace");
+
+  emitSourceFileHeader("Target Register Enum Values", OS);
+
+  OS << "\n#ifdef GET_REGINFO_ENUM\n";
+  OS << "#undef GET_REGINFO_ENUM\n\n";
+
+  OS << "namespace llvm {\n\n";
+
+  OS << "class MCRegisterClass;\n"
+     << "extern const MCRegisterClass " << Target.getName()
+     << "MCRegisterClasses[];\n\n";
+
+  if (!Namespace.empty())
+    OS << "namespace " << Namespace << " {\n";
+  OS << "enum {\n  NoRegister,\n";
+
+  for (const auto &Reg : Registers)
+    OS << "  " << Reg.getName() << " = " << Reg.EnumValue << ",\n";
+  // Enum values are 1-based (0 is NoRegister), so the last value equals size.
+  assert(Registers.size() == Registers.back().EnumValue &&
+         "Register enum value mismatch!");
+  OS << "  NUM_TARGET_REGS // " << Registers.size()+1 << "\n";
+  OS << "};\n";
+  if (!Namespace.empty())
+    OS << "} // end namespace " << Namespace << "\n";
+
+  const auto &RegisterClasses = Bank.getRegClasses();
+  if (!RegisterClasses.empty()) {
+
+    // RegisterClass enums are stored as uint16_t in the tables.
+    assert(RegisterClasses.size() <= 0xffff &&
+           "Too many register classes to fit in tables");
+
+    OS << "\n// Register classes\n\n";
+    if (!Namespace.empty())
+      OS << "namespace " << Namespace << " {\n";
+    OS << "enum {\n";
+    for (const auto &RC : RegisterClasses)
+      OS << "  " << RC.getName() << "RegClassID"
+         << " = " << RC.EnumValue << ",\n";
+    OS << "\n};\n";
+    if (!Namespace.empty())
+      OS << "} // end namespace " << Namespace << "\n\n";
+  }
+
+  const std::vector<Record*> &RegAltNameIndices = Target.getRegAltNameIndices();
+  // If the only definition is the default NoRegAltName, we don't need to
+  // emit anything.
+  if (RegAltNameIndices.size() > 1) {
+    OS << "\n// Register alternate name indices\n\n";
+    if (!Namespace.empty())
+      OS << "namespace " << Namespace << " {\n";
+    OS << "enum {\n";
+    for (unsigned i = 0, e = RegAltNameIndices.size(); i != e; ++i)
+      OS << "  " << RegAltNameIndices[i]->getName() << ",\t// " << i << "\n";
+    OS << "  NUM_TARGET_REG_ALT_NAMES = " << RegAltNameIndices.size() << "\n";
+    OS << "};\n";
+    if (!Namespace.empty())
+      OS << "} // end namespace " << Namespace << "\n\n";
+  }
+
+  auto &SubRegIndices = Bank.getSubRegIndices();
+  if (!SubRegIndices.empty()) {
+    OS << "\n// Subregister indices\n\n";
+    std::string Namespace = SubRegIndices.front().getNamespace();
+    if (!Namespace.empty())
+      OS << "namespace " << Namespace << " {\n";
+    OS << "enum : uint16_t {\n  NoSubRegister,\n";
+    unsigned i = 0;
+    for (const auto &Idx : SubRegIndices)
+      OS << "  " << Idx.getName() << ",\t// " << ++i << "\n";
+    OS << "  NUM_TARGET_SUBREGS\n};\n";
+    if (!Namespace.empty())
+      OS << "} // end namespace " << Namespace << "\n\n";
+  }
+
+  OS << "// Register pressure sets enum.\n";
+  if (!Namespace.empty())
+    OS << "namespace " << Namespace << " {\n";
+  OS << "enum RegisterPressureSets {\n";
+  unsigned NumSets = Bank.getNumRegPressureSets();
+  for (unsigned i = 0; i < NumSets; ++i ) {
+    const RegUnitSet &RegUnits = Bank.getRegSetAt(i);
+    OS << "  " << RegUnits.Name << " = " << i << ",\n";
+  }
+  OS << "};\n";
+  if (!Namespace.empty())
+    OS << "} // end namespace " << Namespace << '\n';
+  OS << '\n';
+
+  OS << "} // end namespace llvm\n\n";
+  OS << "#endif // GET_REGINFO_ENUM\n\n";
+}
+
+// Element printer callback used with SequenceToOffsetTable::emit below.
+static void printInt(raw_ostream &OS, int Val) {
+  OS << Val;
+}
+
+// Emit the register-pressure query methods of the generated
+// TargetRegisterInfo subclass: class/unit weights, pressure-set names and
+// limits, and the -1 terminated pressure-set lists per class and per unit.
+void RegisterInfoEmitter::
+EmitRegUnitPressure(raw_ostream &OS, const CodeGenRegBank &RegBank,
+                    const std::string &ClassName) {
+  unsigned NumRCs = RegBank.getRegClasses().size();
+  unsigned NumSets = RegBank.getNumRegPressureSets();
+
+  OS << "/// Get the weight in units of pressure for this register class.\n"
+     << "const RegClassWeight &" << ClassName << "::\n"
+     << "getRegClassWeight(const TargetRegisterClass *RC) const {\n"
+     << "  static const RegClassWeight RCWeightTable[] = {\n";
+  for (const auto &RC : RegBank.getRegClasses()) {
+    const CodeGenRegister::Vec &Regs = RC.getMembers();
+    OS << "    {" << RC.getWeight(RegBank) << ", ";
+    // Empty or artificial classes contribute no register-unit weight.
+    if (Regs.empty() || RC.Artificial)
+      OS << '0';
+    else {
+      std::vector<unsigned> RegUnits;
+      RC.buildRegUnitSet(RegBank, RegUnits);
+      OS << RegBank.getRegUnitSetWeight(RegUnits);
+    }
+    OS << "}, \t// " << RC.getName() << "\n";
+  }
+  OS << "  };\n"
+     << "  return RCWeightTable[RC->getID()];\n"
+     << "}\n\n";
+
+  // Reasonable targets (not ARMv7) have unit weight for all units, so don't
+  // bother generating a table.
+  bool RegUnitsHaveUnitWeight = true;
+  for (unsigned UnitIdx = 0, UnitEnd = RegBank.getNumNativeRegUnits();
+       UnitIdx < UnitEnd; ++UnitIdx) {
+    if (RegBank.getRegUnit(UnitIdx).Weight > 1)
+      RegUnitsHaveUnitWeight = false;
+  }
+  OS << "/// Get the weight in units of pressure for this register unit.\n"
+     << "unsigned " << ClassName << "::\n"
+     << "getRegUnitWeight(unsigned RegUnit) const {\n"
+     << "  assert(RegUnit < " << RegBank.getNumNativeRegUnits()
+     << " && \"invalid register unit\");\n";
+  if (!RegUnitsHaveUnitWeight) {
+    OS << "  static const uint8_t RUWeightTable[] = {\n    ";
+    for (unsigned UnitIdx = 0, UnitEnd = RegBank.getNumNativeRegUnits();
+         UnitIdx < UnitEnd; ++UnitIdx) {
+      const RegUnit &RU = RegBank.getRegUnit(UnitIdx);
+      // Weights are emitted into a uint8_t table; guard the range.
+      assert(RU.Weight < 256 && "RegUnit too heavy");
+      OS << RU.Weight << ", ";
+    }
+    OS << "};\n"
+       << "  return RUWeightTable[RegUnit];\n";
+  }
+  else {
+    OS << "  // All register units have unit weight.\n"
+       << "  return 1;\n";
+  }
+  OS << "}\n\n";
+
+  OS << "\n"
+     << "// Get the number of dimensions of register pressure.\n"
+     << "unsigned " << ClassName << "::getNumRegPressureSets() const {\n"
+     << "  return " << NumSets << ";\n}\n\n";
+
+  OS << "// Get the name of this register unit pressure set.\n"
+     << "const char *" << ClassName << "::\n"
+     << "getRegPressureSetName(unsigned Idx) const {\n"
+     << "  static const char *PressureNameTable[] = {\n";
+  unsigned MaxRegUnitWeight = 0;
+  for (unsigned i = 0; i < NumSets; ++i ) {
+    const RegUnitSet &RegUnits = RegBank.getRegSetAt(i);
+    // Track the max weight so the limit table below can use a minimal type.
+    MaxRegUnitWeight = std::max(MaxRegUnitWeight, RegUnits.Weight);
+    OS << "    \"" << RegUnits.Name << "\",\n";
+  }
+  OS << "  };\n"
+     << "  return PressureNameTable[Idx];\n"
+     << "}\n\n";
+
+  OS << "// Get the register unit pressure limit for this dimension.\n"
+     << "// This limit must be adjusted dynamically for reserved registers.\n"
+     << "unsigned " << ClassName << "::\n"
+     << "getRegPressureSetLimit(const MachineFunction &MF, unsigned Idx) const "
+        "{\n"
+     << "  static const " << getMinimalTypeForRange(MaxRegUnitWeight, 32)
+     << " PressureLimitTable[] = {\n";
+  for (unsigned i = 0; i < NumSets; ++i ) {
+    const RegUnitSet &RegUnits = RegBank.getRegSetAt(i);
+    OS << "    " << RegUnits.Weight << ", \t// " << i << ": "
+       << RegUnits.Name << "\n";
+  }
+  OS << "  };\n"
+     << "  return PressureLimitTable[Idx];\n"
+     << "}\n\n";
+
+  // Compress the per-class/per-unit pressure-set lists into one shared table.
+  SequenceToOffsetTable<std::vector<int>> PSetsSeqs;
+
+  // This table may be larger than NumRCs if some register units needed a list
+  // of unit sets that did not correspond to a register class.
+  unsigned NumRCUnitSets = RegBank.getNumRegClassPressureSetLists();
+  std::vector<std::vector<int>> PSets(NumRCUnitSets);
+
+  for (unsigned i = 0, e = NumRCUnitSets; i != e; ++i) {
+    ArrayRef<unsigned> PSetIDs = RegBank.getRCPressureSetIDs(i);
+    PSets[i].reserve(PSetIDs.size());
+    for (unsigned PSetID : PSetIDs) {
+      PSets[i].push_back(RegBank.getRegPressureSet(PSetID).Order);
+    }
+    llvm::sort(PSets[i]);
+    PSetsSeqs.add(PSets[i]);
+  }
+
+  PSetsSeqs.layout();
+
+  OS << "/// Table of pressure sets per register class or unit.\n"
+     << "static const int RCSetsTable[] = {\n";
+  PSetsSeqs.emit(OS, printInt, "-1");
+  OS << "};\n\n";
+
+  OS << "/// Get the dimensions of register pressure impacted by this "
+     << "register class.\n"
+     << "/// Returns a -1 terminated array of pressure set IDs\n"
+     << "const int *" << ClassName << "::\n"
+     << "getRegClassPressureSets(const TargetRegisterClass *RC) const {\n";
+  OS << "  static const " << getMinimalTypeForRange(PSetsSeqs.size() - 1, 32)
+     << " RCSetStartTable[] = {\n    ";
+  for (unsigned i = 0, e = NumRCs; i != e; ++i) {
+    OS << PSetsSeqs.get(PSets[i]) << ",";
+  }
+  OS << "};\n"
+     << "  return &RCSetsTable[RCSetStartTable[RC->getID()]];\n"
+     << "}\n\n";
+
+  OS << "/// Get the dimensions of register pressure impacted by this "
+     << "register unit.\n"
+     << "/// Returns a -1 terminated array of pressure set IDs\n"
+     << "const int *" << ClassName << "::\n"
+     << "getRegUnitPressureSets(unsigned RegUnit) const {\n"
+     << "  assert(RegUnit < " << RegBank.getNumNativeRegUnits()
+     << " && \"invalid register unit\");\n";
+  OS << "  static const " << getMinimalTypeForRange(PSetsSeqs.size() - 1, 32)
+     << " RUSetStartTable[] = {\n    ";
+  for (unsigned UnitIdx = 0, UnitEnd = RegBank.getNumNativeRegUnits();
+       UnitIdx < UnitEnd; ++UnitIdx) {
+    OS << PSetsSeqs.get(PSets[RegBank.getRegUnit(UnitIdx).RegClassUnitSetsIdx])
+       << ",";
+  }
+  OS << "};\n"
+     << "  return &RCSetsTable[RUSetStartTable[RegUnit]];\n"
+     << "}\n\n";
+}
+
+using DwarfRegNumsMapPair = std::pair<Record*, std::vector<int64_t>>;
+using DwarfRegNumsVecTy = std::vector<DwarfRegNumsMapPair>;
+
+// Turn the (Record, DwarfNumbers) pair list into a sorted, deduplicated
+// map-like vector keyed by register name, warning about registers whose DWARF
+// numbers were assigned more than once (the last assignment wins, matching
+// previous behaviour).
+static void finalizeDwarfRegNumsKeys(DwarfRegNumsVecTy &DwarfRegNums) {
+  // Sort and unique to get a map-like vector. We want the last assignment to
+  // match previous behaviour.
+  llvm::stable_sort(DwarfRegNums, on_first<LessRecordRegister>());
+  // Warn about duplicate assignments.
+  const Record *LastSeenReg = nullptr;
+  for (const auto &X : DwarfRegNums) {
+    const auto &Reg = X.first;
+    // The only way LessRecordRegister can return equal is if they're the same
+    // string. Use simple equality instead.
+    if (LastSeenReg && Reg->getName() == LastSeenReg->getName())
+      // Note the leading space: without it the message ran the register name
+      // straight into "specified" (e.g. "register X0specified ...").
+      PrintWarning(Reg->getLoc(), Twine("DWARF numbers for register ") +
+                                      getQualifiedName(Reg) +
+                                      " specified multiple times");
+    LastSeenReg = Reg;
+  }
+  // Keep only the last entry for each register name (stable_sort preserved
+  // assignment order, and std::unique keeps the first of each equal run after
+  // sorting, which corresponds to the intended winner).
+  auto Last = std::unique(
+      DwarfRegNums.begin(), DwarfRegNums.end(),
+      [](const DwarfRegNumsMapPair &A, const DwarfRegNumsMapPair &B) {
+        return A.first->getName() == B.first->getName();
+      });
+  DwarfRegNums.erase(Last, DwarfRegNums.end());
+}
+
+// Emit the Dwarf<->LLVM register mapping tables (both directions, for the
+// DwarfFlavour and EHFlavour variants). When isCtor is true only extern
+// declarations are printed; the definitions are emitted elsewhere.
+void RegisterInfoEmitter::EmitRegMappingTables(
+    raw_ostream &OS, const std::deque<CodeGenRegister> &Regs, bool isCtor) {
+  // Collect all information about dwarf register numbers
+  DwarfRegNumsVecTy DwarfRegNums;
+
+  // First, just pull all provided information to the map
+  unsigned maxLength = 0;
+  for (auto &RE : Regs) {
+    Record *Reg = RE.TheDef;
+    std::vector<int64_t> RegNums = Reg->getValueAsListOfInts("DwarfNumbers");
+    maxLength = std::max((size_t)maxLength, RegNums.size());
+    DwarfRegNums.emplace_back(Reg, std::move(RegNums));
+  }
+  finalizeDwarfRegNumsKeys(DwarfRegNums);
+
+  // No register provides DWARF numbers: nothing to emit.
+  if (!maxLength)
+    return;
+
+  // Now we know maximal length of number list. Append -1's, where needed
+  for (auto &DwarfRegNum : DwarfRegNums)
+    for (unsigned I = DwarfRegNum.second.size(), E = maxLength; I != E; ++I)
+      DwarfRegNum.second.push_back(-1);
+
+  StringRef Namespace = Regs.front().TheDef->getValueAsString("Namespace");
+
+  OS << "// " << Namespace << " Dwarf<->LLVM register mappings.\n";
+
+  // Emit reverse information about the dwarf register numbers.
+  for (unsigned j = 0; j < 2; ++j) {
+    for (unsigned I = 0, E = maxLength; I != E; ++I) {
+      OS << "extern const MCRegisterInfo::DwarfLLVMRegPair " << Namespace;
+      OS << (j == 0 ? "DwarfFlavour" : "EHFlavour");
+      OS << I << "Dwarf2L[]";
+
+      if (!isCtor) {
+        OS << " = {\n";
+
+        // Store the mapping sorted by the LLVM reg num so lookup can be done
+        // with a binary search.
+        std::map<uint64_t, Record*> Dwarf2LMap;
+        for (auto &DwarfRegNum : DwarfRegNums) {
+          int DwarfRegNo = DwarfRegNum.second[I];
+          if (DwarfRegNo < 0)
+            continue;
+          Dwarf2LMap[DwarfRegNo] = DwarfRegNum.first;
+        }
+
+        for (auto &I : Dwarf2LMap)
+          OS << "  { " << I.first << "U, " << getQualifiedName(I.second)
+             << " },\n";
+
+        OS << "};\n";
+      } else {
+        OS << ";\n";
+      }
+
+      // We have to store the size in a const global, it's used in multiple
+      // places.
+      OS << "extern const unsigned " << Namespace
+         << (j == 0 ? "DwarfFlavour" : "EHFlavour") << I << "Dwarf2LSize";
+      if (!isCtor)
+        OS << " = std::size(" << Namespace
+           << (j == 0 ? "DwarfFlavour" : "EHFlavour") << I << "Dwarf2L);\n\n";
+      else
+        OS << ";\n\n";
+    }
+  }
+
+  // Apply DwarfAlias: a register aliased to another inherits its full DWARF
+  // number list.
+  for (auto &RE : Regs) {
+    Record *Reg = RE.TheDef;
+    const RecordVal *V = Reg->getValue("DwarfAlias");
+    if (!V || !V->getValue())
+      continue;
+
+    DefInit *DI = cast<DefInit>(V->getValue());
+    Record *Alias = DI->getDef();
+    const auto &AliasIter = llvm::lower_bound(
+        DwarfRegNums, Alias, [](const DwarfRegNumsMapPair &A, const Record *B) {
+          return LessRecordRegister()(A.first, B);
+        });
+    assert(AliasIter != DwarfRegNums.end() && AliasIter->first == Alias &&
+           "Expected Alias to be present in map");
+    const auto &RegIter = llvm::lower_bound(
+        DwarfRegNums, Reg, [](const DwarfRegNumsMapPair &A, const Record *B) {
+          return LessRecordRegister()(A.first, B);
+        });
+    assert(RegIter != DwarfRegNums.end() && RegIter->first == Reg &&
+           "Expected Reg to be present in map");
+    RegIter->second = AliasIter->second;
+  }
+
+  // Emit information about the dwarf register numbers.
+  for (unsigned j = 0; j < 2; ++j) {
+    for (unsigned i = 0, e = maxLength; i != e; ++i) {
+      OS << "extern const MCRegisterInfo::DwarfLLVMRegPair " << Namespace;
+      OS << (j == 0 ? "DwarfFlavour" : "EHFlavour");
+      OS << i << "L2Dwarf[]";
+      if (!isCtor) {
+        OS << " = {\n";
+        // Store the mapping sorted by the Dwarf reg num so lookup can be done
+        // with a binary search.
+        for (auto &DwarfRegNum : DwarfRegNums) {
+          int RegNo = DwarfRegNum.second[i];
+          if (RegNo == -1) // -1 is the default value, don't emit a mapping.
+            continue;
+
+          OS << "  { " << getQualifiedName(DwarfRegNum.first) << ", " << RegNo
+             << "U },\n";
+        }
+        OS << "};\n";
+      } else {
+        OS << ";\n";
+      }
+
+      // We have to store the size in a const global, it's used in multiple
+      // places.
+      OS << "extern const unsigned " << Namespace
+         << (j == 0 ? "DwarfFlavour" : "EHFlavour") << i << "L2DwarfSize";
+      if (!isCtor)
+        OS << " = std::size(" << Namespace
+           << (j == 0 ? "DwarfFlavour" : "EHFlavour") << i << "L2Dwarf);\n\n";
+      else
+        OS << ";\n\n";
+    }
+  }
+}
+
+// Emit the switch statements that wire the tables from EmitRegMappingTables
+// into the MCRegisterInfo object (mapDwarfRegsToLLVMRegs /
+// mapLLVMRegsToDwarfRegs calls, selected by DwarfFlavour/EHFlavour).
+void RegisterInfoEmitter::EmitRegMapping(
+    raw_ostream &OS, const std::deque<CodeGenRegister> &Regs, bool isCtor) {
+  // Emit the initializer so the tables from EmitRegMappingTables get wired up
+  // to the MCRegisterInfo object.
+  unsigned maxLength = 0;
+  for (auto &RE : Regs) {
+    Record *Reg = RE.TheDef;
+    maxLength = std::max((size_t)maxLength,
+                         Reg->getValueAsListOfInts("DwarfNumbers").size());
+  }
+
+  // No DWARF numbers anywhere: nothing to wire up.
+  if (!maxLength)
+    return;
+
+  StringRef Namespace = Regs.front().TheDef->getValueAsString("Namespace");
+
+  // Emit reverse information about the dwarf register numbers.
+  for (unsigned j = 0; j < 2; ++j) {
+    OS << "  switch (";
+    if (j == 0)
+      OS << "DwarfFlavour";
+    else
+      OS << "EHFlavour";
+    OS << ") {\n"
+       << "  default:\n"
+       << "    llvm_unreachable(\"Unknown DWARF flavour\");\n";
+
+    for (unsigned i = 0, e = maxLength; i != e; ++i) {
+      OS << "  case " << i << ":\n";
+      OS << "    ";
+      // Outside a constructor the call goes through the RI pointer.
+      if (!isCtor)
+        OS << "RI->";
+      std::string Tmp;
+      raw_string_ostream(Tmp) << Namespace
+                              << (j == 0 ? "DwarfFlavour" : "EHFlavour") << i
+                              << "Dwarf2L";
+      OS << "mapDwarfRegsToLLVMRegs(" << Tmp << ", " << Tmp << "Size, ";
+      if (j == 0)
+          OS << "false";
+        else
+          OS << "true";
+      OS << ");\n";
+      OS << "    break;\n";
+    }
+    OS << "  }\n";
+  }
+
+  // Emit information about the dwarf register numbers.
+  for (unsigned j = 0; j < 2; ++j) {
+    OS << "  switch (";
+    if (j == 0)
+      OS << "DwarfFlavour";
+    else
+      OS << "EHFlavour";
+    OS << ") {\n"
+       << "  default:\n"
+       << "    llvm_unreachable(\"Unknown DWARF flavour\");\n";
+
+    for (unsigned i = 0, e = maxLength; i != e; ++i) {
+      OS << "  case " << i << ":\n";
+      OS << "    ";
+      if (!isCtor)
+        OS << "RI->";
+      std::string Tmp;
+      raw_string_ostream(Tmp) << Namespace
+                              << (j == 0 ? "DwarfFlavour" : "EHFlavour") << i
+                              << "L2Dwarf";
+      OS << "mapLLVMRegsToDwarfRegs(" << Tmp << ", " << Tmp << "Size, ";
+      if (j == 0)
+          OS << "false";
+        else
+          OS << "true";
+      OS << ");\n";
+      OS << "    break;\n";
+    }
+    OS << "  }\n";
+  }
+}
+
+// Emit the contents of a BitVector as comma-separated hex literals using a
+// little-endian mapping: each emitted number packs the next Width bits, with
+// bit 0 of the chunk being the lowest-numbered BitVector position.
+// Width must be at most 32 bits.
+static void printBitVectorAsHex(raw_ostream &OS,
+                                const BitVector &Bits,
+                                unsigned Width) {
+  assert(Width <= 32 && "Width too large");
+  unsigned Digits = (Width + 3) / 4; // Hex digits needed per chunk.
+  for (unsigned Base = 0, Size = Bits.size(); Base < Size; Base += Width) {
+    unsigned Chunk = 0;
+    // Gather up to Width bits, stopping early at the end of the vector.
+    for (unsigned Off = 0; Off != Width && Base + Off != Size; ++Off)
+      Chunk |= Bits.test(Base + Off) << Off;
+    OS << format("0x%0*x, ", Digits, Chunk);
+  }
+}
+
+// Helper that accumulates individual bit positions and prints the result as
+// a constant byte array of hex literals.
+class BitVectorEmitter {
+  BitVector Values;
+
+public:
+  // Set bit number \p Bit, growing the vector to a whole-byte boundary so the
+  // printed array always covers complete bytes.
+  void add(unsigned Bit) {
+    if (Bit >= Values.size())
+      Values.resize(((Bit / 8) + 1) * 8); // Round up to the next byte.
+    Values[Bit] = true;
+  }
+
+  // Write the accumulated bits as 8-bit little-endian hex values.
+  void print(raw_ostream &OS) {
+    printBitVectorAsHex(OS, Values, 8);
+  }
+};
+
+// Print the enumerator name of a simple value type (e.g. "MVT::i32") for use
+// as a SequenceToOffsetTable element printer.
+static void printSimpleValueType(raw_ostream &OS, MVT::SimpleValueType VT) {
+  OS << getEnumName(VT);
+}
+
+// Print a sub-register index as its numeric enum value, for use as a
+// SequenceToOffsetTable element printer.
+static void printSubRegIndex(raw_ostream &OS, const CodeGenSubRegIndex *Idx) {
+  OS << Idx->EnumValue;
+}
+
+// Differentially encoded register and regunit lists allow for better
+// compression on regular register banks. The sequence is computed from the
+// differential list as:
+//
+// out[0] = InitVal;
+// out[n+1] = out[n] + diff[n]; // n = 0, 1, ...
+//
+// The initial value depends on the specific list. The list is terminated by a
+// 0 differential which means we can't encode repeated elements.
+
+// Element storage for one differential list / one lane-mask list.
+typedef SmallVector<uint16_t, 4> DiffVec;
+typedef SmallVector<LaneBitmask, 4> MaskVec;
+
+// Differentially encode the elements of List into V, seeded with InitVal.
+// Neither the starting value nor the terminating 0 is appended, so V ends up
+// with the same number of entries as List.
+static DiffVec &diffEncode(DiffVec &V, unsigned InitVal,
+                           SparseBitVector<> List) {
+  assert(V.empty() && "Clear DiffVec before diffEncode.");
+  uint16_t Prev = uint16_t(InitVal);
+  for (uint16_t Elem : List) {
+    V.push_back(Elem - Prev);
+    Prev = Elem;
+  }
+  return V;
+}
+
+// Differentially encode the EnumValue fields of the pointer range
+// [Begin, End) into V, seeded with InitVal. As above, the seed and the
+// terminating 0 are not stored, so V gets one entry per range element.
+template <typename Iter>
+static DiffVec &diffEncode(DiffVec &V, unsigned InitVal, Iter Begin,
+                           Iter End) {
+  assert(V.empty() && "Clear DiffVec before diffEncode.");
+  uint16_t Prev = uint16_t(InitVal);
+  for (Iter It = Begin; It != End; ++It) {
+    uint16_t Cur = (*It)->EnumValue;
+    V.push_back(Cur - Prev);
+    Prev = Cur;
+  }
+  return V;
+}
+
+// Print one element of a differential list as a plain decimal number.
+static void printDiff16(raw_ostream &OS, uint16_t Val) {
+  OS << Val;
+}
+
+// Print a lane mask as a "LaneBitmask(0x...)" constructor expression.
+static void printMask(raw_ostream &OS, LaneBitmask Val) {
+  OS << "LaneBitmask(0x" << PrintLaneMask(Val) << ')';
+}
+
+// Try to merge Idx's composite map into the partially-populated table row
+// Vec. Returns false (leaving Vec untouched) if any already-filled slot
+// disagrees with Idx's map; otherwise commits all entries and returns true.
+static bool combine(const CodeGenSubRegIndex *Idx,
+                    SmallVectorImpl<CodeGenSubRegIndex*> &Vec) {
+  const CodeGenSubRegIndex::CompMap &Map = Idx->getComposites();
+
+  // First pass: check compatibility without modifying Vec.
+  for (const auto &Comp : Map) {
+    CodeGenSubRegIndex *&Slot = Vec[Comp.first->EnumValue - 1];
+    if (Slot && Slot != Comp.second)
+      return false;
+  }
+
+  // Second pass: everything is compatible, so commit the entries.
+  for (const auto &Comp : Map) {
+    auto *&Slot = Vec[Comp.first->EnumValue - 1];
+    assert((!Slot || Slot == Comp.second) &&
+           "Expected EnumValue to be unique");
+    Slot = Comp.second;
+  }
+  return true;
+}
+
+// Emit the target's composeSubRegIndicesImpl() as a compressed 2-D table
+// lookup. Rows whose defined entries are mutually compatible are shared, and
+// when more than one row survives, a RowMap indirection selects the row.
+void
+RegisterInfoEmitter::emitComposeSubRegIndices(raw_ostream &OS,
+                                              CodeGenRegBank &RegBank,
+                                              const std::string &ClName) {
+  const auto &SubRegIndices = RegBank.getSubRegIndices();
+  OS << "unsigned " << ClName
+     << "::composeSubRegIndicesImpl(unsigned IdxA, unsigned IdxB) const {\n";
+
+  // Many sub-register indexes are composition-compatible, meaning that
+  //
+  // compose(IdxA, IdxB) == compose(IdxA', IdxB)
+  //
+  // for many IdxA, IdxA' pairs. Not all sub-register indexes can be composed.
+  // The illegal entries can be used as wildcards to compress the table
+  // further.
+
+  // Map each Sub-register index to a compatible table row.
+  SmallVector<unsigned, 4> RowMap;
+  SmallVector<SmallVector<CodeGenSubRegIndex*, 4>, 4> Rows;
+
+  auto SubRegIndicesSize =
+      std::distance(SubRegIndices.begin(), SubRegIndices.end());
+  for (const auto &Idx : SubRegIndices) {
+    unsigned Found = ~0u;
+    // Try to merge this index's compose map into an existing row.
+    for (unsigned r = 0, re = Rows.size(); r != re; ++r) {
+      if (combine(&Idx, Rows[r])) {
+        Found = r;
+        break;
+      }
+    }
+    // No compatible row: start a new one seeded with this index's map.
+    if (Found == ~0u) {
+      Found = Rows.size();
+      Rows.resize(Found + 1);
+      Rows.back().resize(SubRegIndicesSize);
+      combine(&Idx, Rows.back());
+    }
+    RowMap.push_back(Found);
+  }
+
+  // Output the row map if there is multiple rows.
+  if (Rows.size() > 1) {
+    OS << " static const " << getMinimalTypeForRange(Rows.size(), 32)
+       << " RowMap[" << SubRegIndicesSize << "] = {\n ";
+    for (unsigned i = 0, e = SubRegIndicesSize; i != e; ++i)
+      OS << RowMap[i] << ", ";
+    OS << "\n };\n";
+  }
+
+  // Output the rows.
+  OS << " static const " << getMinimalTypeForRange(SubRegIndicesSize + 1, 32)
+     << " Rows[" << Rows.size() << "][" << SubRegIndicesSize << "] = {\n";
+  for (unsigned r = 0, re = Rows.size(); r != re; ++r) {
+    OS << " { ";
+    for (unsigned i = 0, e = SubRegIndicesSize; i != e; ++i)
+      if (Rows[r][i])
+        OS << Rows[r][i]->getQualifiedName() << ", ";
+      else
+        OS << "0, "; // Wildcard: this composition is not defined.
+    OS << "},\n";
+  }
+  OS << " };\n\n";
+
+  // Indices are 1-based externally; decrement before the table lookup.
+  OS << " --IdxA; assert(IdxA < " << SubRegIndicesSize << "); (void) IdxA;\n"
+     << " --IdxB; assert(IdxB < " << SubRegIndicesSize << ");\n";
+  if (Rows.size() > 1)
+    OS << " return Rows[RowMap[IdxA]][IdxB];\n";
+  else
+    OS << " return Rows[0][IdxB];\n";
+  OS << "}\n\n";
+}
+
+// Emit composeSubRegIndexLaneMaskImpl() and its reverse. Each sub-register
+// index maps to a terminator-ended sequence of (mask, rotate-left) operations
+// over the lane mask; identical sequences are shared between indices.
+void
+RegisterInfoEmitter::emitComposeSubRegIndexLaneMask(raw_ostream &OS,
+                                                    CodeGenRegBank &RegBank,
+                                                    const std::string &ClName) {
+  // See the comments in computeSubRegLaneMasks() for our goal here.
+  const auto &SubRegIndices = RegBank.getSubRegIndices();
+
+  // Create a list of Mask+Rotate operations, with equivalent entries merged.
+  SmallVector<unsigned, 4> SubReg2SequenceIndexMap;
+  SmallVector<SmallVector<MaskRolPair, 1>, 4> Sequences;
+  for (const auto &Idx : SubRegIndices) {
+    const SmallVector<MaskRolPair, 1> &IdxSequence
+      = Idx.CompositionLaneMaskTransform;
+
+    unsigned Found = ~0u;
+    // SIdx tracks the flat array offset of sequence 's'; each sequence is
+    // followed by one terminator entry, hence the "+ 1".
+    unsigned SIdx = 0;
+    unsigned NextSIdx;
+    for (size_t s = 0, se = Sequences.size(); s != se; ++s, SIdx = NextSIdx) {
+      SmallVectorImpl<MaskRolPair> &Sequence = Sequences[s];
+      NextSIdx = SIdx + Sequence.size() + 1;
+      if (Sequence == IdxSequence) {
+        Found = SIdx;
+        break;
+      }
+    }
+    // Not seen before: append it; SIdx is now the end-of-array offset.
+    if (Found == ~0u) {
+      Sequences.push_back(IdxSequence);
+      Found = SIdx;
+    }
+    SubReg2SequenceIndexMap.push_back(Found);
+  }
+
+  OS << " struct MaskRolOp {\n"
+        " LaneBitmask Mask;\n"
+        " uint8_t RotateLeft;\n"
+        " };\n"
+        " static const MaskRolOp LaneMaskComposeSequences[] = {\n";
+  unsigned Idx = 0;
+  for (size_t s = 0, se = Sequences.size(); s != se; ++s) {
+    OS << " ";
+    const SmallVectorImpl<MaskRolPair> &Sequence = Sequences[s];
+    for (size_t p = 0, pe = Sequence.size(); p != pe; ++p) {
+      const MaskRolPair &P = Sequence[p];
+      printMask(OS << "{ ", P.Mask);
+      OS << format(", %2u }, ", P.RotateLeft);
+    }
+    // All-zero mask terminates the sequence.
+    OS << "{ LaneBitmask::getNone(), 0 }";
+    if (s+1 != se)
+      OS << ", ";
+    OS << " // Sequence " << Idx << "\n";
+    Idx += Sequence.size() + 1;
+  }
+  // Pick the narrowest integer type that can hold the largest offset.
+  auto *IntType = getMinimalTypeForRange(*std::max_element(
+      SubReg2SequenceIndexMap.begin(), SubReg2SequenceIndexMap.end()));
+  OS << " };\n"
+        " static const "
+     << IntType << " CompositeSequences[] = {\n";
+  for (size_t i = 0, e = SubRegIndices.size(); i != e; ++i) {
+    OS << " ";
+    OS << SubReg2SequenceIndexMap[i];
+    if (i+1 != e)
+      OS << ",";
+    OS << " // to " << SubRegIndices[i].getName() << "\n";
+  }
+  OS << " };\n\n";
+
+  OS << "LaneBitmask " << ClName
+     << "::composeSubRegIndexLaneMaskImpl(unsigned IdxA, LaneBitmask LaneMask)"
+        " const {\n"
+        " --IdxA; assert(IdxA < " << SubRegIndices.size()
+     << " && \"Subregister index out of bounds\");\n"
+        " LaneBitmask Result;\n"
+        " for (const MaskRolOp *Ops =\n"
+        " &LaneMaskComposeSequences[CompositeSequences[IdxA]];\n"
+        " Ops->Mask.any(); ++Ops) {\n"
+        " LaneBitmask::Type M = LaneMask.getAsInteger() & Ops->Mask.getAsInteger();\n"
+        " if (unsigned S = Ops->RotateLeft)\n"
+        " Result |= LaneBitmask((M << S) | (M >> (LaneBitmask::BitWidth - S)));\n"
+        " else\n"
+        " Result |= LaneBitmask(M);\n"
+        " }\n"
+        " return Result;\n"
+        "}\n\n";
+
+  // The reverse composition rotates right by the same amounts.
+  OS << "LaneBitmask " << ClName
+     << "::reverseComposeSubRegIndexLaneMaskImpl(unsigned IdxA, "
+        " LaneBitmask LaneMask) const {\n"
+        " LaneMask &= getSubRegIndexLaneMask(IdxA);\n"
+        " --IdxA; assert(IdxA < " << SubRegIndices.size()
+     << " && \"Subregister index out of bounds\");\n"
+        " LaneBitmask Result;\n"
+        " for (const MaskRolOp *Ops =\n"
+        " &LaneMaskComposeSequences[CompositeSequences[IdxA]];\n"
+        " Ops->Mask.any(); ++Ops) {\n"
+        " LaneBitmask::Type M = LaneMask.getAsInteger();\n"
+        " if (unsigned S = Ops->RotateLeft)\n"
+        " Result |= LaneBitmask((M >> S) | (M << (LaneBitmask::BitWidth - S)));\n"
+        " else\n"
+        " Result |= LaneBitmask(M);\n"
+        " }\n"
+        " return Result;\n"
+        "}\n\n";
+}
+
+//
+// runMCDesc - Print out MC register descriptions.
+//
+// Emits the GET_REGINFO_MC_DESC section: shared diff-encoded register lists,
+// lane-mask lists, sub-register index tables, per-register descriptors,
+// regunit roots, MC register classes, DWARF mapping tables, HW encodings,
+// and the Init<Target>MCRegisterInfo() routine that ties them together.
+void
+RegisterInfoEmitter::runMCDesc(raw_ostream &OS, CodeGenTarget &Target,
+                               CodeGenRegBank &RegBank) {
+  emitSourceFileHeader("MC Register Information", OS);
+
+  OS << "\n#ifdef GET_REGINFO_MC_DESC\n";
+  OS << "#undef GET_REGINFO_MC_DESC\n\n";
+
+  const auto &Regs = RegBank.getRegisters();
+
+  auto &SubRegIndices = RegBank.getSubRegIndices();
+  // The lists of sub-registers and super-registers go in the same array. That
+  // allows us to share suffixes.
+  typedef std::vector<const CodeGenRegister*> RegVec;
+
+  // Differentially encoded lists.
+  SequenceToOffsetTable<DiffVec> DiffSeqs;
+  SmallVector<DiffVec, 4> SubRegLists(Regs.size());
+  SmallVector<DiffVec, 4> SuperRegLists(Regs.size());
+  SmallVector<DiffVec, 4> RegUnitLists(Regs.size());
+  SmallVector<unsigned, 4> RegUnitInitScale(Regs.size());
+
+  // List of lane masks accompanying register unit sequences.
+  SequenceToOffsetTable<MaskVec> LaneMaskSeqs;
+  SmallVector<MaskVec, 4> RegUnitLaneMasks(Regs.size());
+
+  // Keep track of sub-register names as well. These are not differentially
+  // encoded.
+  typedef SmallVector<const CodeGenSubRegIndex*, 4> SubRegIdxVec;
+  SequenceToOffsetTable<SubRegIdxVec, deref<std::less<>>> SubRegIdxSeqs;
+  SmallVector<SubRegIdxVec, 4> SubRegIdxLists(Regs.size());
+
+  SequenceToOffsetTable<std::string> RegStrings;
+
+  // Precompute register lists for the SequenceToOffsetTable.
+  unsigned i = 0;
+  for (auto I = Regs.begin(), E = Regs.end(); I != E; ++I, ++i) {
+    const auto &Reg = *I;
+    RegStrings.add(std::string(Reg.getName()));
+
+    // Compute the ordered sub-register list.
+    SetVector<const CodeGenRegister*> SR;
+    Reg.addSubRegsPreOrder(SR, RegBank);
+    diffEncode(SubRegLists[i], Reg.EnumValue, SR.begin(), SR.end());
+    DiffSeqs.add(SubRegLists[i]);
+
+    // Compute the corresponding sub-register indexes.
+    SubRegIdxVec &SRIs = SubRegIdxLists[i];
+    for (const CodeGenRegister *S : SR)
+      SRIs.push_back(Reg.getSubRegIndex(S));
+    SubRegIdxSeqs.add(SRIs);
+
+    // Super-registers are already computed.
+    const RegVec &SuperRegList = Reg.getSuperRegs();
+    diffEncode(SuperRegLists[i], Reg.EnumValue, SuperRegList.begin(),
+               SuperRegList.end());
+    DiffSeqs.add(SuperRegLists[i]);
+
+    // Differentially encode the register unit list, seeded by register number.
+    // First compute a scale factor that allows more diff-lists to be reused:
+    //
+    // D0 -> (S0, S1)
+    // D1 -> (S2, S3)
+    //
+    // A scale factor of 2 allows D0 and D1 to share a diff-list. The initial
+    // value for the differential decoder is the register number multiplied by
+    // the scale.
+    //
+    // Check the neighboring registers for arithmetic progressions.
+    unsigned ScaleA = ~0u, ScaleB = ~0u;
+    SparseBitVector<> RUs = Reg.getNativeRegUnits();
+    if (I != Regs.begin() &&
+        std::prev(I)->getNativeRegUnits().count() == RUs.count())
+      ScaleB = *RUs.begin() - *std::prev(I)->getNativeRegUnits().begin();
+    if (std::next(I) != Regs.end() &&
+        std::next(I)->getNativeRegUnits().count() == RUs.count())
+      ScaleA = *std::next(I)->getNativeRegUnits().begin() - *RUs.begin();
+    unsigned Scale = std::min(ScaleB, ScaleA);
+    // Default the scale to 0 if it can't be encoded in 4 bits.
+    if (Scale >= 16)
+      Scale = 0;
+    RegUnitInitScale[i] = Scale;
+    DiffSeqs.add(diffEncode(RegUnitLists[i], Scale * Reg.EnumValue, RUs));
+
+    const auto &RUMasks = Reg.getRegUnitLaneMasks();
+    MaskVec &LaneMaskVec = RegUnitLaneMasks[i];
+    assert(LaneMaskVec.empty());
+    llvm::append_range(LaneMaskVec, RUMasks);
+    // Terminator mask should not be used inside of the list.
+#ifndef NDEBUG
+    for (LaneBitmask M : LaneMaskVec) {
+      assert(!M.all() && "terminator mask should not be part of the list");
+    }
+#endif
+    LaneMaskSeqs.add(LaneMaskVec);
+  }
+
+  // Compute the final layout of the sequence table.
+  DiffSeqs.layout();
+  LaneMaskSeqs.layout();
+  SubRegIdxSeqs.layout();
+
+  OS << "namespace llvm {\n\n";
+
+  const std::string &TargetName = std::string(Target.getName());
+
+  // Emit the shared table of differential lists.
+  OS << "extern const MCPhysReg " << TargetName << "RegDiffLists[] = {\n";
+  DiffSeqs.emit(OS, printDiff16);
+  OS << "};\n\n";
+
+  // Emit the shared table of regunit lane mask sequences.
+  OS << "extern const LaneBitmask " << TargetName << "LaneMaskLists[] = {\n";
+  LaneMaskSeqs.emit(OS, printMask, "LaneBitmask::getAll()");
+  OS << "};\n\n";
+
+  // Emit the table of sub-register indexes.
+  OS << "extern const uint16_t " << TargetName << "SubRegIdxLists[] = {\n";
+  SubRegIdxSeqs.emit(OS, printSubRegIndex);
+  OS << "};\n\n";
+
+  // Emit the table of sub-register index sizes.
+  OS << "extern const MCRegisterInfo::SubRegCoveredBits "
+     << TargetName << "SubRegIdxRanges[] = {\n";
+  // Sentinel entry for the null sub-register index.
+  OS << " { " << (uint16_t)-1 << ", " << (uint16_t)-1 << " },\n";
+  for (const auto &Idx : SubRegIndices) {
+    OS << " { " << Idx.Offset << ", " << Idx.Size << " },\t// "
+       << Idx.getName() << "\n";
+  }
+  OS << "};\n\n";
+
+  // Emit the string table.
+  RegStrings.layout();
+  RegStrings.emitStringLiteralDef(OS, Twine("extern const char ") + TargetName +
+                                          "RegStrings[]");
+
+  OS << "extern const MCRegisterDesc " << TargetName
+     << "RegDesc[] = { // Descriptors\n";
+  // First entry is the NoRegister sentinel.
+  OS << " { " << RegStrings.get("") << ", 0, 0, 0, 0, 0 },\n";
+
+  // Emit the register descriptors now.
+  i = 0;
+  for (const auto &Reg : Regs) {
+    // The regunit field packs the diff-list offset (high bits) with the
+    // 4-bit init scale (low bits).
+    OS << " { " << RegStrings.get(std::string(Reg.getName())) << ", "
+       << DiffSeqs.get(SubRegLists[i]) << ", " << DiffSeqs.get(SuperRegLists[i])
+       << ", " << SubRegIdxSeqs.get(SubRegIdxLists[i]) << ", "
+       << (DiffSeqs.get(RegUnitLists[i]) * 16 + RegUnitInitScale[i]) << ", "
+       << LaneMaskSeqs.get(RegUnitLaneMasks[i]) << " },\n";
+    ++i;
+  }
+  OS << "};\n\n"; // End of register descriptors...
+
+  // Emit the table of register unit roots. Each regunit has one or two root
+  // registers.
+  OS << "extern const MCPhysReg " << TargetName << "RegUnitRoots[][2] = {\n";
+  for (unsigned i = 0, e = RegBank.getNumNativeRegUnits(); i != e; ++i) {
+    ArrayRef<const CodeGenRegister*> Roots = RegBank.getRegUnit(i).getRoots();
+    assert(!Roots.empty() && "All regunits must have a root register.");
+    assert(Roots.size() <= 2 && "More than two roots not supported yet.");
+    OS << " { ";
+    ListSeparator LS;
+    for (const CodeGenRegister *R : Roots)
+      OS << LS << getQualifiedName(R->TheDef);
+    OS << " },\n";
+  }
+  OS << "};\n\n";
+
+  const auto &RegisterClasses = RegBank.getRegClasses();
+
+  // Loop over all of the register classes... emitting each one.
+  OS << "namespace { // Register classes...\n";
+
+  SequenceToOffsetTable<std::string> RegClassStrings;
+
+  // Emit the register enum value arrays for each RegisterClass
+  for (const auto &RC : RegisterClasses) {
+    ArrayRef<Record*> Order = RC.getOrder();
+
+    // Give the register class a legal C name if it's anonymous.
+    const std::string &Name = RC.getName();
+
+    RegClassStrings.add(Name);
+
+    // Emit the register list now (unless it would be a zero-length array).
+    if (!Order.empty()) {
+      OS << " // " << Name << " Register Class...\n"
+         << " const MCPhysReg " << Name << "[] = {\n ";
+      for (Record *Reg : Order) {
+        OS << getQualifiedName(Reg) << ", ";
+      }
+      OS << "\n };\n\n";
+
+      OS << " // " << Name << " Bit set.\n"
+         << " const uint8_t " << Name << "Bits[] = {\n ";
+      BitVectorEmitter BVE;
+      for (Record *Reg : Order) {
+        BVE.add(Target.getRegBank().getReg(Reg)->EnumValue);
+      }
+      BVE.print(OS);
+      OS << "\n };\n\n";
+    }
+  }
+  OS << "} // end anonymous namespace\n\n";
+
+  RegClassStrings.layout();
+  RegClassStrings.emitStringLiteralDef(
+      OS, Twine("extern const char ") + TargetName + "RegClassStrings[]");
+
+  OS << "extern const MCRegisterClass " << TargetName
+     << "MCRegisterClasses[] = {\n";
+
+  for (const auto &RC : RegisterClasses) {
+    ArrayRef<Record *> Order = RC.getOrder();
+    // Classes with no allocation order get null/zero table entries.
+    std::string RCName = Order.empty() ? "nullptr" : RC.getName();
+    std::string RCBitsName = Order.empty() ? "nullptr" : RC.getName() + "Bits";
+    std::string RCBitsSize = Order.empty() ? "0" : "sizeof(" + RCBitsName + ")";
+    assert(isInt<8>(RC.CopyCost) && "Copy cost too large.");
+    uint32_t RegSize = 0;
+    if (RC.RSI.isSimple())
+      RegSize = RC.RSI.getSimple().RegSize;
+    OS << " { " << RCName << ", " << RCBitsName << ", "
+       << RegClassStrings.get(RC.getName()) << ", " << RC.getOrder().size()
+       << ", " << RCBitsSize << ", " << RC.getQualifiedName() + "RegClassID"
+       << ", " << RegSize << ", " << RC.CopyCost << ", "
+       << (RC.Allocatable ? "true" : "false") << " },\n";
+  }
+
+  OS << "};\n\n";
+
+  EmitRegMappingTables(OS, Regs, false);
+
+  // Emit Reg encoding table
+  OS << "extern const uint16_t " << TargetName;
+  OS << "RegEncodingTable[] = {\n";
+  // Add entry for NoRegister
+  OS << " 0,\n";
+  for (const auto &RE : Regs) {
+    Record *Reg = RE.TheDef;
+    BitsInit *BI = Reg->getValueAsBitsInit("HWEncoding");
+    uint64_t Value = 0;
+    // Fold the concrete bits of HWEncoding into a single integer; unset
+    // (unresolved) bits stay zero.
+    for (unsigned b = 0, be = BI->getNumBits(); b != be; ++b) {
+      if (BitInit *B = dyn_cast<BitInit>(BI->getBit(b)))
+        Value |= (uint64_t)B->getValue() << b;
+    }
+    OS << " " << Value << ",\n";
+  }
+  OS << "};\n"; // End of HW encoding table
+
+  // MCRegisterInfo initialization routine.
+  OS << "static inline void Init" << TargetName
+     << "MCRegisterInfo(MCRegisterInfo *RI, unsigned RA, "
+     << "unsigned DwarfFlavour = 0, unsigned EHFlavour = 0, unsigned PC = 0) "
+        "{\n"
+     << " RI->InitMCRegisterInfo(" << TargetName << "RegDesc, "
+     << Regs.size() + 1 << ", RA, PC, " << TargetName << "MCRegisterClasses, "
+     << RegisterClasses.size() << ", " << TargetName << "RegUnitRoots, "
+     << RegBank.getNumNativeRegUnits() << ", " << TargetName << "RegDiffLists, "
+     << TargetName << "LaneMaskLists, " << TargetName << "RegStrings, "
+     << TargetName << "RegClassStrings, " << TargetName << "SubRegIdxLists, "
+     << (std::distance(SubRegIndices.begin(), SubRegIndices.end()) + 1) << ",\n"
+     << TargetName << "SubRegIdxRanges, " << TargetName
+     << "RegEncodingTable);\n\n";
+
+  EmitRegMapping(OS, Regs, false);
+
+  OS << "}\n\n";
+
+  OS << "} // end namespace llvm\n\n";
+  OS << "#endif // GET_REGINFO_MC_DESC\n\n";
+}
+
+// runTargetHeader - Emit the GET_REGINFO_HEADER fragment: the declaration of
+// the <Target>GenRegisterInfo class (derived from TargetRegisterInfo) and an
+// extern declaration for every register class instance.
+void
+RegisterInfoEmitter::runTargetHeader(raw_ostream &OS, CodeGenTarget &Target,
+                                     CodeGenRegBank &RegBank) {
+  emitSourceFileHeader("Register Information Header Fragment", OS);
+
+  OS << "\n#ifdef GET_REGINFO_HEADER\n";
+  OS << "#undef GET_REGINFO_HEADER\n\n";
+
+  const std::string &TargetName = std::string(Target.getName());
+  std::string ClassName = TargetName + "GenRegisterInfo";
+
+  OS << "#include \"llvm/CodeGen/TargetRegisterInfo.h\"\n\n";
+
+  OS << "namespace llvm {\n\n";
+
+  OS << "class " << TargetName << "FrameLowering;\n\n";
+
+  OS << "struct " << ClassName << " : public TargetRegisterInfo {\n"
+     << " explicit " << ClassName
+     << "(unsigned RA, unsigned D = 0, unsigned E = 0,\n"
+     << " unsigned PC = 0, unsigned HwMode = 0);\n";
+  // Sub-register composition overrides are only declared when the target
+  // actually defines sub-register indices.
+  if (!RegBank.getSubRegIndices().empty()) {
+    OS << " unsigned composeSubRegIndicesImpl"
+       << "(unsigned, unsigned) const override;\n"
+       << " LaneBitmask composeSubRegIndexLaneMaskImpl"
+       << "(unsigned, LaneBitmask) const override;\n"
+       << " LaneBitmask reverseComposeSubRegIndexLaneMaskImpl"
+       << "(unsigned, LaneBitmask) const override;\n"
+       << " const TargetRegisterClass *getSubClassWithSubReg"
+       << "(const TargetRegisterClass *, unsigned) const override;\n"
+       << " const TargetRegisterClass *getSubRegisterClass"
+       << "(const TargetRegisterClass *, unsigned) const override;\n";
+  }
+  OS << " const RegClassWeight &getRegClassWeight("
+     << "const TargetRegisterClass *RC) const override;\n"
+     << " unsigned getRegUnitWeight(unsigned RegUnit) const override;\n"
+     << " unsigned getNumRegPressureSets() const override;\n"
+     << " const char *getRegPressureSetName(unsigned Idx) const override;\n"
+     << " unsigned getRegPressureSetLimit(const MachineFunction &MF, unsigned "
+        "Idx) const override;\n"
+     << " const int *getRegClassPressureSets("
+     << "const TargetRegisterClass *RC) const override;\n"
+     << " const int *getRegUnitPressureSets("
+     << "unsigned RegUnit) const override;\n"
+     << " ArrayRef<const char *> getRegMaskNames() const override;\n"
+     << " ArrayRef<const uint32_t *> getRegMasks() const override;\n"
+     << " bool isGeneralPurposeRegister(const MachineFunction &, "
+     << "MCRegister) const override;\n"
+     << " bool isFixedRegister(const MachineFunction &, "
+     << "MCRegister) const override;\n"
+     << " bool isArgumentRegister(const MachineFunction &, "
+     << "MCRegister) const override;\n"
+     << " bool isConstantPhysReg(MCRegister PhysReg) const override final;\n"
+     << " /// Devirtualized TargetFrameLowering.\n"
+     << " static const " << TargetName << "FrameLowering *getFrameLowering(\n"
+     << " const MachineFunction &MF);\n";
+
+  const auto &RegisterClasses = RegBank.getRegClasses();
+  // getPhysRegBaseClass is only declared if some class specifies a base
+  // class order.
+  if (llvm::any_of(RegisterClasses, [](const auto &RC) { return RC.getBaseClassOrder(); })) {
+    OS << " const TargetRegisterClass *getPhysRegBaseClass(MCRegister Reg) const override;\n";
+  }
+
+  OS << "};\n\n";
+
+  if (!RegisterClasses.empty()) {
+    OS << "namespace " << RegisterClasses.front().Namespace
+       << " { // Register classes\n";
+
+    for (const auto &RC : RegisterClasses) {
+      const std::string &Name = RC.getName();
+
+      // Output the extern for the instance.
+      OS << " extern const TargetRegisterClass " << Name << "RegClass;\n";
+    }
+    OS << "} // end namespace " << RegisterClasses.front().Namespace << "\n\n";
+  }
+  OS << "} // end namespace llvm\n\n";
+  OS << "#endif // GET_REGINFO_HEADER\n\n";
+}
+
+//
+// runTargetDesc - Output the target register and register file descriptions.
+//
+void
+RegisterInfoEmitter::runTargetDesc(raw_ostream &OS, CodeGenTarget &Target,
+ CodeGenRegBank &RegBank){
+ emitSourceFileHeader("Target Register and Register Classes Information", OS);
+
+ OS << "\n#ifdef GET_REGINFO_TARGET_DESC\n";
+ OS << "#undef GET_REGINFO_TARGET_DESC\n\n";
+
+ OS << "namespace llvm {\n\n";
+
+ // Get access to MCRegisterClass data.
+ OS << "extern const MCRegisterClass " << Target.getName()
+ << "MCRegisterClasses[];\n";
+
+ // Start out by emitting each of the register classes.
+ const auto &RegisterClasses = RegBank.getRegClasses();
+ const auto &SubRegIndices = RegBank.getSubRegIndices();
+
+ // Collect all registers belonging to any allocatable class.
+ std::set<Record*> AllocatableRegs;
+
+ // Collect allocatable registers.
+ for (const auto &RC : RegisterClasses) {
+ ArrayRef<Record*> Order = RC.getOrder();
+
+ if (RC.Allocatable)
+ AllocatableRegs.insert(Order.begin(), Order.end());
+ }
+
+ const CodeGenHwModes &CGH = Target.getHwModes();
+ unsigned NumModes = CGH.getNumModeIds();
+
+ // Build a shared array of value types.
+ SequenceToOffsetTable<std::vector<MVT::SimpleValueType>> VTSeqs;
+ for (unsigned M = 0; M < NumModes; ++M) {
+ for (const auto &RC : RegisterClasses) {
+ std::vector<MVT::SimpleValueType> S;
+ for (const ValueTypeByHwMode &VVT : RC.VTs)
+ S.push_back(VVT.get(M).SimpleTy);
+ VTSeqs.add(S);
+ }
+ }
+ VTSeqs.layout();
+ OS << "\nstatic const MVT::SimpleValueType VTLists[] = {\n";
+ VTSeqs.emit(OS, printSimpleValueType, "MVT::Other");
+ OS << "};\n";
+
+ // Emit SubRegIndex names, skipping 0.
+ OS << "\nstatic const char *SubRegIndexNameTable[] = { \"";
+
+ for (const auto &Idx : SubRegIndices) {
+ OS << Idx.getName();
+ OS << "\", \"";
+ }
+ OS << "\" };\n\n";
+
+ // Emit SubRegIndex lane masks, including 0.
+ OS << "\nstatic const LaneBitmask SubRegIndexLaneMaskTable[] = {\n "
+ "LaneBitmask::getAll(),\n";
+ for (const auto &Idx : SubRegIndices) {
+ printMask(OS << " ", Idx.LaneMask);
+ OS << ", // " << Idx.getName() << '\n';
+ }
+ OS << " };\n\n";
+
+ OS << "\n";
+
+ // Now that all of the structs have been emitted, emit the instances.
+ if (!RegisterClasses.empty()) {
+ OS << "\nstatic const TargetRegisterInfo::RegClassInfo RegClassInfos[]"
+ << " = {\n";
+ for (unsigned M = 0; M < NumModes; ++M) {
+ unsigned EV = 0;
+ OS << " // Mode = " << M << " (";
+ if (M == 0)
+ OS << "Default";
+ else
+ OS << CGH.getMode(M).Name;
+ OS << ")\n";
+ for (const auto &RC : RegisterClasses) {
+ assert(RC.EnumValue == EV && "Unexpected order of register classes");
+ ++EV;
+ (void)EV;
+ const RegSizeInfo &RI = RC.RSI.get(M);
+ OS << " { " << RI.RegSize << ", " << RI.SpillSize << ", "
+ << RI.SpillAlignment;
+ std::vector<MVT::SimpleValueType> VTs;
+ for (const ValueTypeByHwMode &VVT : RC.VTs)
+ VTs.push_back(VVT.get(M).SimpleTy);
+ OS << ", VTLists+" << VTSeqs.get(VTs) << " }, // "
+ << RC.getName() << '\n';
+ }
+ }
+ OS << "};\n";
+
+
+ OS << "\nstatic const TargetRegisterClass *const "
+ << "NullRegClasses[] = { nullptr };\n\n";
+
+ // Emit register class bit mask tables. The first bit mask emitted for a
+ // register class, RC, is the set of sub-classes, including RC itself.
+ //
+ // If RC has super-registers, also create a list of subreg indices and bit
+ // masks, (Idx, Mask). The bit mask has a bit for every superreg regclass,
+ // SuperRC, that satisfies:
+ //
+ // For all SuperReg in SuperRC: SuperReg:Idx in RC
+ //
+ // The 0-terminated list of subreg indices starts at:
+ //
+ // RC->getSuperRegIndices() = SuperRegIdxSeqs + ...
+ //
+ // The corresponding bitmasks follow the sub-class mask in memory. Each
+ // mask has RCMaskWords uint32_t entries.
+ //
+ // Every bit mask present in the list has at least one bit set.
+
+ // Compress the sub-reg index lists.
+ typedef std::vector<const CodeGenSubRegIndex*> IdxList;
+ SmallVector<IdxList, 8> SuperRegIdxLists(RegisterClasses.size());
+ SequenceToOffsetTable<IdxList, deref<std::less<>>> SuperRegIdxSeqs;
+ BitVector MaskBV(RegisterClasses.size());
+
+ for (const auto &RC : RegisterClasses) {
+ OS << "static const uint32_t " << RC.getName()
+ << "SubClassMask[] = {\n ";
+ printBitVectorAsHex(OS, RC.getSubClasses(), 32);
+
+ // Emit super-reg class masks for any relevant SubRegIndices that can
+ // project into RC.
+ IdxList &SRIList = SuperRegIdxLists[RC.EnumValue];
+ for (auto &Idx : SubRegIndices) {
+ MaskBV.reset();
+ RC.getSuperRegClasses(&Idx, MaskBV);
+ if (MaskBV.none())
+ continue;
+ SRIList.push_back(&Idx);
+ OS << "\n ";
+ printBitVectorAsHex(OS, MaskBV, 32);
+ OS << "// " << Idx.getName();
+ }
+ SuperRegIdxSeqs.add(SRIList);
+ OS << "\n};\n\n";
+ }
+
+ OS << "static const uint16_t SuperRegIdxSeqs[] = {\n";
+ SuperRegIdxSeqs.layout();
+ SuperRegIdxSeqs.emit(OS, printSubRegIndex);
+ OS << "};\n\n";
+
+ // Emit NULL terminated super-class lists.
+ for (const auto &RC : RegisterClasses) {
+ ArrayRef<CodeGenRegisterClass*> Supers = RC.getSuperClasses();
+
+ // Skip classes without supers. We can reuse NullRegClasses.
+ if (Supers.empty())
+ continue;
+
+ OS << "static const TargetRegisterClass *const "
+ << RC.getName() << "Superclasses[] = {\n";
+ for (const auto *Super : Supers)
+ OS << " &" << Super->getQualifiedName() << "RegClass,\n";
+ OS << " nullptr\n};\n\n";
+ }
+
+ // Emit methods.
+ for (const auto &RC : RegisterClasses) {
+ if (!RC.AltOrderSelect.empty()) {
+ OS << "\nstatic inline unsigned " << RC.getName()
+ << "AltOrderSelect(const MachineFunction &MF) {"
+ << RC.AltOrderSelect << "}\n\n"
+ << "static ArrayRef<MCPhysReg> " << RC.getName()
+ << "GetRawAllocationOrder(const MachineFunction &MF) {\n";
+ for (unsigned oi = 1 , oe = RC.getNumOrders(); oi != oe; ++oi) {
+ ArrayRef<Record*> Elems = RC.getOrder(oi);
+ if (!Elems.empty()) {
+ OS << " static const MCPhysReg AltOrder" << oi << "[] = {";
+ for (unsigned elem = 0; elem != Elems.size(); ++elem)
+ OS << (elem ? ", " : " ") << getQualifiedName(Elems[elem]);
+ OS << " };\n";
+ }
+ }
+ OS << " const MCRegisterClass &MCR = " << Target.getName()
+ << "MCRegisterClasses[" << RC.getQualifiedName() + "RegClassID];\n"
+ << " const ArrayRef<MCPhysReg> Order[] = {\n"
+ << " ArrayRef(MCR.begin(), MCR.getNumRegs()";
+ for (unsigned oi = 1, oe = RC.getNumOrders(); oi != oe; ++oi)
+ if (RC.getOrder(oi).empty())
+ OS << "),\n ArrayRef<MCPhysReg>(";
+ else
+ OS << "),\n ArrayRef(AltOrder" << oi;
+ OS << ")\n };\n const unsigned Select = " << RC.getName()
+ << "AltOrderSelect(MF);\n assert(Select < " << RC.getNumOrders()
+ << ");\n return Order[Select];\n}\n";
+ }
+ }
+
+ // Now emit the actual value-initialized register class instances.
+ OS << "\nnamespace " << RegisterClasses.front().Namespace
+ << " { // Register class instances\n";
+
+ for (const auto &RC : RegisterClasses) {
+ OS << " extern const TargetRegisterClass " << RC.getName()
+ << "RegClass = {\n " << '&' << Target.getName()
+ << "MCRegisterClasses[" << RC.getName() << "RegClassID],\n "
+ << RC.getName() << "SubClassMask,\n SuperRegIdxSeqs + "
+ << SuperRegIdxSeqs.get(SuperRegIdxLists[RC.EnumValue]) << ",\n ";
+ printMask(OS, RC.LaneMask);
+ OS << ",\n " << (unsigned)RC.AllocationPriority << ",\n "
+ << (RC.GlobalPriority ? "true" : "false") << ",\n "
+ << format("0x%02x", RC.TSFlags) << ", /* TSFlags */\n "
+ << (RC.HasDisjunctSubRegs ? "true" : "false")
+ << ", /* HasDisjunctSubRegs */\n "
+ << (RC.CoveredBySubRegs ? "true" : "false")
+ << ", /* CoveredBySubRegs */\n ";
+ if (RC.getSuperClasses().empty())
+ OS << "NullRegClasses,\n ";
+ else
+ OS << RC.getName() << "Superclasses,\n ";
+ if (RC.AltOrderSelect.empty())
+ OS << "nullptr\n";
+ else
+ OS << RC.getName() << "GetRawAllocationOrder\n";
+ OS << " };\n\n";
+ }
+
+ OS << "} // end namespace " << RegisterClasses.front().Namespace << "\n";
+ }
+
+ OS << "\nnamespace {\n";
+ OS << " const TargetRegisterClass *const RegisterClasses[] = {\n";
+ for (const auto &RC : RegisterClasses)
+ OS << " &" << RC.getQualifiedName() << "RegClass,\n";
+ OS << " };\n";
+ OS << "} // end anonymous namespace\n";
+
+ // Emit extra information about registers.
+ const std::string &TargetName = std::string(Target.getName());
+ const auto &Regs = RegBank.getRegisters();
+ unsigned NumRegCosts = 1;
+ for (const auto &Reg : Regs)
+ NumRegCosts = std::max((size_t)NumRegCosts, Reg.CostPerUse.size());
+
+ std::vector<unsigned> AllRegCostPerUse;
+ llvm::BitVector InAllocClass(Regs.size() + 1, false);
+ AllRegCostPerUse.insert(AllRegCostPerUse.end(), NumRegCosts, 0);
+
+ // Populate the vector RegCosts with the CostPerUse list of the registers
+ // in the order they are read. Have at most NumRegCosts entries for
+ // each register. Fill with zero for values which are not explicitly given.
+ for (const auto &Reg : Regs) {
+ auto Costs = Reg.CostPerUse;
+ AllRegCostPerUse.insert(AllRegCostPerUse.end(), Costs.begin(), Costs.end());
+ if (NumRegCosts > Costs.size())
+ AllRegCostPerUse.insert(AllRegCostPerUse.end(),
+ NumRegCosts - Costs.size(), 0);
+
+ if (AllocatableRegs.count(Reg.TheDef))
+ InAllocClass.set(Reg.EnumValue);
+ }
+
+ // Emit the cost values as a 1D-array after grouping them by their indices,
+ // i.e. the costs for all registers corresponds to index 0, 1, 2, etc.
+ // Size of the emitted array should be NumRegCosts * (Regs.size() + 1).
+ OS << "\nstatic const uint8_t "
+ << "CostPerUseTable[] = { \n";
+ for (unsigned int I = 0; I < NumRegCosts; ++I) {
+ for (unsigned J = I, E = AllRegCostPerUse.size(); J < E; J += NumRegCosts)
+ OS << AllRegCostPerUse[J] << ", ";
+ }
+ OS << "};\n\n";
+
+ OS << "\nstatic const bool "
+ << "InAllocatableClassTable[] = { \n";
+ for (unsigned I = 0, E = InAllocClass.size(); I < E; ++I) {
+ OS << (InAllocClass[I] ? "true" : "false") << ", ";
+ }
+ OS << "};\n\n";
+
+ OS << "\nstatic const TargetRegisterInfoDesc " << TargetName
+ << "RegInfoDesc = { // Extra Descriptors\n";
+ OS << "CostPerUseTable, " << NumRegCosts << ", "
+ << "InAllocatableClassTable";
+ OS << "};\n\n"; // End of register descriptors...
+
+ std::string ClassName = Target.getName().str() + "GenRegisterInfo";
+
+ auto SubRegIndicesSize =
+ std::distance(SubRegIndices.begin(), SubRegIndices.end());
+
+ if (!SubRegIndices.empty()) {
+ emitComposeSubRegIndices(OS, RegBank, ClassName);
+ emitComposeSubRegIndexLaneMask(OS, RegBank, ClassName);
+ }
+
+ if (!SubRegIndices.empty()) {
+ // Emit getSubClassWithSubReg.
+ OS << "const TargetRegisterClass *" << ClassName
+ << "::getSubClassWithSubReg(const TargetRegisterClass *RC, unsigned Idx)"
+ << " const {\n";
+ // Use the smallest type that can hold a regclass ID with room for a
+ // sentinel.
+ if (RegisterClasses.size() <= UINT8_MAX)
+ OS << " static const uint8_t Table[";
+ else if (RegisterClasses.size() <= UINT16_MAX)
+ OS << " static const uint16_t Table[";
+ else
+ PrintFatalError("Too many register classes.");
+ OS << RegisterClasses.size() << "][" << SubRegIndicesSize << "] = {\n";
+ for (const auto &RC : RegisterClasses) {
+ OS << " {\t// " << RC.getName() << "\n";
+ for (auto &Idx : SubRegIndices) {
+ if (CodeGenRegisterClass *SRC = RC.getSubClassWithSubReg(&Idx))
+ OS << " " << SRC->EnumValue + 1 << ",\t// " << Idx.getName()
+ << " -> " << SRC->getName() << "\n";
+ else
+ OS << " 0,\t// " << Idx.getName() << "\n";
+ }
+ OS << " },\n";
+ }
+ OS << " };\n assert(RC && \"Missing regclass\");\n"
+ << " if (!Idx) return RC;\n --Idx;\n"
+ << " assert(Idx < " << SubRegIndicesSize << " && \"Bad subreg\");\n"
+ << " unsigned TV = Table[RC->getID()][Idx];\n"
+ << " return TV ? getRegClass(TV - 1) : nullptr;\n}\n\n";
+
+ // Emit getSubRegisterClass
+ OS << "const TargetRegisterClass *" << ClassName
+ << "::getSubRegisterClass(const TargetRegisterClass *RC, unsigned Idx)"
+ << " const {\n";
+
+ // Use the smallest type that can hold a regclass ID with room for a
+ // sentinel.
+ if (RegisterClasses.size() <= UINT8_MAX)
+ OS << " static const uint8_t Table[";
+ else if (RegisterClasses.size() <= UINT16_MAX)
+ OS << " static const uint16_t Table[";
+ else
+ PrintFatalError("Too many register classes.");
+
+ OS << RegisterClasses.size() << "][" << SubRegIndicesSize << "] = {\n";
+
+ for (const auto &RC : RegisterClasses) {
+ OS << " {\t// " << RC.getName() << '\n';
+ for (auto &Idx : SubRegIndices) {
+ std::optional<std::pair<CodeGenRegisterClass *, CodeGenRegisterClass *>>
+ MatchingSubClass = RC.getMatchingSubClassWithSubRegs(RegBank, &Idx);
+
+ unsigned EnumValue = 0;
+ if (MatchingSubClass) {
+ CodeGenRegisterClass *SubRegClass = MatchingSubClass->second;
+ EnumValue = SubRegClass->EnumValue + 1;
+ }
+
+ OS << " " << EnumValue << ",\t// "
+ << RC.getName() << ':' << Idx.getName();
+
+ if (MatchingSubClass) {
+ CodeGenRegisterClass *SubRegClass = MatchingSubClass->second;
+ OS << " -> " << SubRegClass->getName();
+ }
+
+ OS << '\n';
+ }
+
+ OS << " },\n";
+ }
+ OS << " };\n assert(RC && \"Missing regclass\");\n"
+ << " if (!Idx) return RC;\n --Idx;\n"
+ << " assert(Idx < " << SubRegIndicesSize << " && \"Bad subreg\");\n"
+ << " unsigned TV = Table[RC->getID()][Idx];\n"
+ << " return TV ? getRegClass(TV - 1) : nullptr;\n}\n\n";
+ }
+
+ EmitRegUnitPressure(OS, RegBank, ClassName);
+
+ // Emit register base class mapper
+ if (!RegisterClasses.empty()) {
+ // Collect base classes
+ SmallVector<const CodeGenRegisterClass*> BaseClasses;
+ for (const auto &RC : RegisterClasses) {
+ if (RC.getBaseClassOrder())
+ BaseClasses.push_back(&RC);
+ }
+ if (!BaseClasses.empty()) {
+ // Represent class indexes with uint8_t and allocate one index for nullptr
+ assert(BaseClasses.size() <= UINT8_MAX && "Too many base register classes");
+
+ // Apply order
+ struct BaseClassOrdering {
+ bool operator()(const CodeGenRegisterClass *LHS, const CodeGenRegisterClass *RHS) const {
+ return std::pair(*LHS->getBaseClassOrder(), LHS->EnumValue)
+ < std::pair(*RHS->getBaseClassOrder(), RHS->EnumValue);
+ }
+ };
+ llvm::stable_sort(BaseClasses, BaseClassOrdering());
+
+ // Build mapping for Regs (+1 for NoRegister)
+ std::vector<uint8_t> Mapping(Regs.size() + 1, 0);
+ for (int RCIdx = BaseClasses.size() - 1; RCIdx >= 0; --RCIdx) {
+ for (const auto Reg : BaseClasses[RCIdx]->getMembers())
+ Mapping[Reg->EnumValue] = RCIdx + 1;
+ }
+
+ OS << "\n// Register to base register class mapping\n\n";
+ OS << "\n";
+ OS << "const TargetRegisterClass *" << ClassName
+ << "::getPhysRegBaseClass(MCRegister Reg)"
+ << " const {\n";
+ OS << " static const TargetRegisterClass *BaseClasses[" << (BaseClasses.size() + 1) << "] = {\n";
+ OS << " nullptr,\n";
+ for (const auto RC : BaseClasses)
+ OS << " &" << RC->getQualifiedName() << "RegClass,\n";
+ OS << " };\n";
+ OS << " static const uint8_t Mapping[" << Mapping.size() << "] = {\n ";
+ for (const uint8_t Value : Mapping)
+ OS << (unsigned)Value << ",";
+ OS << " };\n\n";
+ OS << " assert(Reg < sizeof(Mapping));\n";
+ OS << " return BaseClasses[Mapping[Reg]];\n";
+ OS << "}\n";
+ }
+ }
+
+ // Emit the constructor of the class...
+ OS << "extern const MCRegisterDesc " << TargetName << "RegDesc[];\n";
+ OS << "extern const MCPhysReg " << TargetName << "RegDiffLists[];\n";
+ OS << "extern const LaneBitmask " << TargetName << "LaneMaskLists[];\n";
+ OS << "extern const char " << TargetName << "RegStrings[];\n";
+ OS << "extern const char " << TargetName << "RegClassStrings[];\n";
+ OS << "extern const MCPhysReg " << TargetName << "RegUnitRoots[][2];\n";
+ OS << "extern const uint16_t " << TargetName << "SubRegIdxLists[];\n";
+ OS << "extern const MCRegisterInfo::SubRegCoveredBits "
+ << TargetName << "SubRegIdxRanges[];\n";
+ OS << "extern const uint16_t " << TargetName << "RegEncodingTable[];\n";
+
+ EmitRegMappingTables(OS, Regs, true);
+
+ OS << ClassName << "::\n"
+ << ClassName
+ << "(unsigned RA, unsigned DwarfFlavour, unsigned EHFlavour,\n"
+ " unsigned PC, unsigned HwMode)\n"
+ << " : TargetRegisterInfo(&" << TargetName << "RegInfoDesc"
+ << ", RegisterClasses, RegisterClasses+" << RegisterClasses.size() << ",\n"
+ << " SubRegIndexNameTable, SubRegIndexLaneMaskTable,\n"
+ << " ";
+ printMask(OS, RegBank.CoveringLanes);
+ OS << ", RegClassInfos, HwMode) {\n"
+ << " InitMCRegisterInfo(" << TargetName << "RegDesc, " << Regs.size() + 1
+ << ", RA, PC,\n " << TargetName
+ << "MCRegisterClasses, " << RegisterClasses.size() << ",\n"
+ << " " << TargetName << "RegUnitRoots,\n"
+ << " " << RegBank.getNumNativeRegUnits() << ",\n"
+ << " " << TargetName << "RegDiffLists,\n"
+ << " " << TargetName << "LaneMaskLists,\n"
+ << " " << TargetName << "RegStrings,\n"
+ << " " << TargetName << "RegClassStrings,\n"
+ << " " << TargetName << "SubRegIdxLists,\n"
+ << " " << SubRegIndicesSize + 1 << ",\n"
+ << " " << TargetName << "SubRegIdxRanges,\n"
+ << " " << TargetName << "RegEncodingTable);\n\n";
+
+ EmitRegMapping(OS, Regs, true);
+
+ OS << "}\n\n";
+
+ // Emit CalleeSavedRegs information.
+ std::vector<Record*> CSRSets =
+ Records.getAllDerivedDefinitions("CalleeSavedRegs");
+ for (unsigned i = 0, e = CSRSets.size(); i != e; ++i) {
+ Record *CSRSet = CSRSets[i];
+ const SetTheory::RecVec *Regs = RegBank.getSets().expand(CSRSet);
+ assert(Regs && "Cannot expand CalleeSavedRegs instance");
+
+ // Emit the *_SaveList list of callee-saved registers.
+ OS << "static const MCPhysReg " << CSRSet->getName()
+ << "_SaveList[] = { ";
+ for (unsigned r = 0, re = Regs->size(); r != re; ++r)
+ OS << getQualifiedName((*Regs)[r]) << ", ";
+ OS << "0 };\n";
+
+ // Emit the *_RegMask bit mask of call-preserved registers.
+ BitVector Covered = RegBank.computeCoveredRegisters(*Regs);
+
+ // Check for an optional OtherPreserved set.
+ // Add those registers to RegMask, but not to SaveList.
+ if (DagInit *OPDag =
+ dyn_cast<DagInit>(CSRSet->getValueInit("OtherPreserved"))) {
+ SetTheory::RecSet OPSet;
+ RegBank.getSets().evaluate(OPDag, OPSet, CSRSet->getLoc());
+ Covered |= RegBank.computeCoveredRegisters(
+ ArrayRef<Record*>(OPSet.begin(), OPSet.end()));
+ }
+
+ // Add all constant physical registers to the preserved mask:
+ SetTheory::RecSet ConstantSet;
+ for (auto &Reg : RegBank.getRegisters()) {
+ if (Reg.Constant)
+ ConstantSet.insert(Reg.TheDef);
+ }
+ Covered |= RegBank.computeCoveredRegisters(
+ ArrayRef<Record *>(ConstantSet.begin(), ConstantSet.end()));
+
+ OS << "static const uint32_t " << CSRSet->getName()
+ << "_RegMask[] = { ";
+ printBitVectorAsHex(OS, Covered, 32);
+ OS << "};\n";
+ }
+ OS << "\n\n";
+
+ OS << "ArrayRef<const uint32_t *> " << ClassName
+ << "::getRegMasks() const {\n";
+ if (!CSRSets.empty()) {
+ OS << " static const uint32_t *const Masks[] = {\n";
+ for (Record *CSRSet : CSRSets)
+ OS << " " << CSRSet->getName() << "_RegMask,\n";
+ OS << " };\n";
+ OS << " return ArrayRef(Masks);\n";
+ } else {
+ OS << " return std::nullopt;\n";
+ }
+ OS << "}\n\n";
+
+ const std::list<CodeGenRegisterCategory> &RegCategories =
+ RegBank.getRegCategories();
+ OS << "bool " << ClassName << "::\n"
+ << "isGeneralPurposeRegister(const MachineFunction &MF, "
+ << "MCRegister PhysReg) const {\n"
+ << " return\n";
+ for (const CodeGenRegisterCategory &Category : RegCategories)
+ if (Category.getName() == "GeneralPurposeRegisters") {
+ for (const CodeGenRegisterClass *RC : Category.getClasses())
+ OS << " " << RC->getQualifiedName()
+ << "RegClass.contains(PhysReg) ||\n";
+ break;
+ }
+ OS << " false;\n";
+ OS << "}\n\n";
+
+ OS << "bool " << ClassName << "::\n"
+ << "isFixedRegister(const MachineFunction &MF, "
+ << "MCRegister PhysReg) const {\n"
+ << " return\n";
+ for (const CodeGenRegisterCategory &Category : RegCategories)
+ if (Category.getName() == "FixedRegisters") {
+ for (const CodeGenRegisterClass *RC : Category.getClasses())
+ OS << " " << RC->getQualifiedName()
+ << "RegClass.contains(PhysReg) ||\n";
+ break;
+ }
+ OS << " false;\n";
+ OS << "}\n\n";
+
+ OS << "bool " << ClassName << "::\n"
+ << "isArgumentRegister(const MachineFunction &MF, "
+ << "MCRegister PhysReg) const {\n"
+ << " return\n";
+ for (const CodeGenRegisterCategory &Category : RegCategories)
+ if (Category.getName() == "ArgumentRegisters") {
+ for (const CodeGenRegisterClass *RC : Category.getClasses())
+ OS << " " << RC->getQualifiedName()
+ << "RegClass.contains(PhysReg) ||\n";
+ break;
+ }
+ OS << " false;\n";
+ OS << "}\n\n";
+
+ OS << "bool " << ClassName << "::\n"
+ << "isConstantPhysReg(MCRegister PhysReg) const {\n"
+ << " return\n";
+ for (const auto &Reg : Regs)
+ if (Reg.Constant)
+ OS << " PhysReg == " << getQualifiedName(Reg.TheDef) << " ||\n";
+ OS << " false;\n";
+ OS << "}\n\n";
+
+ OS << "ArrayRef<const char *> " << ClassName
+ << "::getRegMaskNames() const {\n";
+ if (!CSRSets.empty()) {
+ OS << " static const char *Names[] = {\n";
+ for (Record *CSRSet : CSRSets)
+ OS << " " << '"' << CSRSet->getName() << '"' << ",\n";
+ OS << " };\n";
+ OS << " return ArrayRef(Names);\n";
+ } else {
+ OS << " return std::nullopt;\n";
+ }
+ OS << "}\n\n";
+
+ OS << "const " << TargetName << "FrameLowering *\n" << TargetName
+ << "GenRegisterInfo::getFrameLowering(const MachineFunction &MF) {\n"
+ << " return static_cast<const " << TargetName << "FrameLowering *>(\n"
+ << " MF.getSubtarget().getFrameLowering());\n"
+ << "}\n\n";
+
+ OS << "} // end namespace llvm\n\n";
+ OS << "#endif // GET_REGINFO_TARGET_DESC\n\n";
+}
+
+// Top-level driver for register-info emission. Runs the four generation
+// phases in their required order -- enums, MC descriptors, target header,
+// target descriptors -- timing each phase through the RecordKeeper timers,
+// and finally dumps the register bank when the RegisterInfoDebug flag is set.
+void RegisterInfoEmitter::run(raw_ostream &OS) {
+  CodeGenRegBank &RegBank = Target.getRegBank();
+  Records.startTimer("Print enums");
+  runEnums(OS, Target, RegBank);
+
+  Records.startTimer("Print MC registers");
+  runMCDesc(OS, Target, RegBank);
+
+  Records.startTimer("Print header fragment");
+  runTargetHeader(OS, Target, RegBank);
+
+  Records.startTimer("Print target registers");
+  runTargetDesc(OS, Target, RegBank);
+
+  // Optional human-readable dump of the register bank for debugging.
+  if (RegisterInfoDebug)
+    debugDump(errs());
+}
+
+// Dump the register bank in a human-readable form for debugging: every
+// register class (per-HW-mode spill size/alignment, lane mask, members,
+// sub-/super-classes), every sub-register index, and every register with
+// its cost list and sub-register map.
+void RegisterInfoEmitter::debugDump(raw_ostream &OS) {
+  CodeGenRegBank &RegBank = Target.getRegBank();
+  const CodeGenHwModes &CGH = Target.getHwModes();
+  unsigned NumModes = CGH.getNumModeIds();
+  // Mode 0 is the default HW mode; it has no name of its own.
+  auto getModeName = [CGH] (unsigned M) -> StringRef {
+    if (M == 0)
+      return "Default";
+    return CGH.getMode(M).Name;
+  };
+
+  for (const CodeGenRegisterClass &RC : RegBank.getRegClasses()) {
+    OS << "RegisterClass " << RC.getName() << ":\n";
+    OS << "\tSpillSize: {";
+    for (unsigned M = 0; M != NumModes; ++M)
+      OS << ' ' << getModeName(M) << ':' << RC.RSI.get(M).SpillSize;
+    OS << " }\n\tSpillAlignment: {";
+    for (unsigned M = 0; M != NumModes; ++M)
+      OS << ' ' << getModeName(M) << ':' << RC.RSI.get(M).SpillAlignment;
+    OS << " }\n\tNumRegs: " << RC.getMembers().size() << '\n';
+    OS << "\tLaneMask: " << PrintLaneMask(RC.LaneMask) << '\n';
+    OS << "\tHasDisjunctSubRegs: " << RC.HasDisjunctSubRegs << '\n';
+    OS << "\tCoveredBySubRegs: " << RC.CoveredBySubRegs << '\n';
+    OS << "\tAllocatable: " << RC.Allocatable << '\n';
+    OS << "\tAllocationPriority: " << unsigned(RC.AllocationPriority) << '\n';
+    OS << "\tRegs:";
+    for (const CodeGenRegister *R : RC.getMembers()) {
+      OS << " " << R->getName();
+    }
+    OS << '\n';
+    // Sub-classes are stored as a bit vector indexed by class EnumValue.
+    OS << "\tSubClasses:";
+    const BitVector &SubClasses = RC.getSubClasses();
+    for (const CodeGenRegisterClass &SRC : RegBank.getRegClasses()) {
+      if (!SubClasses.test(SRC.EnumValue))
+        continue;
+      OS << " " << SRC.getName();
+    }
+    OS << '\n';
+    OS << "\tSuperClasses:";
+    for (const CodeGenRegisterClass *SRC : RC.getSuperClasses()) {
+      OS << " " << SRC->getName();
+    }
+    OS << '\n';
+  }
+
+  for (const CodeGenSubRegIndex &SRI : RegBank.getSubRegIndices()) {
+    OS << "SubRegIndex " << SRI.getName() << ":\n";
+    OS << "\tLaneMask: " << PrintLaneMask(SRI.LaneMask) << '\n';
+    OS << "\tAllSuperRegsCovered: " << SRI.AllSuperRegsCovered << '\n';
+    OS << "\tOffset, Size: " << SRI.Offset << ", " << SRI.Size << '\n';
+  }
+
+  for (const CodeGenRegister &R : RegBank.getRegisters()) {
+    OS << "Register " << R.getName() << ":\n";
+    OS << "\tCostPerUse: ";
+    for (const auto &Cost : R.CostPerUse)
+      OS << Cost << " ";
+    OS << '\n';
+    OS << "\tCoveredBySubregs: " << R.CoveredBySubRegs << '\n';
+    OS << "\tHasDisjunctSubRegs: " << R.HasDisjunctSubRegs << '\n';
+    for (std::pair<CodeGenSubRegIndex*,CodeGenRegister*> P : R.getSubRegs()) {
+      OS << "\tSubReg " << P.first->getName()
+         << " = " << P.second->getName() << '\n';
+    }
+  }
+}
+
+namespace llvm {
+
+// Public entry point used by the TableGen driver: constructs a
+// RegisterInfoEmitter over the given record keeper and runs all phases.
+void EmitRegisterInfo(RecordKeeper &RK, raw_ostream &OS) {
+  RegisterInfoEmitter(RK).run(OS);
+}
+
+} // end namespace llvm
diff --git a/contrib/libs/llvm16/utils/TableGen/SDNodeProperties.cpp b/contrib/libs/llvm16/utils/TableGen/SDNodeProperties.cpp
new file mode 100644
index 0000000000..2aec41aac6
--- /dev/null
+++ b/contrib/libs/llvm16/utils/TableGen/SDNodeProperties.cpp
@@ -0,0 +1,40 @@
+//===- SDNodeProperties.cpp -----------------------------------------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+#include "SDNodeProperties.h"
+#include "llvm/ADT/StringSwitch.h"
+#include "llvm/TableGen/Error.h"
+#include "llvm/TableGen/Record.h"
+
+using namespace llvm;
+
+// Translate the "Properties" list of an SDPatternOperator record into a
+// bit mask of SDNP enumerators. An unrecognized property name is a fatal
+// TableGen error reported at the record's location.
+unsigned llvm::parseSDPatternOperatorProperties(Record *R) {
+  unsigned Mask = 0;
+  for (Record *Property : R->getValueAsListOfDefs("Properties")) {
+    unsigned Bit = StringSwitch<unsigned>(Property->getName())
+                       .Case("SDNPCommutative", SDNPCommutative)
+                       .Case("SDNPAssociative", SDNPAssociative)
+                       .Case("SDNPHasChain", SDNPHasChain)
+                       .Case("SDNPOutGlue", SDNPOutGlue)
+                       .Case("SDNPInGlue", SDNPInGlue)
+                       .Case("SDNPOptInGlue", SDNPOptInGlue)
+                       .Case("SDNPMayStore", SDNPMayStore)
+                       .Case("SDNPMayLoad", SDNPMayLoad)
+                       .Case("SDNPSideEffect", SDNPSideEffect)
+                       .Case("SDNPMemOperand", SDNPMemOperand)
+                       .Case("SDNPVariadic", SDNPVariadic)
+                       .Default(-1u);
+    if (Bit == -1u)
+      PrintFatalError(R->getLoc(), "Unknown SD Node property '" +
+                                       Property->getName() + "' on node '" +
+                                       R->getName() + "'!");
+    // Each recognized property sets the bit at its enumerator's position.
+    Mask |= 1 << Bit;
+  }
+  return Mask;
+}
diff --git a/contrib/libs/llvm16/utils/TableGen/SDNodeProperties.h b/contrib/libs/llvm16/utils/TableGen/SDNodeProperties.h
new file mode 100644
index 0000000000..66a04e6315
--- /dev/null
+++ b/contrib/libs/llvm16/utils/TableGen/SDNodeProperties.h
@@ -0,0 +1,39 @@
+//===- SDNodeProperties.h ---------------------------------------*- C++ -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_UTILS_TABLEGEN_SDNODEPROPERTIES_H
+#define LLVM_UTILS_TABLEGEN_SDNODEPROPERTIES_H
+
+namespace llvm {
+
+class Record;
+
+// SelectionDAG node properties.
+// SDNPMemOperand: indicates that a node touches memory and therefore must
+// have an associated memory operand that describes the access.
+// NOTE: parseSDPatternOperatorProperties packs these values into a mask as
+// (1 << value), so the enumerator order is significant and must not change.
+enum SDNP {
+  SDNPCommutative,
+  SDNPAssociative,
+  SDNPHasChain,
+  SDNPOutGlue,
+  SDNPInGlue,
+  SDNPOptInGlue,
+  SDNPMayLoad,
+  SDNPMayStore,
+  SDNPSideEffect,
+  SDNPMemOperand,
+  SDNPVariadic,
+  // The two entries below are not recognized by
+  // parseSDPatternOperatorProperties; presumably consumed elsewhere -- verify.
+  SDNPWantRoot,
+  SDNPWantParent
+};
+
+// Parse the "Properties" list of an SDPatternOperator record into a bit
+// mask of the SDNP values above; fatal error on unknown property names.
+unsigned parseSDPatternOperatorProperties(Record *R);
+
+} // end namespace llvm
+
+#endif // LLVM_UTILS_TABLEGEN_SDNODEPROPERTIES_H
diff --git a/contrib/libs/llvm16/utils/TableGen/SearchableTableEmitter.cpp b/contrib/libs/llvm16/utils/TableGen/SearchableTableEmitter.cpp
new file mode 100644
index 0000000000..c88a2db555
--- /dev/null
+++ b/contrib/libs/llvm16/utils/TableGen/SearchableTableEmitter.cpp
@@ -0,0 +1,831 @@
+//===- SearchableTableEmitter.cpp - Generate efficiently searchable tables -==//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// This tablegen backend emits a generic array initialized by specified fields,
+// together with companion index tables and lookup functions (binary search,
+// currently).
+//
+//===----------------------------------------------------------------------===//
+
+#include "CodeGenIntrinsics.h"
+#include "llvm/ADT/ArrayRef.h"
+#include "llvm/ADT/DenseMap.h"
+#include "llvm/ADT/STLExtras.h"
+#include "llvm/ADT/StringExtras.h"
+#include "llvm/TableGen/Error.h"
+#include "llvm/TableGen/Record.h"
+#include <algorithm>
+#include <set>
+#include <string>
+#include <vector>
+
+using namespace llvm;
+
+#define DEBUG_TYPE "searchable-table-emitter"
+
+namespace {
+
+// Convert an Init to its integer value. Returns int64_t rather than int so
+// that values of wide "bits" fields (up to 64 bits, see searchableFieldType)
+// are not truncated: the consumers -- compareBy, getNumericKey, and the hex
+// rendering via utohexstr -- all operate on 64-bit integers.
+int64_t getAsInt(Init *B) {
+  return cast<IntInit>(
+             B->convertInitializerTo(IntRecTy::get(B->getRecordKeeper())))
+      ->getValue();
+}
+// Read field \p Field of record \p R as a 64-bit integer.
+int64_t getInt(Record *R, StringRef Field) {
+  return getAsInt(R->getValueInit(Field));
+}
+
+// An emitted enum: a named list of (enumerator-name, value) entries plus a
+// map from the originating TableGen Record to its entry for fast lookup.
+struct GenericEnum {
+  using Entry = std::pair<StringRef, int64_t>;
+
+  std::string Name;
+  Record *Class = nullptr;            // The defining TableGen class record.
+  std::string PreprocessorGuard;      // Stem of the GET_*_DECL guard macro.
+  std::vector<std::unique_ptr<Entry>> Entries;
+  DenseMap<Record *, Entry *> EntryMap; // Record -> entry owned by Entries.
+};
+
+// One column of a generic table: its name, its TableGen type, and flags
+// controlling how its values are rendered in the emitted C++.
+struct GenericField {
+  std::string Name;
+  RecTy *RecType = nullptr;    // TableGen type of the field.
+  bool IsCode = false;         // Emit string values verbatim as code.
+  bool IsIntrinsic = false;    // Values are Intrinsic records.
+  bool IsInstruction = false;  // Values are Instruction records.
+  GenericEnum *Enum = nullptr; // Non-null when the field is enum-typed.
+
+  GenericField(StringRef Name) : Name(std::string(Name)) {}
+};
+
+// A lookup key over one or more table fields; each index produces one
+// generated lookup function named \c Name.
+struct SearchIndex {
+  std::string Name;
+  SMLoc Loc; // Source location of PrimaryKey or Key field definition.
+  SmallVector<GenericField, 1> Fields;
+  bool EarlyOut = false; // Emit a key-range pre-check before the search.
+};
+
+// In-memory description of one emitted table: the C++ element type, the
+// per-row fields, the rows themselves, and the lookup indices (one primary
+// key that orders the table plus any number of secondary indices).
+struct GenericTable {
+  std::string Name;
+  ArrayRef<SMLoc> Locs; // Source locations from the Record instance.
+  std::string PreprocessorGuard;
+  std::string CppTypeName;
+  SmallVector<GenericField, 2> Fields;
+  std::vector<Record *> Entries;
+
+  std::unique_ptr<SearchIndex> PrimaryKey;
+  SmallVector<std::unique_ptr<SearchIndex>, 2> Indices;
+
+  // Return the field descriptor named \p Name, or nullptr if absent.
+  const GenericField *getFieldByName(StringRef Name) const {
+    auto It = llvm::find_if(
+        Fields, [&](const GenericField &F) { return F.Name == Name; });
+    return It == Fields.end() ? nullptr : &*It;
+  }
+};
+
+// Backend driver: collects GenericEnum/GenericTable records and emits the
+// corresponding C++ arrays, enums, and binary-search lookup functions.
+class SearchableTableEmitter {
+  RecordKeeper &Records;
+  // Lazily-built cache of CodeGenIntrinsic wrappers, keyed by DefInit.
+  DenseMap<Init *, std::unique_ptr<CodeGenIntrinsic>> Intrinsics;
+  std::vector<std::unique_ptr<GenericEnum>> Enums;
+  DenseMap<Record *, GenericEnum *> EnumMap;
+  // All GET_*_DECL / GET_*_IMPL guards emitted so far.
+  std::set<std::string> PreprocessorGuards;
+
+public:
+  SearchableTableEmitter(RecordKeeper &R) : Records(R) {}
+
+  void run(raw_ostream &OS);
+
+private:
+  typedef std::pair<Init *, int> SearchTableEntry;
+
+  // Where a field's C++ type is being used; string fields need different
+  // spellings in each context (see searchableFieldType).
+  enum TypeContext {
+    TypeInStaticStruct,
+    TypeInTempStruct,
+    TypeInArgument,
+  };
+
+  // Render the value \p I of \p Field as the C++ token(s) stored in the
+  // emitted table. Fatal error for unsupported field types.
+  std::string primaryRepresentation(SMLoc Loc, const GenericField &Field,
+                                    Init *I) {
+    if (StringInit *SI = dyn_cast<StringInit>(I)) {
+      // Code fields are pasted verbatim; plain strings keep their quotes.
+      if (Field.IsCode || SI->hasCodeFormat())
+        return std::string(SI->getValue());
+      else
+        return SI->getAsString();
+    } else if (BitsInit *BI = dyn_cast<BitsInit>(I))
+      return "0x" + utohexstr(getAsInt(BI));
+    else if (BitInit *BI = dyn_cast<BitInit>(I))
+      return BI->getValue() ? "true" : "false";
+    else if (Field.IsIntrinsic)
+      return "Intrinsic::" + getIntrinsic(I).EnumName;
+    else if (Field.IsInstruction)
+      return I->getAsString();
+    else if (Field.Enum) {
+      auto *Entry = Field.Enum->EntryMap[cast<DefInit>(I)->getDef()];
+      if (!Entry)
+        PrintFatalError(Loc,
+                        Twine("Entry for field '") + Field.Name + "' is null");
+      return std::string(Entry->first);
+    }
+    PrintFatalError(Loc, Twine("invalid field type for field '") + Field.Name +
+                             "'; expected: bit, bits, string, or code");
+  }
+
+  // True if \p I is a def derived from the Intrinsic class.
+  bool isIntrinsic(Init *I) {
+    if (DefInit *DI = dyn_cast<DefInit>(I))
+      return DI->getDef()->isSubClassOf("Intrinsic");
+    return false;
+  }
+
+  // Get (building on first use) the CodeGenIntrinsic wrapper for \p I.
+  CodeGenIntrinsic &getIntrinsic(Init *I) {
+    std::unique_ptr<CodeGenIntrinsic> &Intr = Intrinsics[I];
+    if (!Intr)
+      Intr = std::make_unique<CodeGenIntrinsic>(cast<DefInit>(I)->getDef(),
+                                                std::vector<Record *>());
+    return *Intr;
+  }
+
+  bool compareBy(Record *LHS, Record *RHS, const SearchIndex &Index);
+
+  // Choose the C++ type used for \p Field in context \p Ctx; fatal error
+  // for types that cannot be used as a search key.
+  std::string searchableFieldType(const GenericTable &Table,
+                                  const SearchIndex &Index,
+                                  const GenericField &Field, TypeContext Ctx) {
+    if (isa<StringRecTy>(Field.RecType)) {
+      if (Ctx == TypeInStaticStruct)
+        return "const char *";
+      if (Ctx == TypeInTempStruct)
+        return "std::string";
+      return "StringRef";
+    } else if (BitsRecTy *BI = dyn_cast<BitsRecTy>(Field.RecType)) {
+      // Pick the narrowest fixed-width integer that fits the bits field.
+      unsigned NumBits = BI->getNumBits();
+      if (NumBits <= 8)
+        return "uint8_t";
+      if (NumBits <= 16)
+        return "uint16_t";
+      if (NumBits <= 32)
+        return "uint32_t";
+      if (NumBits <= 64)
+        return "uint64_t";
+      PrintFatalError(Index.Loc, Twine("In table '") + Table.Name +
+                                     "' lookup method '" + Index.Name +
+                                     "', key field '" + Field.Name +
+                                     "' of type bits is too large");
+    } else if (Field.Enum || Field.IsIntrinsic || Field.IsInstruction)
+      return "unsigned";
+    PrintFatalError(Index.Loc,
+                    Twine("In table '") + Table.Name + "' lookup method '" +
+                        Index.Name + "', key field '" + Field.Name +
+                        "' has invalid type: " + Field.RecType->getAsString());
+  }
+
+  void emitGenericTable(const GenericTable &Table, raw_ostream &OS);
+  void emitGenericEnum(const GenericEnum &Enum, raw_ostream &OS);
+  void emitLookupDeclaration(const GenericTable &Table,
+                             const SearchIndex &Index, raw_ostream &OS);
+  void emitLookupFunction(const GenericTable &Table, const SearchIndex &Index,
+                          bool IsPrimary, raw_ostream &OS);
+  void emitIfdef(StringRef Guard, raw_ostream &OS);
+
+  bool parseFieldType(GenericField &Field, Init *II);
+  std::unique_ptr<SearchIndex>
+  parseSearchIndex(GenericTable &Table, const RecordVal *RecVal, StringRef Name,
+                   const std::vector<StringRef> &Key, bool EarlyOut);
+  void collectEnumEntries(GenericEnum &Enum, StringRef NameField,
+                          StringRef ValueField,
+                          const std::vector<Record *> &Items);
+  void collectTableEntries(GenericTable &Table,
+                           const std::vector<Record *> &Items);
+};
+
+} // End anonymous namespace.
+
+// For search indices that consists of a single field whose numeric value is
+// known, return that numeric value.
+// Return the numeric key value of \p Rec for a single-field index: an
+// enum-typed key maps through the enum's entry table, any other key is
+// read directly as an integer field.
+static int64_t getNumericKey(const SearchIndex &Index, Record *Rec) {
+  assert(Index.Fields.size() == 1);
+  const GenericField &KeyField = Index.Fields[0];
+
+  if (KeyField.Enum) {
+    Record *EnumEntry = Rec->getValueAsDef(KeyField.Name);
+    return KeyField.Enum->EntryMap[EnumEntry]->second;
+  }
+
+  return getInt(Rec, KeyField.Name);
+}
+
+/// Less-than style comparison between \p LHS and \p RHS according to the
+/// key of \p Index.
+bool SearchableTableEmitter::compareBy(Record *LHS, Record *RHS,
+                                       const SearchIndex &Index) {
+  // Lexicographic comparison: walk the key fields in index order and
+  // return at the first field that differs.
+  for (const auto &Field : Index.Fields) {
+    Init *LHSI = LHS->getValueInit(Field.Name);
+    Init *RHSI = RHS->getValueInit(Field.Name);
+
+    if (isa<BitsRecTy>(Field.RecType) || isa<IntRecTy>(Field.RecType)) {
+      // Numeric fields compare by value.
+      int64_t LHSi = getAsInt(LHSI);
+      int64_t RHSi = getAsInt(RHSI);
+      if (LHSi < RHSi)
+        return true;
+      if (LHSi > RHSi)
+        return false;
+    } else if (Field.IsIntrinsic) {
+      // Intrinsics order by (target prefix, name).
+      CodeGenIntrinsic &LHSi = getIntrinsic(LHSI);
+      CodeGenIntrinsic &RHSi = getIntrinsic(RHSI);
+      if (std::tie(LHSi.TargetPrefix, LHSi.Name) <
+          std::tie(RHSi.TargetPrefix, RHSi.Name))
+        return true;
+      if (std::tie(LHSi.TargetPrefix, LHSi.Name) >
+          std::tie(RHSi.TargetPrefix, RHSi.Name))
+        return false;
+    } else if (Field.IsInstruction) {
+      // This does not correctly compare the predefined instructions!
+      Record *LHSr = cast<DefInit>(LHSI)->getDef();
+      Record *RHSr = cast<DefInit>(RHSI)->getDef();
+
+      // Pseudo instructions sort before real ones, then by record name.
+      bool LHSpseudo = LHSr->getValueAsBit("isPseudo");
+      bool RHSpseudo = RHSr->getValueAsBit("isPseudo");
+      if (LHSpseudo && !RHSpseudo)
+        return true;
+      if (!LHSpseudo && RHSpseudo)
+        return false;
+
+      int comp = LHSr->getName().compare(RHSr->getName());
+      if (comp < 0)
+        return true;
+      if (comp > 0)
+        return false;
+    } else if (Field.Enum) {
+      // Enum-typed fields order by their numeric enum value.
+      auto LHSr = cast<DefInit>(LHSI)->getDef();
+      auto RHSr = cast<DefInit>(RHSI)->getDef();
+      int64_t LHSv = Field.Enum->EntryMap[LHSr]->second;
+      int64_t RHSv = Field.Enum->EntryMap[RHSr]->second;
+      if (LHSv < RHSv)
+        return true;
+      if (LHSv > RHSv)
+        return false;
+    } else {
+      // Fall back to comparing the emitted representation; string fields
+      // compare case-insensitively (uppercased), matching the emitted
+      // index arrays which store uppercased strings.
+      std::string LHSs = primaryRepresentation(Index.Loc, Field, LHSI);
+      std::string RHSs = primaryRepresentation(Index.Loc, Field, RHSI);
+
+      if (isa<StringRecTy>(Field.RecType)) {
+        LHSs = StringRef(LHSs).upper();
+        RHSs = StringRef(RHSs).upper();
+      }
+
+      int comp = LHSs.compare(RHSs);
+      if (comp < 0)
+        return true;
+      if (comp > 0)
+        return false;
+    }
+  }
+  // All key fields equal.
+  return false;
+}
+
+// Emit "#ifdef <Guard>" and remember the guard name in PreprocessorGuards.
+void SearchableTableEmitter::emitIfdef(StringRef Guard, raw_ostream &OS) {
+  PreprocessorGuards.insert(Guard.str());
+  OS << "#ifdef " << Guard << "\n";
+}
+
+/// Emit a generic enum.
+/// Emit one generic enum definition, guarded by GET_<guard>_DECL.
+void SearchableTableEmitter::emitGenericEnum(const GenericEnum &Enum,
+                                             raw_ostream &OS) {
+  emitIfdef((Twine("GET_") + Enum.PreprocessorGuard + "_DECL").str(), OS);
+
+  OS << "enum " << Enum.Name << " {\n";
+  // Every entry is a (name, value) pair; emit each with an explicit value.
+  for (const std::unique_ptr<GenericEnum::Entry> &E : Enum.Entries) {
+    OS << "  " << E->first << " = " << E->second << ",\n";
+  }
+  OS << "};\n";
+
+  OS << "#endif\n\n";
+}
+
+// Emit the body of the lookup function for \p Index over \p Table. The
+// primary index searches the table array itself (which is sorted by the
+// primary key); a secondary index searches a static array of (key fields,
+// row number) built here. When the keys are dense integers starting at 0,
+// a direct array access is emitted instead of a binary search.
+void SearchableTableEmitter::emitLookupFunction(const GenericTable &Table,
+                                                const SearchIndex &Index,
+                                                bool IsPrimary,
+                                                raw_ostream &OS) {
+  OS << "\n";
+  emitLookupDeclaration(Table, Index, OS);
+  OS << " {\n";
+
+  std::vector<Record *> IndexRowsStorage;
+  ArrayRef<Record *> IndexRows;
+  StringRef IndexTypeName;
+  StringRef IndexName;
+
+  if (IsPrimary) {
+    // Primary lookups scan the table directly.
+    IndexTypeName = Table.CppTypeName;
+    IndexName = Table.Name;
+    IndexRows = Table.Entries;
+  } else {
+    // Secondary lookups get their own sorted index array whose elements
+    // hold the key fields plus the row's position in the primary table.
+    OS << "  struct IndexType {\n";
+    for (const auto &Field : Index.Fields) {
+      OS << "    "
+         << searchableFieldType(Table, Index, Field, TypeInStaticStruct) << " "
+         << Field.Name << ";\n";
+    }
+    OS << "    unsigned _index;\n";
+    OS << "  };\n";
+
+    OS << "  static const struct IndexType Index[] = {\n";
+
+    // Sort (row, original position) pairs by this index's key.
+    std::vector<std::pair<Record *, unsigned>> Entries;
+    Entries.reserve(Table.Entries.size());
+    for (unsigned i = 0; i < Table.Entries.size(); ++i)
+      Entries.emplace_back(Table.Entries[i], i);
+
+    llvm::stable_sort(Entries, [&](const std::pair<Record *, unsigned> &LHS,
+                                   const std::pair<Record *, unsigned> &RHS) {
+      return compareBy(LHS.first, RHS.first, Index);
+    });
+
+    IndexRowsStorage.reserve(Entries.size());
+    for (const auto &Entry : Entries) {
+      IndexRowsStorage.push_back(Entry.first);
+
+      OS << "    { ";
+      ListSeparator LS;
+      for (const auto &Field : Index.Fields) {
+        std::string Repr = primaryRepresentation(
+            Index.Loc, Field, Entry.first->getValueInit(Field.Name));
+        // String keys are stored uppercased for case-insensitive search.
+        if (isa<StringRecTy>(Field.RecType))
+          Repr = StringRef(Repr).upper();
+        OS << LS << Repr;
+      }
+      OS << ", " << Entry.second << " },\n";
+    }
+
+    OS << "  };\n\n";
+
+    IndexTypeName = "IndexType";
+    IndexName = "Index";
+    IndexRows = IndexRowsStorage;
+  }
+
+  // A single numeric key whose sorted values are exactly 0..N-1 can be
+  // looked up by direct indexing instead of binary search.
+  bool IsContiguous = false;
+
+  if (Index.Fields.size() == 1 &&
+      (Index.Fields[0].Enum || isa<BitsRecTy>(Index.Fields[0].RecType))) {
+    IsContiguous = true;
+    for (unsigned i = 0; i < IndexRows.size(); ++i) {
+      if (getNumericKey(Index, IndexRows[i]) != i) {
+        IsContiguous = false;
+        break;
+      }
+    }
+  }
+
+  if (IsContiguous) {
+    OS << "  auto Table = ArrayRef(" << IndexName << ");\n";
+    OS << "  size_t Idx = " << Index.Fields[0].Name << ";\n";
+    OS << "  return Idx >= Table.size() ? nullptr : ";
+    if (IsPrimary)
+      OS << "&Table[Idx]";
+    else
+      OS << "&" << Table.Name << "[Table[Idx]._index]";
+    OS << ";\n";
+    OS << "}\n";
+    return;
+  }
+
+  // Optional pre-check: bail out early when the first key field is outside
+  // the [first, last] range of the sorted rows.
+  if (Index.EarlyOut) {
+    const GenericField &Field = Index.Fields[0];
+    std::string FirstRepr = primaryRepresentation(
+        Index.Loc, Field, IndexRows[0]->getValueInit(Field.Name));
+    std::string LastRepr = primaryRepresentation(
+        Index.Loc, Field, IndexRows.back()->getValueInit(Field.Name));
+    OS << "  if ((" << Field.Name << " < " << FirstRepr << ") ||\n";
+    OS << "      (" << Field.Name << " > " << LastRepr << "))\n";
+    OS << "    return nullptr;\n\n";
+  }
+
+  // Pack the arguments into a key struct for std::lower_bound.
+  OS << "  struct KeyType {\n";
+  for (const auto &Field : Index.Fields) {
+    OS << "    " << searchableFieldType(Table, Index, Field, TypeInTempStruct)
+       << " " << Field.Name << ";\n";
+  }
+  OS << "  };\n";
+  OS << "  KeyType Key = {";
+  ListSeparator LS;
+  for (const auto &Field : Index.Fields) {
+    OS << LS << Field.Name;
+    if (isa<StringRecTy>(Field.RecType)) {
+      OS << ".upper()";
+      if (IsPrimary)
+        PrintFatalError(Index.Loc,
+                        Twine("In table '") + Table.Name +
+                            "', use a secondary lookup method for "
+                            "case-insensitive comparison of field '" +
+                            Field.Name + "'");
+    }
+  }
+  OS << "};\n";
+
+  // Binary search with a field-by-field less-than comparator mirroring
+  // the sort order established by compareBy.
+  OS << "  auto Table = ArrayRef(" << IndexName << ");\n";
+  OS << "  auto Idx = std::lower_bound(Table.begin(), Table.end(), Key,\n";
+  OS << "    [](const " << IndexTypeName << " &LHS, const KeyType &RHS) {\n";
+
+  for (const auto &Field : Index.Fields) {
+    if (isa<StringRecTy>(Field.RecType)) {
+      OS << "      int Cmp" << Field.Name << " = StringRef(LHS." << Field.Name
+         << ").compare(RHS." << Field.Name << ");\n";
+      OS << "      if (Cmp" << Field.Name << " < 0) return true;\n";
+      OS << "      if (Cmp" << Field.Name << " > 0) return false;\n";
+    } else if (Field.Enum) {
+      // Explicitly cast to unsigned, because the signedness of enums is
+      // compiler-dependent.
+      OS << "      if ((unsigned)LHS." << Field.Name << " < (unsigned)RHS."
+         << Field.Name << ")\n";
+      OS << "        return true;\n";
+      OS << "      if ((unsigned)LHS." << Field.Name << " > (unsigned)RHS."
+         << Field.Name << ")\n";
+      OS << "        return false;\n";
+    } else {
+      OS << "      if (LHS." << Field.Name << " < RHS." << Field.Name << ")\n";
+      OS << "        return true;\n";
+      OS << "      if (LHS." << Field.Name << " > RHS." << Field.Name << ")\n";
+      OS << "        return false;\n";
+    }
+  }
+
+  OS << "      return false;\n";
+  OS << "    });\n\n";
+
+  // lower_bound only finds the insertion point; verify an exact match.
+  OS << "  if (Idx == Table.end()";
+
+  for (const auto &Field : Index.Fields)
+    OS << " ||\n      Key." << Field.Name << " != Idx->" << Field.Name;
+  OS << ")\n    return nullptr;\n";
+
+  if (IsPrimary)
+    OS << "  return &*Idx;\n";
+  else
+    OS << "  return &" << Table.Name << "[Idx->_index];\n";
+
+  OS << "}\n";
+}
+
+// Emit the signature (without a trailing ';' or body) of the lookup function
+// for the given index, e.g. "const Foo *lookupFooByBar(unsigned Bar)". One
+// parameter is emitted per key field, typed via searchableFieldType().
+void SearchableTableEmitter::emitLookupDeclaration(const GenericTable &Table,
+                                                   const SearchIndex &Index,
+                                                   raw_ostream &OS) {
+  OS << "const " << Table.CppTypeName << " *" << Index.Name << "(";
+
+  ListSeparator LS;
+  for (const auto &Field : Index.Fields)
+    OS << LS << searchableFieldType(Table, Index, Field, TypeInArgument) << " "
+       << Field.Name;
+  OS << ")";
+}
+
+// Emit one generic table: lookup-function declarations under the
+// GET_<guard>_DECL preprocessor guard, then the constexpr data array and the
+// lookup-function definitions under GET_<guard>_IMPL.
+void SearchableTableEmitter::emitGenericTable(const GenericTable &Table,
+                                              raw_ostream &OS) {
+  emitIfdef((Twine("GET_") + Table.PreprocessorGuard + "_DECL").str(), OS);
+
+  // Emit the declarations for the functions that will perform lookup.
+  if (Table.PrimaryKey) {
+    emitLookupDeclaration(Table, *Table.PrimaryKey, OS);
+    OS << ";\n";
+  }
+  for (const auto &Index : Table.Indices) {
+    emitLookupDeclaration(Table, *Index, OS);
+    OS << ";\n";
+  }
+
+  OS << "#endif\n\n";
+
+  emitIfdef((Twine("GET_") + Table.PreprocessorGuard + "_IMPL").str(), OS);
+
+  // The primary data table contains all the fields defined for this map.
+  // Each entry is annotated with its index so index tables can refer back
+  // to it via "_index".
+  OS << "constexpr " << Table.CppTypeName << " " << Table.Name << "[] = {\n";
+  for (unsigned i = 0; i < Table.Entries.size(); ++i) {
+    Record *Entry = Table.Entries[i];
+    OS << "  { ";
+
+    ListSeparator LS;
+    for (const auto &Field : Table.Fields)
+      OS << LS
+         << primaryRepresentation(Table.Locs[0], Field,
+                                  Entry->getValueInit(Field.Name));
+
+    OS << " }, // " << i << "\n";
+  }
+  OS << " };\n";
+
+  // Indexes are sorted "{ Thing, PrimaryIdx }" arrays, so that a binary
+  // search can be performed by "Thing".
+  if (Table.PrimaryKey)
+    emitLookupFunction(Table, *Table.PrimaryKey, true, OS);
+  for (const auto &Index : Table.Indices)
+    emitLookupFunction(Table, *Index, false, OS);
+
+  OS << "#endif\n\n";
+}
+
+// Parse an explicit "TypeOf_<field>" override for a table field. Returns
+// true only when TypeOf is the literal string "code" (field holds raw C++
+// code) or names an existing GenericEnum record; any other value is left
+// for the caller to diagnose.
+bool SearchableTableEmitter::parseFieldType(GenericField &Field, Init *TypeOf) {
+  if (auto Type = dyn_cast<StringInit>(TypeOf)) {
+    if (Type->getValue() == "code") {
+      Field.IsCode = true;
+      return true;
+    } else {
+      if (Record *TypeRec = Records.getDef(Type->getValue())) {
+        if (TypeRec->isSubClassOf("GenericEnum")) {
+          // Tie the field to the already-collected enum so lookups can use
+          // the enum's numeric values.
+          Field.Enum = EnumMap[TypeRec];
+          Field.RecType = RecordRecTy::get(Field.Enum->Class);
+          return true;
+        }
+      }
+    }
+  }
+
+  return false;
+}
+
+// Build a SearchIndex from a list of key field names. Fatal-errors if a key
+// name does not match a table field, or if EarlyOut is requested with a
+// string-typed first key (early-out relies on ordered numeric comparison).
+std::unique_ptr<SearchIndex> SearchableTableEmitter::parseSearchIndex(
+    GenericTable &Table, const RecordVal *KeyRecVal, StringRef Name,
+    const std::vector<StringRef> &Key, bool EarlyOut) {
+  auto Index = std::make_unique<SearchIndex>();
+  Index->Name = std::string(Name);
+  Index->Loc = KeyRecVal->getLoc();
+  Index->EarlyOut = EarlyOut;
+
+  for (const auto &FieldName : Key) {
+    const GenericField *Field = Table.getFieldByName(FieldName);
+    if (!Field)
+      PrintFatalError(
+          KeyRecVal,
+          Twine("In table '") + Table.Name +
+              "', 'PrimaryKey' or 'Key' refers to nonexistent field '" +
+              FieldName + "'");
+
+    Index->Fields.push_back(*Field);
+  }
+
+  if (EarlyOut && isa<StringRecTy>(Index->Fields[0].RecType)) {
+    PrintFatalError(
+        KeyRecVal, Twine("In lookup method '") + Name + "', early-out is not " +
+                       "supported for a first key field of type string");
+  }
+
+  return Index;
+}
+
+// Populate Enum with one (name, value) entry per item record. The name comes
+// from NameField (or the record's own name when NameField is empty); the
+// value comes from ValueField. When no ValueField is given, entries are
+// sorted by name and assigned sequential values 0..N-1 afterwards.
+void SearchableTableEmitter::collectEnumEntries(
+    GenericEnum &Enum, StringRef NameField, StringRef ValueField,
+    const std::vector<Record *> &Items) {
+  for (auto *EntryRec : Items) {
+    StringRef Name;
+    if (NameField.empty())
+      Name = EntryRec->getName();
+    else
+      Name = EntryRec->getValueAsString(NameField);
+
+    int64_t Value = 0;
+    if (!ValueField.empty())
+      Value = getInt(EntryRec, ValueField);
+
+    Enum.Entries.push_back(std::make_unique<GenericEnum::Entry>(Name, Value));
+    Enum.EntryMap.insert(std::make_pair(EntryRec, Enum.Entries.back().get()));
+  }
+
+  if (ValueField.empty()) {
+    // stable_sort keeps the original relative order for duplicate names.
+    llvm::stable_sort(Enum.Entries,
+                      [](const std::unique_ptr<GenericEnum::Entry> &LHS,
+                         const std::unique_ptr<GenericEnum::Entry> &RHS) {
+                        return LHS->first < RHS->first;
+                      });
+
+    for (size_t i = 0; i < Enum.Entries.size(); ++i)
+      Enum.Entries[i]->second = i;
+  }
+}
+
+// Gather the item records into Table.Entries, inferring each field's RecTy
+// from the entries (and checking that all entries agree on a common type).
+// Also flags Intrinsic/Instruction-typed fields and finally sorts the
+// entries by all fields to give a deterministic emission order.
+void SearchableTableEmitter::collectTableEntries(
+    GenericTable &Table, const std::vector<Record *> &Items) {
+  if (Items.empty())
+    PrintFatalError(Table.Locs,
+                    Twine("Table '") + Table.Name + "' has no entries");
+
+  for (auto *EntryRec : Items) {
+    for (auto &Field : Table.Fields) {
+      auto TI = dyn_cast<TypedInit>(EntryRec->getValueInit(Field.Name));
+      if (!TI || !TI->isComplete()) {
+        PrintFatalError(EntryRec, Twine("Record '") + EntryRec->getName() +
+                                      "' for table '" + Table.Name +
+                                      "' is missing field '" + Field.Name +
+                                      "'");
+      }
+      if (!Field.RecType) {
+        // First entry seen for this field: adopt its type.
+        Field.RecType = TI->getType();
+      } else {
+        // Later entries must resolve to a type compatible with the one
+        // inferred so far.
+        RecTy *Ty = resolveTypes(Field.RecType, TI->getType());
+        if (!Ty)
+          PrintFatalError(EntryRec->getValue(Field.Name),
+                          Twine("Field '") + Field.Name + "' of table '" +
+                          Table.Name + "' entry has incompatible type: " +
+                          TI->getType()->getAsString() + " vs. " +
+                          Field.RecType->getAsString());
+        Field.RecType = Ty;
+      }
+    }
+
+    Table.Entries.push_back(EntryRec); // Add record to table's record list.
+  }
+
+  Record *IntrinsicClass = Records.getClass("Intrinsic");
+  Record *InstructionClass = Records.getClass("Instruction");
+  for (auto &Field : Table.Fields) {
+    if (!Field.RecType)
+      PrintFatalError(Twine("Cannot determine type of field '") + Field.Name +
+                      "' in table '" + Table.Name + "'. Maybe it is not used?");
+
+    if (auto RecordTy = dyn_cast<RecordRecTy>(Field.RecType)) {
+      if (IntrinsicClass && RecordTy->isSubClassOf(IntrinsicClass))
+        Field.IsIntrinsic = true;
+      else if (InstructionClass && RecordTy->isSubClassOf(InstructionClass))
+        Field.IsInstruction = true;
+    }
+  }
+
+  // Sort by every field (in declaration order) for deterministic output.
+  SearchIndex Idx;
+  std::copy(Table.Fields.begin(), Table.Fields.end(),
+            std::back_inserter(Idx.Fields));
+  llvm::sort(Table.Entries, [&](Record *LHS, Record *RHS) {
+    return compareBy(LHS, RHS, Idx);
+  });
+}
+
+// Backend entry point: collect all GenericEnum, GenericTable and SearchIndex
+// definitions (plus the legacy SearchableTable classes), then emit the enums,
+// tables and lookup functions, and finally the matching #undef lines.
+void SearchableTableEmitter::run(raw_ostream &OS) {
+  // Emit tables in a deterministic order to avoid needless rebuilds.
+  SmallVector<std::unique_ptr<GenericTable>, 4> Tables;
+  DenseMap<Record *, GenericTable *> TableMap;
+
+  // Collect all definitions first.
+  for (auto *EnumRec : Records.getAllDerivedDefinitions("GenericEnum")) {
+    StringRef NameField;
+    if (!EnumRec->isValueUnset("NameField"))
+      NameField = EnumRec->getValueAsString("NameField");
+
+    StringRef ValueField;
+    if (!EnumRec->isValueUnset("ValueField"))
+      ValueField = EnumRec->getValueAsString("ValueField");
+
+    auto Enum = std::make_unique<GenericEnum>();
+    Enum->Name = std::string(EnumRec->getName());
+    Enum->PreprocessorGuard = std::string(EnumRec->getName());
+
+    StringRef FilterClass = EnumRec->getValueAsString("FilterClass");
+    Enum->Class = Records.getClass(FilterClass);
+    if (!Enum->Class)
+      PrintFatalError(EnumRec->getValue("FilterClass"),
+                      Twine("Enum FilterClass '") + FilterClass +
+                          "' does not exist");
+
+    collectEnumEntries(*Enum, NameField, ValueField,
+                       Records.getAllDerivedDefinitions(FilterClass));
+    EnumMap.insert(std::make_pair(EnumRec, Enum.get()));
+    Enums.emplace_back(std::move(Enum));
+  }
+
+  // Collect GenericTable definitions; enums must already be in EnumMap so
+  // that parseFieldType() can resolve enum-typed fields.
+  for (auto *TableRec : Records.getAllDerivedDefinitions("GenericTable")) {
+    auto Table = std::make_unique<GenericTable>();
+    Table->Name = std::string(TableRec->getName());
+    Table->Locs = TableRec->getLoc();
+    Table->PreprocessorGuard = std::string(TableRec->getName());
+    Table->CppTypeName = std::string(TableRec->getValueAsString("CppTypeName"));
+
+    std::vector<StringRef> Fields = TableRec->getValueAsListOfStrings("Fields");
+    for (const auto &FieldName : Fields) {
+      Table->Fields.emplace_back(FieldName); // Construct a GenericField.
+
+      // An optional "TypeOf_<field>" value overrides the inferred type.
+      if (auto TypeOfRecordVal = TableRec->getValue(("TypeOf_" + FieldName).str())) {
+        if (!parseFieldType(Table->Fields.back(), TypeOfRecordVal->getValue())) {
+          PrintError(TypeOfRecordVal,
+                     Twine("Table '") + Table->Name +
+                         "' has invalid 'TypeOf_" + FieldName +
+                         "': " + TypeOfRecordVal->getValue()->getAsString());
+          PrintFatalNote("The 'TypeOf_xxx' field must be a string naming a "
+                         "GenericEnum record, or \"code\"");
+        }
+      }
+    }
+
+    StringRef FilterClass = TableRec->getValueAsString("FilterClass");
+    if (!Records.getClass(FilterClass))
+      PrintFatalError(TableRec->getValue("FilterClass"),
+                      Twine("Table FilterClass '") +
+                          FilterClass + "' does not exist");
+
+    collectTableEntries(*Table, Records.getAllDerivedDefinitions(FilterClass));
+
+    if (!TableRec->isValueUnset("PrimaryKey")) {
+      Table->PrimaryKey =
+          parseSearchIndex(*Table, TableRec->getValue("PrimaryKey"),
+                           TableRec->getValueAsString("PrimaryKeyName"),
+                           TableRec->getValueAsListOfStrings("PrimaryKey"),
+                           TableRec->getValueAsBit("PrimaryKeyEarlyOut"));
+
+      // Re-sort by the primary key so the emitted array is binary-searchable.
+      llvm::stable_sort(Table->Entries, [&](Record *LHS, Record *RHS) {
+        return compareBy(LHS, RHS, *Table->PrimaryKey);
+      });
+    }
+
+    TableMap.insert(std::make_pair(TableRec, Table.get()));
+    Tables.emplace_back(std::move(Table));
+  }
+
+  // Attach standalone SearchIndex records to their tables.
+  for (Record *IndexRec : Records.getAllDerivedDefinitions("SearchIndex")) {
+    Record *TableRec = IndexRec->getValueAsDef("Table");
+    auto It = TableMap.find(TableRec);
+    if (It == TableMap.end())
+      PrintFatalError(IndexRec->getValue("Table"),
+                      Twine("SearchIndex '") + IndexRec->getName() +
+                          "' refers to nonexistent table '" +
+                          TableRec->getName());
+
+    GenericTable &Table = *It->second;
+    Table.Indices.push_back(
+        parseSearchIndex(Table, IndexRec->getValue("Key"), IndexRec->getName(),
+                         IndexRec->getValueAsListOfStrings("Key"),
+                         IndexRec->getValueAsBit("EarlyOut")));
+  }
+
+  // Translate legacy tables: classes deriving directly (and only) from
+  // SearchableTable describe an enum and/or a table implicitly.
+  Record *SearchableTable = Records.getClass("SearchableTable");
+  for (auto &NameRec : Records.getClasses()) {
+    Record *Class = NameRec.second.get();
+    if (Class->getSuperClasses().size() != 1 ||
+        !Class->isSubClassOf(SearchableTable))
+      continue;
+
+    StringRef TableName = Class->getName();
+    std::vector<Record *> Items = Records.getAllDerivedDefinitions(TableName);
+    if (!Class->isValueUnset("EnumNameField")) {
+      StringRef NameField = Class->getValueAsString("EnumNameField");
+      StringRef ValueField;
+      if (!Class->isValueUnset("EnumValueField"))
+        ValueField = Class->getValueAsString("EnumValueField");
+
+      auto Enum = std::make_unique<GenericEnum>();
+      Enum->Name = (Twine(Class->getName()) + "Values").str();
+      Enum->PreprocessorGuard = Class->getName().upper();
+      Enum->Class = Class;
+
+      collectEnumEntries(*Enum, NameField, ValueField, Items);
+
+      Enums.emplace_back(std::move(Enum));
+    }
+
+    auto Table = std::make_unique<GenericTable>();
+    Table->Name = (Twine(Class->getName()) + "sList").str();
+    Table->Locs = Class->getLoc();
+    Table->PreprocessorGuard = Class->getName().upper();
+    Table->CppTypeName = std::string(Class->getName());
+
+    for (const RecordVal &Field : Class->getValues()) {
+      std::string FieldName = std::string(Field.getName());
+
+      // Skip uninteresting fields: either special to us, or injected
+      // template parameters (if they contain a ':').
+      if (FieldName.find(':') != std::string::npos ||
+          FieldName == "SearchableFields" || FieldName == "EnumNameField" ||
+          FieldName == "EnumValueField")
+        continue;
+
+      Table->Fields.emplace_back(FieldName);
+    }
+
+    collectTableEntries(*Table, Items);
+
+    // Each "SearchableFields" entry yields a lookup<Type>By<Field> index.
+    for (const auto &Field :
+         Class->getValueAsListOfStrings("SearchableFields")) {
+      std::string Name =
+          (Twine("lookup") + Table->CppTypeName + "By" + Field).str();
+      Table->Indices.push_back(parseSearchIndex(*Table, Class->getValue(Field),
+                                                Name, {Field}, false));
+    }
+
+    Tables.emplace_back(std::move(Table));
+  }
+
+  // Emit everything.
+  for (const auto &Enum : Enums)
+    emitGenericEnum(*Enum, OS);
+
+  for (const auto &Table : Tables)
+    emitGenericTable(*Table, OS);
+
+  // Put all #undefs last, to allow multiple sections guarded by the same
+  // define.
+  for (const auto &Guard : PreprocessorGuards)
+    OS << "#undef " << Guard << "\n";
+}
+
+namespace llvm {
+
+// Public TableGen backend entry point for -gen-searchable-tables.
+void EmitSearchableTables(RecordKeeper &RK, raw_ostream &OS) {
+  SearchableTableEmitter(RK).run(OS);
+}
+
+} // End llvm namespace.
diff --git a/contrib/libs/llvm16/utils/TableGen/SequenceToOffsetTable.h b/contrib/libs/llvm16/utils/TableGen/SequenceToOffsetTable.h
new file mode 100644
index 0000000000..77a404d07b
--- /dev/null
+++ b/contrib/libs/llvm16/utils/TableGen/SequenceToOffsetTable.h
@@ -0,0 +1,175 @@
+//===-- SequenceToOffsetTable.h - Compress similar sequences ----*- C++ -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// SequenceToOffsetTable can be used to emit a number of null-terminated
+// sequences as one big array. Use the same memory when a sequence is a suffix
+// of another.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_UTILS_TABLEGEN_SEQUENCETOOFFSETTABLE_H
+#define LLVM_UTILS_TABLEGEN_SEQUENCETOOFFSETTABLE_H
+
+#include "llvm/Support/CommandLine.h"
+#include "llvm/Support/raw_ostream.h"
+#include <algorithm>
+#include <cassert>
+#include <cctype>
+#include <functional>
+#include <map>
+
+namespace llvm {
+extern llvm::cl::opt<bool> EmitLongStrLiterals;
+
+// Print C as a C character literal when it is printable (escaping '\' and
+// '\''); otherwise fall back to its unsigned numeric value.
+static inline void printChar(raw_ostream &OS, char C) {
+  unsigned char UC(C);
+  if (isalnum(UC) || ispunct(UC)) {
+    OS << '\'';
+    if (C == '\\' || C == '\'')
+      OS << '\\';
+    OS << C << '\'';
+  } else {
+    OS << unsigned(UC);
+  }
+}
+
+/// SequenceToOffsetTable - Collect a number of terminated sequences of T.
+/// Compute the layout of a table that contains all the sequences, possibly by
+/// reusing entries.
+///
+/// @tparam SeqT The sequence container. (vector or string).
+/// @tparam Less A stable comparator for SeqT elements.
+template<typename SeqT, typename Less = std::less<typename SeqT::value_type> >
+class SequenceToOffsetTable {
+  typedef typename SeqT::value_type ElemT;
+
+  // Define a comparator for SeqT that sorts a suffix immediately before a
+  // sequence with that suffix. Comparison is lexicographic over the
+  // *reversed* sequences, so shared suffixes become map neighbors.
+  struct SeqLess {
+    Less L;
+    bool operator()(const SeqT &A, const SeqT &B) const {
+      return std::lexicographical_compare(A.rbegin(), A.rend(),
+                                          B.rbegin(), B.rend(), L);
+    }
+  };
+
+  // Keep sequences ordered according to SeqLess so suffixes are easy to find.
+  // Map each sequence to its offset in the table.
+  typedef std::map<SeqT, unsigned, SeqMap> SeqMap;
+
+  // Sequences added so far, with suffixes removed.
+  SeqMap Seqs;
+
+  // Entries in the final table, or 0 before layout was called.
+  unsigned Entries;
+
+  // isSuffix - Returns true if A is a suffix of B.
+  static bool isSuffix(const SeqT &A, const SeqT &B) {
+    return A.size() <= B.size() && std::equal(A.rbegin(), A.rend(), B.rbegin());
+  }
+
+public:
+  SequenceToOffsetTable() : Entries(0) {}
+
+  /// add - Add a sequence to the table.
+  /// This must be called before layout().
+  void add(const SeqT &Seq) {
+    assert(Entries == 0 && "Cannot call add() after layout()");
+    typename SeqMap::iterator I = Seqs.lower_bound(Seq);
+
+    // If SeqMap contains a sequence that has Seq as a suffix, I will be
+    // pointing to it.
+    if (I != Seqs.end() && isSuffix(Seq, I->first))
+      return;
+
+    I = Seqs.insert(I, std::make_pair(Seq, 0u));
+
+    // The entry before I may be a suffix of Seq that can now be erased.
+    if (I != Seqs.begin() && isSuffix((--I)->first, Seq))
+      Seqs.erase(I);
+  }
+
+  bool empty() const { return Seqs.empty(); }
+
+  unsigned size() const {
+    assert((empty() || Entries) && "Call layout() before size()");
+    return Entries;
+  }
+
+  /// layout - Computes the final table layout.
+  void layout() {
+    assert(Entries == 0 && "Can only call layout() once");
+    // Lay out the table in Seqs iteration order.
+    for (typename SeqMap::iterator I = Seqs.begin(), E = Seqs.end(); I != E;
+         ++I) {
+      I->second = Entries;
+      // Include space for a terminator.
+      Entries += I->first.size() + 1;
+    }
+  }
+
+  /// get - Returns the offset of Seq in the final table.
+  /// Seq may be a suffix of a stored sequence; the offset is then adjusted
+  /// by the length difference so it points at the suffix's first element.
+  unsigned get(const SeqT &Seq) const {
+    assert(Entries && "Call layout() before get()");
+    typename SeqMap::const_iterator I = Seqs.lower_bound(Seq);
+    assert(I != Seqs.end() && isSuffix(Seq, I->first) &&
+           "get() called with sequence that wasn't added first");
+    return I->second + (I->first.size() - Seq.size());
+  }
+
+  /// `emitStringLiteralDef` - Print out the table as the body of an array
+  /// initializer, where each element is a C string literal terminated by
+  /// `\0`. Falls back to emitting a comma-separated integer list if
+  /// `EmitLongStrLiterals` is false
+  void emitStringLiteralDef(raw_ostream &OS, const llvm::Twine &Decl) const {
+    assert(Entries && "Call layout() before emitStringLiteralDef()");
+    if (!EmitLongStrLiterals) {
+      OS << Decl << " = {\n";
+      emit(OS, printChar, "0");
+      OS << "  0\n};\n\n";
+      return;
+    }
+
+    // Suppress -Woverlength-strings: the concatenated literal may exceed
+    // the minimum length the standard requires compilers to support.
+    OS << "\n#ifdef __GNUC__\n"
+       << "#pragma GCC diagnostic push\n"
+       << "#pragma GCC diagnostic ignored \"-Woverlength-strings\"\n"
+       << "#endif\n"
+       << Decl << " = {\n";
+    for (auto I : Seqs) {
+      OS << "  /* " << I.second << " */ \"";
+      OS.write_escaped(I.first);
+      OS << "\\0\"\n";
+    }
+    OS << "};\n"
+       << "#ifdef __GNUC__\n"
+       << "#pragma GCC diagnostic pop\n"
+       << "#endif\n\n";
+  }
+
+  /// emit - Print out the table as the body of an array initializer.
+  /// Use the Print function to print elements.
+  void emit(raw_ostream &OS,
+            void (*Print)(raw_ostream&, ElemT),
+            const char *Term = "0") const {
+    assert((empty() || Entries) && "Call layout() before emit()");
+    for (typename SeqMap::const_iterator I = Seqs.begin(), E = Seqs.end();
+         I != E; ++I) {
+      OS << "  /* " << I->second << " */ ";
+      for (typename SeqT::const_iterator SI = I->first.begin(),
+             SE = I->first.end(); SI != SE; ++SI) {
+        Print(OS, *SI);
+        OS << ", ";
+      }
+      OS << Term << ",\n";
+    }
+  }
+};
+
+} // end namespace llvm
+
+#endif
diff --git a/contrib/libs/llvm16/utils/TableGen/SubtargetEmitter.cpp b/contrib/libs/llvm16/utils/TableGen/SubtargetEmitter.cpp
new file mode 100644
index 0000000000..8afe6d37d0
--- /dev/null
+++ b/contrib/libs/llvm16/utils/TableGen/SubtargetEmitter.cpp
@@ -0,0 +1,1993 @@
+//===- SubtargetEmitter.cpp - Generate subtarget enumerations -------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// This tablegen backend emits subtarget enumerations.
+//
+//===----------------------------------------------------------------------===//
+
+#include "CodeGenSchedule.h"
+#include "CodeGenTarget.h"
+#include "PredicateExpander.h"
+#include "llvm/ADT/SmallPtrSet.h"
+#include "llvm/ADT/STLExtras.h"
+#include "llvm/ADT/StringExtras.h"
+#include "llvm/ADT/StringRef.h"
+#include "llvm/MC/MCInstrItineraries.h"
+#include "llvm/MC/MCSchedule.h"
+#include "llvm/MC/SubtargetFeature.h"
+#include "llvm/Support/Debug.h"
+#include "llvm/Support/Format.h"
+#include "llvm/Support/raw_ostream.h"
+#include "llvm/TableGen/Error.h"
+#include "llvm/TableGen/Record.h"
+#include "llvm/TableGen/TableGenBackend.h"
+#include <algorithm>
+#include <cassert>
+#include <cstdint>
+#include <iterator>
+#include <map>
+#include <string>
+#include <vector>
+
+using namespace llvm;
+
+#define DEBUG_TYPE "subtarget-emitter"
+
+namespace {
+
+// TableGen backend that emits subtarget feature enums, CPU/feature tables,
+// itineraries and MC scheduling model tables for a target.
+class SubtargetEmitter {
+  // Each processor has a SchedClassDesc table with an entry for each SchedClass.
+  // The SchedClassDesc table indexes into a global write resource table, write
+  // latency table, and read advance table.
+  struct SchedClassTables {
+    std::vector<std::vector<MCSchedClassDesc>> ProcSchedClasses;
+    std::vector<MCWriteProcResEntry> WriteProcResources;
+    std::vector<MCWriteLatencyEntry> WriteLatencies;
+    std::vector<std::string> WriterNames;
+    std::vector<MCReadAdvanceEntry> ReadAdvanceEntries;
+
+    // Reserve an invalid entry at index 0
+    SchedClassTables() {
+      ProcSchedClasses.resize(1);
+      WriteProcResources.resize(1);
+      WriteLatencies.resize(1);
+      WriterNames.push_back("InvalidWrite");
+      ReadAdvanceEntries.resize(1);
+    }
+  };
+
+  // Orders write-proc-resource entries by processor resource index.
+  struct LessWriteProcResources {
+    bool operator()(const MCWriteProcResEntry &LHS,
+                    const MCWriteProcResEntry &RHS) {
+      return LHS.ProcResourceIdx < RHS.ProcResourceIdx;
+    }
+  };
+
+  const CodeGenTarget &TGT;
+  RecordKeeper &Records;
+  CodeGenSchedModels &SchedModels;
+  std::string Target;
+
+  void Enumeration(raw_ostream &OS, DenseMap<Record *, unsigned> &FeatureMap);
+  void EmitSubtargetInfoMacroCalls(raw_ostream &OS);
+  unsigned FeatureKeyValues(raw_ostream &OS,
+                            const DenseMap<Record *, unsigned> &FeatureMap);
+  unsigned CPUKeyValues(raw_ostream &OS,
+                        const DenseMap<Record *, unsigned> &FeatureMap);
+  void FormItineraryStageString(const std::string &Names,
+                                Record *ItinData, std::string &ItinString,
+                                unsigned &NStages);
+  void FormItineraryOperandCycleString(Record *ItinData, std::string &ItinString,
+                                       unsigned &NOperandCycles);
+  void FormItineraryBypassString(const std::string &Names,
+                                 Record *ItinData,
+                                 std::string &ItinString, unsigned NOperandCycles);
+  void EmitStageAndOperandCycleData(raw_ostream &OS,
+                                    std::vector<std::vector<InstrItinerary>>
+                                      &ProcItinLists);
+  void EmitItineraries(raw_ostream &OS,
+                       std::vector<std::vector<InstrItinerary>>
+                         &ProcItinLists);
+  unsigned EmitRegisterFileTables(const CodeGenProcModel &ProcModel,
+                                  raw_ostream &OS);
+  void EmitLoadStoreQueueInfo(const CodeGenProcModel &ProcModel,
+                              raw_ostream &OS);
+  void EmitExtraProcessorInfo(const CodeGenProcModel &ProcModel,
+                              raw_ostream &OS);
+  void EmitProcessorProp(raw_ostream &OS, const Record *R, StringRef Name,
+                         char Separator);
+  void EmitProcessorResourceSubUnits(const CodeGenProcModel &ProcModel,
+                                     raw_ostream &OS);
+  void EmitProcessorResources(const CodeGenProcModel &ProcModel,
+                              raw_ostream &OS);
+  Record *FindWriteResources(const CodeGenSchedRW &SchedWrite,
+                             const CodeGenProcModel &ProcModel);
+  Record *FindReadAdvance(const CodeGenSchedRW &SchedRead,
+                          const CodeGenProcModel &ProcModel);
+  void ExpandProcResources(RecVec &PRVec, std::vector<int64_t> &Cycles,
+                           const CodeGenProcModel &ProcModel);
+  void GenSchedClassTables(const CodeGenProcModel &ProcModel,
+                           SchedClassTables &SchedTables);
+  void EmitSchedClassTables(SchedClassTables &SchedTables, raw_ostream &OS);
+  void EmitProcessorModels(raw_ostream &OS);
+  void EmitSchedModelHelpers(const std::string &ClassName, raw_ostream &OS);
+  void emitSchedModelHelpersImpl(raw_ostream &OS,
+                                 bool OnlyExpandMCInstPredicates = false);
+  void emitGenMCSubtargetInfo(raw_ostream &OS);
+  void EmitMCInstrAnalysisPredicateFunctions(raw_ostream &OS);
+
+  void EmitSchedModel(raw_ostream &OS);
+  void EmitHwModeCheck(const std::string &ClassName, raw_ostream &OS);
+  void ParseFeaturesFunction(raw_ostream &OS);
+
+public:
+  SubtargetEmitter(RecordKeeper &R, CodeGenTarget &TGT)
+      : TGT(TGT), Records(R), SchedModels(TGT.getSchedModels()),
+        Target(TGT.getName()) {}
+
+  void run(raw_ostream &o);
+};
+
+} // end anonymous namespace
+
+//
+// Enumeration - Emit the specified class as an enumeration.
+//
+void SubtargetEmitter::Enumeration(raw_ostream &OS,
+                                   DenseMap<Record *, unsigned> &FeatureMap) {
+  // Get all records of class and sort
+  std::vector<Record*> DefList =
+    Records.getAllDerivedDefinitions("SubtargetFeature");
+  llvm::sort(DefList, LessRecord());
+
+  unsigned N = DefList.size();
+  if (N == 0)
+    return;
+  // One extra slot is needed beyond the N features themselves.
+  if (N + 1 > MAX_SUBTARGET_FEATURES)
+    PrintFatalError("Too many subtarget features! Bump MAX_SUBTARGET_FEATURES.");
+
+  OS << "namespace " << Target << " {\n";
+
+  // Open enumeration.
+  OS << "enum {\n";
+
+  // For each record
+  for (unsigned i = 0; i < N; ++i) {
+    // Next record
+    Record *Def = DefList[i];
+
+    // Get and emit name
+    OS << "  " << Def->getName() << " = " << i << ",\n";
+
+    // Save the index for this feature so later tables can build bitmasks.
+    FeatureMap[Def] = i;
+  }
+
+  OS << "  "
+     << "NumSubtargetFeatures = " << N << "\n";
+
+  // Close enumeration and namespace
+  OS << "};\n";
+  OS << "} // end namespace " << Target << "\n";
+}
+
+// Emit the initializer for a FeatureBitset covering FeatureList: each
+// feature's enum index (from FeatureMap) sets one bit in an array of
+// 64-bit words, printed as "{ { { 0x...ULL, ... } } }".
+static void printFeatureMask(raw_ostream &OS, RecVec &FeatureList,
+                             const DenseMap<Record *, unsigned> &FeatureMap) {
+  std::array<uint64_t, MAX_SUBTARGET_WORDS> Mask = {};
+  for (const Record *Feature : FeatureList) {
+    unsigned Bit = FeatureMap.lookup(Feature);
+    Mask[Bit / 64] |= 1ULL << (Bit % 64);
+  }
+
+  OS << "{ { { ";
+  for (unsigned i = 0; i != Mask.size(); ++i) {
+    OS << "0x";
+    OS.write_hex(Mask[i]);
+    OS << "ULL, ";
+  }
+  OS << "} } }";
+}
+
+/// Emit some information about the SubtargetFeature as calls to a macro so
+/// that they can be used from C++.
+void SubtargetEmitter::EmitSubtargetInfoMacroCalls(raw_ostream &OS) {
+  OS << "\n#ifdef GET_SUBTARGETINFO_MACRO\n";
+
+  std::vector<Record *> FeatureList =
+      Records.getAllDerivedDefinitions("SubtargetFeature");
+  llvm::sort(FeatureList, LessRecordFieldName());
+
+  for (const Record *Feature : FeatureList) {
+    const StringRef Attribute = Feature->getValueAsString("Attribute");
+    const StringRef Value = Feature->getValueAsString("Value");
+
+    // Only handle boolean features for now, excluding BitVectors and enums.
+    const bool IsBool = (Value == "false" || Value == "true") &&
+                        !StringRef(Attribute).contains('[');
+    if (!IsBool)
+      continue;
+
+    // Some features default to true, with values set to false if enabled.
+    const char *Default = Value == "false" ? "true" : "false";
+
+    // Define the getter with lowercased first char: xxxYyy() { return XxxYyy; }
+    const std::string Getter =
+        Attribute.substr(0, 1).lower() + Attribute.substr(1).str();
+
+    OS << "GET_SUBTARGETINFO_MACRO(" << Attribute << ", " << Default << ", "
+       << Getter << ")\n";
+  }
+  OS << "#undef GET_SUBTARGETINFO_MACRO\n";
+  OS << "#endif // GET_SUBTARGETINFO_MACRO\n\n";
+
+  // Open the MC-descriptor section consumed by subsequent emitters.
+  OS << "\n#ifdef GET_SUBTARGETINFO_MC_DESC\n";
+  OS << "#undef GET_SUBTARGETINFO_MC_DESC\n\n";
+}
+
+//
+// FeatureKeyValues - Emit data of all the subtarget features. Used by the
+// command line.
+//
+unsigned SubtargetEmitter::FeatureKeyValues(
+    raw_ostream &OS, const DenseMap<Record *, unsigned> &FeatureMap) {
+  // Gather and sort all the features
+  std::vector<Record*> FeatureList =
+                           Records.getAllDerivedDefinitions("SubtargetFeature");
+
+  if (FeatureList.empty())
+    return 0;
+
+  llvm::sort(FeatureList, LessRecordFieldName());
+
+  // Begin feature table
+  OS << "// Sorted (by key) array of values for CPU features.\n"
+     << "extern const llvm::SubtargetFeatureKV " << Target
+     << "FeatureKV[] = {\n";
+
+  // For each feature
+  unsigned NumFeatures = 0;
+  for (const Record *Feature : FeatureList) {
+    // Next feature
+    StringRef Name = Feature->getName();
+    StringRef CommandLineName = Feature->getValueAsString("Name");
+    StringRef Desc = Feature->getValueAsString("Desc");
+
+    // Features without a command-line name are internal-only; skip them.
+    if (CommandLineName.empty()) continue;
+
+    // Emit as { "feature", "description", { featureEnum }, { i1 , i2 , ... , in } }
+    OS << "  { "
+       << "\"" << CommandLineName << "\", "
+       << "\"" << Desc << "\", "
+       << Target << "::" << Name << ", ";
+
+    RecVec ImpliesList = Feature->getValueAsListOfDefs("Implies");
+
+    printFeatureMask(OS, ImpliesList, FeatureMap);
+
+    OS << " },\n";
+    ++NumFeatures;
+  }
+
+  // End feature table
+  OS << "};\n";
+
+  return NumFeatures;
+}
+
+//
+// CPUKeyValues - Emit data of all the subtarget processors. Used by command
+// line.
+//
+unsigned
+SubtargetEmitter::CPUKeyValues(raw_ostream &OS,
+                               const DenseMap<Record *, unsigned> &FeatureMap) {
+  // Gather and sort processor information
+  std::vector<Record*> ProcessorList =
+                          Records.getAllDerivedDefinitions("Processor");
+  llvm::sort(ProcessorList, LessRecordFieldName());
+
+  // Begin processor table
+  OS << "// Sorted (by key) array of values for CPU subtype.\n"
+     << "extern const llvm::SubtargetSubTypeKV " << Target
+     << "SubTypeKV[] = {\n";
+
+  // For each processor
+  for (Record *Processor : ProcessorList) {
+    StringRef Name = Processor->getValueAsString("Name");
+    RecVec FeatureList = Processor->getValueAsListOfDefs("Features");
+    RecVec TuneFeatureList = Processor->getValueAsListOfDefs("TuneFeatures");
+
+    // Emit as { "cpu", "description", 0, { f1 , f2 , ... fn } },
+    OS << " { "
+       << "\"" << Name << "\", ";
+
+    // Two masks: implied ISA features, then tuning-only features.
+    printFeatureMask(OS, FeatureList, FeatureMap);
+    OS << ", ";
+    printFeatureMask(OS, TuneFeatureList, FeatureMap);
+
+    // Emit the scheduler model pointer.
+    const std::string &ProcModelName =
+      SchedModels.getModelForProc(Processor).ModelName;
+    OS << ", &" << ProcModelName << " },\n";
+  }
+
+  // End processor table
+  OS << "};\n";
+
+  return ProcessorList.size();
+}
+
+//
+// FormItineraryStageString - Compose a string containing the stage
+// data initialization for the specified itinerary. N is the number
+// of stages.
+//
+void SubtargetEmitter::FormItineraryStageString(const std::string &Name,
+                                                Record *ItinData,
+                                                std::string &ItinString,
+                                                unsigned &NStages) {
+  // Get states list
+  RecVec StageList = ItinData->getValueAsListOfDefs("Stages");
+
+  // For each stage
+  unsigned N = NStages = StageList.size();
+  for (unsigned i = 0; i < N;) {
+    // Next stage
+    const Record *Stage = StageList[i];
+
+    // Form string as ,{ cycles, u1 | u2 | ... | un, timeinc, kind }
+    int Cycles = Stage->getValueAsInt("Cycles");
+    ItinString += "  { " + itostr(Cycles) + ", ";
+
+    // Get unit list
+    RecVec UnitList = Stage->getValueAsListOfDefs("Units");
+
+    // For each unit: OR together the per-processor functional-unit bits
+    // emitted earlier into the <Name>FU namespace.
+    for (unsigned j = 0, M = UnitList.size(); j < M;) {
+      // Add name and bitwise or
+      ItinString += Name + "FU::" + UnitList[j]->getName().str();
+      if (++j < M) ItinString += " | ";
+    }
+
+    int TimeInc = Stage->getValueAsInt("TimeInc");
+    ItinString += ", " + itostr(TimeInc);
+
+    int Kind = Stage->getValueAsInt("Kind");
+    ItinString += ", (llvm::InstrStage::ReservationKinds)" + itostr(Kind);
+
+    // Close off stage
+    ItinString += " }";
+    if (++i < N) ItinString += ", ";
+  }
+}
+
+//
+// FormItineraryOperandCycleString - Compose a string containing the
+// operand cycle initialization for the specified itinerary. N is the
+// number of operands that has cycles specified.
+//
+void SubtargetEmitter::FormItineraryOperandCycleString(Record *ItinData,
+         std::string &ItinString, unsigned &NOperandCycles) {
+  // Get operand cycle list
+  std::vector<int64_t> OperandCycleList =
+    ItinData->getValueAsListOfInts("OperandCycles");
+
+  // For each operand cycle, append it as a comma-separated list item.
+  NOperandCycles = OperandCycleList.size();
+  ListSeparator LS;
+  for (int OCycle : OperandCycleList) {
+    // Next operand cycle
+    ItinString += LS;
+    ItinString += "  " + itostr(OCycle);
+  }
+}
+
+// Compose the forwarding-path (bypass) initializer list for an itinerary,
+// padding with "0" entries so the list has one element per operand cycle.
+void SubtargetEmitter::FormItineraryBypassString(const std::string &Name,
+                                                 Record *ItinData,
+                                                 std::string &ItinString,
+                                                 unsigned NOperandCycles) {
+  RecVec BypassList = ItinData->getValueAsListOfDefs("Bypasses");
+  unsigned N = BypassList.size();
+  unsigned i = 0;
+  ListSeparator LS;
+  for (; i < N; ++i) {
+    ItinString += LS;
+    ItinString += Name + "Bypass::" + BypassList[i]->getName().str();
+  }
+  // Pad remaining operand slots with zero (no bypass).
+  for (; i < NOperandCycles; ++i) {
+    ItinString += LS;
+    ItinString += " 0";
+  }
+}
+
+//
+// EmitStageAndOperandCycleData - Generate unique itinerary stages and operand
+// cycle tables. Create a list of InstrItinerary objects (ProcItinLists) indexed
+// by CodeGenSchedClass::Index.
+//
+void SubtargetEmitter::
+EmitStageAndOperandCycleData(raw_ostream &OS,
+ std::vector<std::vector<InstrItinerary>>
+ &ProcItinLists) {
+ // Multiple processor models may share an itinerary record. Emit it once.
+ SmallPtrSet<Record*, 8> ItinsDefSet;
+
+ // Emit functional units for all the itineraries.
+ for (const CodeGenProcModel &ProcModel : SchedModels.procModels()) {
+
+ if (!ItinsDefSet.insert(ProcModel.ItinsDef).second)
+ continue;
+
+ RecVec FUs = ProcModel.ItinsDef->getValueAsListOfDefs("FU");
+ if (FUs.empty())
+ continue;
+
+ StringRef Name = ProcModel.ItinsDef->getName();
+ OS << "\n// Functional units for \"" << Name << "\"\n"
+ << "namespace " << Name << "FU {\n";
+
+ for (unsigned j = 0, FUN = FUs.size(); j < FUN; ++j)
+ OS << " const InstrStage::FuncUnits " << FUs[j]->getName()
+ << " = 1ULL << " << j << ";\n";
+
+ OS << "} // end namespace " << Name << "FU\n";
+
+ RecVec BPs = ProcModel.ItinsDef->getValueAsListOfDefs("BP");
+ if (!BPs.empty()) {
+ OS << "\n// Pipeline forwarding paths for itineraries \"" << Name
+ << "\"\n" << "namespace " << Name << "Bypass {\n";
+
+ OS << " const unsigned NoBypass = 0;\n";
+ for (unsigned j = 0, BPN = BPs.size(); j < BPN; ++j)
+ OS << " const unsigned " << BPs[j]->getName()
+ << " = 1 << " << j << ";\n";
+
+ OS << "} // end namespace " << Name << "Bypass\n";
+ }
+ }
+
+ // Begin stages table
+ std::string StageTable = "\nextern const llvm::InstrStage " + Target +
+ "Stages[] = {\n";
+ StageTable += " { 0, 0, 0, llvm::InstrStage::Required }, // No itinerary\n";
+
+ // Begin operand cycle table
+ std::string OperandCycleTable = "extern const unsigned " + Target +
+ "OperandCycles[] = {\n";
+ OperandCycleTable += " 0, // No itinerary\n";
+
+ // Begin pipeline bypass table
+ std::string BypassTable = "extern const unsigned " + Target +
+ "ForwardingPaths[] = {\n";
+ BypassTable += " 0, // No itinerary\n";
+
+ // For each Itinerary across all processors, add a unique entry to the stages,
+ // operand cycles, and pipeline bypass tables. Then add the new Itinerary
+ // object with computed offsets to the ProcItinLists result.
+ unsigned StageCount = 1, OperandCycleCount = 1;
+ std::map<std::string, unsigned> ItinStageMap, ItinOperandMap;
+ for (const CodeGenProcModel &ProcModel : SchedModels.procModels()) {
+ // Add process itinerary to the list.
+ ProcItinLists.resize(ProcItinLists.size()+1);
+
+ // If this processor defines no itineraries, then leave the itinerary list
+ // empty.
+ std::vector<InstrItinerary> &ItinList = ProcItinLists.back();
+ if (!ProcModel.hasItineraries())
+ continue;
+
+ StringRef Name = ProcModel.ItinsDef->getName();
+
+ ItinList.resize(SchedModels.numInstrSchedClasses());
+ assert(ProcModel.ItinDefList.size() == ItinList.size() && "bad Itins");
+
+ for (unsigned SchedClassIdx = 0, SchedClassEnd = ItinList.size();
+ SchedClassIdx < SchedClassEnd; ++SchedClassIdx) {
+
+ // Next itinerary data
+ Record *ItinData = ProcModel.ItinDefList[SchedClassIdx];
+
+ // Get string and stage count
+ std::string ItinStageString;
+ unsigned NStages = 0;
+ if (ItinData)
+ FormItineraryStageString(std::string(Name), ItinData, ItinStageString,
+ NStages);
+
+ // Get string and operand cycle count
+ std::string ItinOperandCycleString;
+ unsigned NOperandCycles = 0;
+ std::string ItinBypassString;
+ if (ItinData) {
+ FormItineraryOperandCycleString(ItinData, ItinOperandCycleString,
+ NOperandCycles);
+
+ FormItineraryBypassString(std::string(Name), ItinData, ItinBypassString,
+ NOperandCycles);
+ }
+
+ // Check to see if stage already exists and create if it doesn't
+ uint16_t FindStage = 0;
+ if (NStages > 0) {
+ FindStage = ItinStageMap[ItinStageString];
+ if (FindStage == 0) {
+ // Emit as { cycles, u1 | u2 | ... | un, timeinc }, // indices
+ StageTable += ItinStageString + ", // " + itostr(StageCount);
+ if (NStages > 1)
+ StageTable += "-" + itostr(StageCount + NStages - 1);
+ StageTable += "\n";
+ // Record Itin class number.
+ ItinStageMap[ItinStageString] = FindStage = StageCount;
+ StageCount += NStages;
+ }
+ }
+
+ // Check to see if operand cycle already exists and create if it doesn't
+ uint16_t FindOperandCycle = 0;
+ if (NOperandCycles > 0) {
+ std::string ItinOperandString = ItinOperandCycleString+ItinBypassString;
+ FindOperandCycle = ItinOperandMap[ItinOperandString];
+ if (FindOperandCycle == 0) {
+ // Emit as cycle, // index
+ OperandCycleTable += ItinOperandCycleString + ", // ";
+ std::string OperandIdxComment = itostr(OperandCycleCount);
+ if (NOperandCycles > 1)
+ OperandIdxComment += "-"
+ + itostr(OperandCycleCount + NOperandCycles - 1);
+ OperandCycleTable += OperandIdxComment + "\n";
+ // Record Itin class number.
+ ItinOperandMap[ItinOperandCycleString] =
+ FindOperandCycle = OperandCycleCount;
+ // Emit as bypass, // index
+ BypassTable += ItinBypassString + ", // " + OperandIdxComment + "\n";
+ OperandCycleCount += NOperandCycles;
+ }
+ }
+
+ // Set up itinerary as location and location + stage count
+ int16_t NumUOps = ItinData ? ItinData->getValueAsInt("NumMicroOps") : 0;
+ InstrItinerary Intinerary = {
+ NumUOps,
+ FindStage,
+ uint16_t(FindStage + NStages),
+ FindOperandCycle,
+ uint16_t(FindOperandCycle + NOperandCycles),
+ };
+
+ // Inject - empty slots will be 0, 0
+ ItinList[SchedClassIdx] = Intinerary;
+ }
+ }
+
+ // Closing stage
+ StageTable += " { 0, 0, 0, llvm::InstrStage::Required } // End stages\n";
+ StageTable += "};\n";
+
+ // Closing operand cycles
+ OperandCycleTable += " 0 // End operand cycles\n";
+ OperandCycleTable += "};\n";
+
+ BypassTable += " 0 // End bypass tables\n";
+ BypassTable += "};\n";
+
+ // Emit tables.
+ OS << StageTable;
+ OS << OperandCycleTable;
+ OS << BypassTable;
+}
+
+//
+// EmitProcessorData - Generate data for processor itineraries that were
+// computed during EmitStageAndOperandCycleData(). ProcItinLists lists all
+// Itineraries for each processor. The Itinerary lists are indexed on
+// CodeGenSchedClass::Index.
+//
+void SubtargetEmitter::
+EmitItineraries(raw_ostream &OS,
+ std::vector<std::vector<InstrItinerary>> &ProcItinLists) {
+ // Multiple processor models may share an itinerary record. Emit it once.
+ SmallPtrSet<Record*, 8> ItinsDefSet;
+
+ // For each processor's machine model
+ std::vector<std::vector<InstrItinerary>>::iterator
+ ProcItinListsIter = ProcItinLists.begin();
+ for (CodeGenSchedModels::ProcIter PI = SchedModels.procModelBegin(),
+ PE = SchedModels.procModelEnd(); PI != PE; ++PI, ++ProcItinListsIter) {
+
+ Record *ItinsDef = PI->ItinsDef;
+ if (!ItinsDefSet.insert(ItinsDef).second)
+ continue;
+
+ // Get the itinerary list for the processor.
+ assert(ProcItinListsIter != ProcItinLists.end() && "bad iterator");
+ std::vector<InstrItinerary> &ItinList = *ProcItinListsIter;
+
+ // Empty itineraries aren't referenced anywhere in the tablegen output
+ // so don't emit them.
+ if (ItinList.empty())
+ continue;
+
+ OS << "\n";
+ OS << "static const llvm::InstrItinerary ";
+
+ // Begin processor itinerary table
+ OS << ItinsDef->getName() << "[] = {\n";
+
+ // For each itinerary class in CodeGenSchedClass::Index order.
+ for (unsigned j = 0, M = ItinList.size(); j < M; ++j) {
+ InstrItinerary &Intinerary = ItinList[j];
+
+ // Emit Itinerary in the form of
+ // { firstStage, lastStage, firstCycle, lastCycle } // index
+ OS << " { " <<
+ Intinerary.NumMicroOps << ", " <<
+ Intinerary.FirstStage << ", " <<
+ Intinerary.LastStage << ", " <<
+ Intinerary.FirstOperandCycle << ", " <<
+ Intinerary.LastOperandCycle << " }" <<
+ ", // " << j << " " << SchedModels.getSchedClass(j).Name << "\n";
+ }
+ // End processor itinerary table
+ OS << " { 0, uint16_t(~0U), uint16_t(~0U), uint16_t(~0U), uint16_t(~0U) }"
+ "// end marker\n";
+ OS << "};\n";
+ }
+}
+
+// Emit either the value defined in the TableGen Record, or the default
+// value defined in the C++ header. The Record is null if the processor does not
+// define a model.
+void SubtargetEmitter::EmitProcessorProp(raw_ostream &OS, const Record *R,
+ StringRef Name, char Separator) {
+ OS << " ";
+ int V = R ? R->getValueAsInt(Name) : -1;
+ if (V >= 0)
+ OS << V << Separator << " // " << Name;
+ else
+ OS << "MCSchedModel::Default" << Name << Separator;
+ OS << '\n';
+}
+
+void SubtargetEmitter::EmitProcessorResourceSubUnits(
+ const CodeGenProcModel &ProcModel, raw_ostream &OS) {
+ OS << "\nstatic const unsigned " << ProcModel.ModelName
+ << "ProcResourceSubUnits[] = {\n"
+ << " 0, // Invalid\n";
+
+ for (unsigned i = 0, e = ProcModel.ProcResourceDefs.size(); i < e; ++i) {
+ Record *PRDef = ProcModel.ProcResourceDefs[i];
+ if (!PRDef->isSubClassOf("ProcResGroup"))
+ continue;
+ RecVec ResUnits = PRDef->getValueAsListOfDefs("Resources");
+ for (Record *RUDef : ResUnits) {
+ Record *const RU =
+ SchedModels.findProcResUnits(RUDef, ProcModel, PRDef->getLoc());
+ for (unsigned J = 0; J < RU->getValueAsInt("NumUnits"); ++J) {
+ OS << " " << ProcModel.getProcResourceIdx(RU) << ", ";
+ }
+ }
+ OS << " // " << PRDef->getName() << "\n";
+ }
+ OS << "};\n";
+}
+
+static void EmitRetireControlUnitInfo(const CodeGenProcModel &ProcModel,
+ raw_ostream &OS) {
+ int64_t ReorderBufferSize = 0, MaxRetirePerCycle = 0;
+ if (Record *RCU = ProcModel.RetireControlUnit) {
+ ReorderBufferSize =
+ std::max(ReorderBufferSize, RCU->getValueAsInt("ReorderBufferSize"));
+ MaxRetirePerCycle =
+ std::max(MaxRetirePerCycle, RCU->getValueAsInt("MaxRetirePerCycle"));
+ }
+
+ OS << ReorderBufferSize << ", // ReorderBufferSize\n ";
+ OS << MaxRetirePerCycle << ", // MaxRetirePerCycle\n ";
+}
+
+static void EmitRegisterFileInfo(const CodeGenProcModel &ProcModel,
+ unsigned NumRegisterFiles,
+ unsigned NumCostEntries, raw_ostream &OS) {
+ if (NumRegisterFiles)
+ OS << ProcModel.ModelName << "RegisterFiles,\n " << (1 + NumRegisterFiles);
+ else
+ OS << "nullptr,\n 0";
+
+ OS << ", // Number of register files.\n ";
+ if (NumCostEntries)
+ OS << ProcModel.ModelName << "RegisterCosts,\n ";
+ else
+ OS << "nullptr,\n ";
+ OS << NumCostEntries << ", // Number of register cost entries.\n";
+}
+
// Emit the per-register-class cost table and the register-file descriptor
// table for ProcModel. Returns the total number of cost entries emitted;
// 0 means every register file uses default costs and no tables were emitted.
unsigned
SubtargetEmitter::EmitRegisterFileTables(const CodeGenProcModel &ProcModel,
                                         raw_ostream &OS) {
  // Nothing to emit when all register files use the default cost model.
  if (llvm::all_of(ProcModel.RegisterFiles, [](const CodeGenRegisterFile &RF) {
        return RF.hasDefaultCosts();
      }))
    return 0;

  // Print the RegisterCost table first.
  OS << "\n// {RegisterClassID, Register Cost, AllowMoveElimination }\n";
  OS << "static const llvm::MCRegisterCostEntry " << ProcModel.ModelName
     << "RegisterCosts"
     << "[] = {\n";

  for (const CodeGenRegisterFile &RF : ProcModel.RegisterFiles) {
    // Skip register files with a default cost table.
    if (RF.hasDefaultCosts())
      continue;
    // Add entries to the cost table.
    for (const CodeGenRegisterCost &RC : RF.Costs) {
      OS << " { ";
      Record *Rec = RC.RCDef;
      if (Rec->getValue("Namespace"))
        OS << Rec->getValueAsString("Namespace") << "::";
      OS << Rec->getName() << "RegClassID, " << RC.Cost << ", "
         << RC.AllowMoveElimination << "},\n";
    }
  }
  OS << "};\n";

  // Now generate a table with register file info.
  OS << "\n // {Name, #PhysRegs, #CostEntries, IndexToCostTbl, "
     << "MaxMovesEliminatedPerCycle, AllowZeroMoveEliminationOnly }\n";
  OS << "static const llvm::MCRegisterFileDesc " << ProcModel.ModelName
     << "RegisterFiles"
     << "[] = {\n"
     << " { \"InvalidRegisterFile\", 0, 0, 0, 0, 0 },\n";
  // CostTblIndex tracks where each file's slice begins in the cost table
  // emitted above; files with default costs contribute 0 entries.
  unsigned CostTblIndex = 0;

  for (const CodeGenRegisterFile &RD : ProcModel.RegisterFiles) {
    OS << " { ";
    OS << '"' << RD.Name << '"' << ", " << RD.NumPhysRegs << ", ";
    unsigned NumCostEntries = RD.Costs.size();
    OS << NumCostEntries << ", " << CostTblIndex << ", "
       << RD.MaxMovesEliminatedPerCycle << ", "
       << RD.AllowZeroMoveEliminationOnly << "},\n";
    CostTblIndex += NumCostEntries;
  }
  OS << "};\n";

  return CostTblIndex;
}
+
+void SubtargetEmitter::EmitLoadStoreQueueInfo(const CodeGenProcModel &ProcModel,
+ raw_ostream &OS) {
+ unsigned QueueID = 0;
+ if (ProcModel.LoadQueue) {
+ const Record *Queue = ProcModel.LoadQueue->getValueAsDef("QueueDescriptor");
+ QueueID = 1 + std::distance(ProcModel.ProcResourceDefs.begin(),
+ find(ProcModel.ProcResourceDefs, Queue));
+ }
+ OS << " " << QueueID << ", // Resource Descriptor for the Load Queue\n";
+
+ QueueID = 0;
+ if (ProcModel.StoreQueue) {
+ const Record *Queue =
+ ProcModel.StoreQueue->getValueAsDef("QueueDescriptor");
+ QueueID = 1 + std::distance(ProcModel.ProcResourceDefs.begin(),
+ find(ProcModel.ProcResourceDefs, Queue));
+ }
+ OS << " " << QueueID << ", // Resource Descriptor for the Store Queue\n";
+}
+
// Emit the MCExtraProcessorInfo struct for ProcModel: retire-control-unit
// limits, register-file tables, and load/store queue descriptors.
void SubtargetEmitter::EmitExtraProcessorInfo(const CodeGenProcModel &ProcModel,
                                              raw_ostream &OS) {
  // Generate a table of register file descriptors (one entry per each user
  // defined register file), and a table of register costs.
  unsigned NumCostEntries = EmitRegisterFileTables(ProcModel, OS);

  // Now generate a table for the extra processor info.
  OS << "\nstatic const llvm::MCExtraProcessorInfo " << ProcModel.ModelName
     << "ExtraInfo = {\n ";

  // Add information related to the retire control unit.
  EmitRetireControlUnitInfo(ProcModel, OS);

  // Add information related to the register files (i.e. where to find register
  // file descriptors and register costs).
  EmitRegisterFileInfo(ProcModel, ProcModel.RegisterFiles.size(),
                       NumCostEntries, OS);

  // Add information about load/store queues.
  EmitLoadStoreQueueInfo(ProcModel, OS);

  OS << "};\n";
}
+
// Emit the MCProcResourceDesc table for ProcModel (preceded by its sub-unit
// index table). Groups reference a slice of the sub-unit table; plain units
// record their super-resource index instead.
void SubtargetEmitter::EmitProcessorResources(const CodeGenProcModel &ProcModel,
                                              raw_ostream &OS) {
  EmitProcessorResourceSubUnits(ProcModel, OS);

  OS << "\n// {Name, NumUnits, SuperIdx, BufferSize, SubUnitsIdxBegin}\n";
  OS << "static const llvm::MCProcResourceDesc " << ProcModel.ModelName
     << "ProcResources"
     << "[] = {\n"
     << " {\"InvalidUnit\", 0, 0, 0, 0},\n";

  // Running offset into the ProcResourceSubUnits table; starts at 1 because
  // slot 0 there is the Invalid sentinel.
  unsigned SubUnitsOffset = 1;
  for (unsigned i = 0, e = ProcModel.ProcResourceDefs.size(); i < e; ++i) {
    Record *PRDef = ProcModel.ProcResourceDefs[i];

    Record *SuperDef = nullptr;
    unsigned SuperIdx = 0;
    unsigned NumUnits = 0;
    const unsigned SubUnitsBeginOffset = SubUnitsOffset;
    int BufferSize = PRDef->getValueAsInt("BufferSize");
    if (PRDef->isSubClassOf("ProcResGroup")) {
      // A group's unit count is the sum over its member resources; the same
      // count advances the sub-unit table offset (one index per unit).
      RecVec ResUnits = PRDef->getValueAsListOfDefs("Resources");
      for (Record *RU : ResUnits) {
        NumUnits += RU->getValueAsInt("NumUnits");
        SubUnitsOffset += RU->getValueAsInt("NumUnits");
      }
    }
    else {
      // Find the SuperIdx
      if (PRDef->getValueInit("Super")->isComplete()) {
        SuperDef =
            SchedModels.findProcResUnits(PRDef->getValueAsDef("Super"),
                                         ProcModel, PRDef->getLoc());
        SuperIdx = ProcModel.getProcResourceIdx(SuperDef);
      }
      NumUnits = PRDef->getValueAsInt("NumUnits");
    }
    // Emit the ProcResourceDesc
    OS << " {\"" << PRDef->getName() << "\", ";
    if (PRDef->getName().size() < 15)
      OS.indent(15 - PRDef->getName().size());
    OS << NumUnits << ", " << SuperIdx << ", " << BufferSize << ", ";
    // Only groups advanced SubUnitsOffset above, so this distinguishes a
    // group (pointer into the sub-unit table) from a plain unit (nullptr).
    if (SubUnitsBeginOffset != SubUnitsOffset) {
      OS << ProcModel.ModelName << "ProcResourceSubUnits + "
         << SubUnitsBeginOffset;
    } else {
      OS << "nullptr";
    }
    OS << "}, // #" << i+1;
    if (SuperDef)
      OS << ", Super=" << SuperDef->getName();
    OS << "\n";
  }
  OS << "};\n";
}
+
// Find the WriteRes Record that defines processor resources for this
// SchedWrite.
//
// Resolution order: (1) the SchedWrite itself if it is a SchedWriteRes,
// (2) a processor-specific SchedWriteRes alias, (3) a WriteRes entry in the
// processor's WriteResDefs. Emits a fatal error on ambiguity or when no
// resources are defined.
Record *SubtargetEmitter::FindWriteResources(
    const CodeGenSchedRW &SchedWrite, const CodeGenProcModel &ProcModel) {

  // Check if the SchedWrite is already subtarget-specific and directly
  // specifies a set of processor resources.
  if (SchedWrite.TheDef->isSubClassOf("SchedWriteRes"))
    return SchedWrite.TheDef;

  Record *AliasDef = nullptr;
  for (Record *A : SchedWrite.Aliases) {
    const CodeGenSchedRW &AliasRW =
        SchedModels.getSchedRW(A->getValueAsDef("AliasRW"));
    // An alias bound to a SchedModel only applies to that processor; an
    // alias with an incomplete SchedModel applies to all processors.
    if (AliasRW.TheDef->getValueInit("SchedModel")->isComplete()) {
      Record *ModelDef = AliasRW.TheDef->getValueAsDef("SchedModel");
      if (&SchedModels.getProcModel(ModelDef) != &ProcModel)
        continue;
    }
    if (AliasDef)
      PrintFatalError(AliasRW.TheDef->getLoc(), "Multiple aliases "
                      "defined for processor " + ProcModel.ModelName +
                      " Ensure only one SchedAlias exists per RW.");
    AliasDef = AliasRW.TheDef;
  }
  if (AliasDef && AliasDef->isSubClassOf("SchedWriteRes"))
    return AliasDef;

  // Check this processor's list of write resources.
  Record *ResDef = nullptr;
  for (Record *WR : ProcModel.WriteResDefs) {
    if (!WR->isSubClassOf("WriteRes"))
      continue;
    // Match either the alias (if any) or the original SchedWrite; defining
    // resources for both is an error.
    if (AliasDef == WR->getValueAsDef("WriteType")
        || SchedWrite.TheDef == WR->getValueAsDef("WriteType")) {
      if (ResDef) {
        PrintFatalError(WR->getLoc(), "Resources are defined for both "
                        "SchedWrite and its alias on processor " +
                        ProcModel.ModelName);
      }
      ResDef = WR;
    }
  }
  // TODO: If ProcModel has a base model (previous generation processor),
  // then call FindWriteResources recursively with that model here.
  if (!ResDef) {
    PrintFatalError(ProcModel.ModelDef->getLoc(),
                    Twine("Processor does not define resources for ") +
                    SchedWrite.TheDef->getName());
  }
  return ResDef;
}
+
/// Find the ReadAdvance record for the given SchedRead on this processor or
/// return NULL.
///
/// Mirrors FindWriteResources: (1) the SchedRead itself if it is a
/// SchedReadAdvance, (2) a processor-specific alias, (3) an entry in the
/// processor's ReadAdvanceDefs. Unlike writes, a missing definition is only
/// fatal for reads other than "ReadDefault" (which legitimately returns null).
Record *SubtargetEmitter::FindReadAdvance(const CodeGenSchedRW &SchedRead,
                                          const CodeGenProcModel &ProcModel) {
  // Check for SchedReads that directly specify a ReadAdvance.
  if (SchedRead.TheDef->isSubClassOf("SchedReadAdvance"))
    return SchedRead.TheDef;

  // Check this processor's list of aliases for SchedRead.
  Record *AliasDef = nullptr;
  for (Record *A : SchedRead.Aliases) {
    const CodeGenSchedRW &AliasRW =
        SchedModels.getSchedRW(A->getValueAsDef("AliasRW"));
    // Aliases bound to another processor's SchedModel are ignored here.
    if (AliasRW.TheDef->getValueInit("SchedModel")->isComplete()) {
      Record *ModelDef = AliasRW.TheDef->getValueAsDef("SchedModel");
      if (&SchedModels.getProcModel(ModelDef) != &ProcModel)
        continue;
    }
    if (AliasDef)
      PrintFatalError(AliasRW.TheDef->getLoc(), "Multiple aliases "
                      "defined for processor " + ProcModel.ModelName +
                      " Ensure only one SchedAlias exists per RW.");
    AliasDef = AliasRW.TheDef;
  }
  if (AliasDef && AliasDef->isSubClassOf("SchedReadAdvance"))
    return AliasDef;

  // Check this processor's ReadAdvanceList.
  Record *ResDef = nullptr;
  for (Record *RA : ProcModel.ReadAdvanceDefs) {
    if (!RA->isSubClassOf("ReadAdvance"))
      continue;
    // Match either the alias (if any) or the original SchedRead; defining
    // resources for both is an error.
    if (AliasDef == RA->getValueAsDef("ReadType")
        || SchedRead.TheDef == RA->getValueAsDef("ReadType")) {
      if (ResDef) {
        PrintFatalError(RA->getLoc(), "Resources are defined for both "
                        "SchedRead and its alias on processor " +
                        ProcModel.ModelName);
      }
      ResDef = RA;
    }
  }
  // TODO: If ProcModel has a base model (previous generation processor),
  // then call FindReadAdvance recursively with that model here.
  if (!ResDef && SchedRead.TheDef->getName() != "ReadDefault") {
    PrintFatalError(ProcModel.ModelDef->getLoc(),
                    Twine("Processor does not define resources for ") +
                    SchedRead.TheDef->getName());
  }
  return ResDef;
}
+
// Expand an explicit list of processor resources into a full list of implied
// resource groups and super resources that cover them.
//
// PRVec and Cycles are parallel arrays and are grown in lock-step: for every
// appended resource, the originating entry's cycle count is duplicated. Only
// the original entries (index < initial size) are expanded; appended entries
// are not revisited, which is why 'e' is captured before the loop.
void SubtargetEmitter::ExpandProcResources(RecVec &PRVec,
                                           std::vector<int64_t> &Cycles,
                                           const CodeGenProcModel &PM) {
  assert(PRVec.size() == Cycles.size() && "failed precondition");
  for (unsigned i = 0, e = PRVec.size(); i != e; ++i) {
    Record *PRDef = PRVec[i];
    RecVec SubResources;
    if (PRDef->isSubClassOf("ProcResGroup"))
      SubResources = PRDef->getValueAsListOfDefs("Resources");
    else {
      SubResources.push_back(PRDef);
      PRDef = SchedModels.findProcResUnits(PRDef, PM, PRDef->getLoc());
      // Walk the chain of super resources, appending each one.
      for (Record *SubDef = PRDef;
           SubDef->getValueInit("Super")->isComplete();) {
        if (SubDef->isSubClassOf("ProcResGroup")) {
          // Disallow this for simplicity.
          PrintFatalError(SubDef->getLoc(), "Processor resource group "
                          " cannot be a super resources.");
        }
        Record *SuperDef =
            SchedModels.findProcResUnits(SubDef->getValueAsDef("Super"), PM,
                                         SubDef->getLoc());
        PRVec.push_back(SuperDef);
        Cycles.push_back(Cycles[i]);
        SubDef = SuperDef;
      }
    }
    // Append every group that fully covers SubResources.
    for (Record *PR : PM.ProcResourceDefs) {
      if (PR == PRDef || !PR->isSubClassOf("ProcResGroup"))
        continue;
      RecVec SuperResources = PR->getValueAsListOfDefs("Resources");
      RecIter SubI = SubResources.begin(), SubE = SubResources.end();
      for( ; SubI != SubE; ++SubI) {
        if (!is_contained(SuperResources, *SubI)) {
          break;
        }
      }
      // SubI == SubE means every sub-resource is contained in this group.
      if (SubI == SubE) {
        PRVec.push_back(PR);
        Cycles.push_back(Cycles[i]);
      }
    }
  }
}
+
// Generate the SchedClass table for this processor and update global
// tables. Must be called for each processor in order.
//
// For each sched class this computes write-resource, write-latency and
// read-advance entry lists, then compresses them into the shared global
// tables in SchedTables by reusing any existing identical subsequence
// (std::search) before appending.
void SubtargetEmitter::GenSchedClassTables(const CodeGenProcModel &ProcModel,
                                           SchedClassTables &SchedTables) {
  SchedTables.ProcSchedClasses.resize(SchedTables.ProcSchedClasses.size() + 1);
  if (!ProcModel.hasInstrSchedModel())
    return;

  std::vector<MCSchedClassDesc> &SCTab = SchedTables.ProcSchedClasses.back();
  LLVM_DEBUG(dbgs() << "\n+++ SCHED CLASSES (GenSchedClassTables) +++\n");
  for (const CodeGenSchedClass &SC : SchedModels.schedClasses()) {
    LLVM_DEBUG(SC.dump(&SchedModels));

    SCTab.resize(SCTab.size() + 1);
    MCSchedClassDesc &SCDesc = SCTab.back();
    // SCDesc.Name is guarded by NDEBUG
    SCDesc.NumMicroOps = 0;
    SCDesc.BeginGroup = false;
    SCDesc.EndGroup = false;
    SCDesc.RetireOOO = false;
    SCDesc.WriteProcResIdx = 0;
    SCDesc.WriteLatencyIdx = 0;
    SCDesc.ReadAdvanceIdx = 0;

    // A Variant SchedClass has no resources of its own.
    bool HasVariants = false;
    for (const CodeGenSchedTransition &CGT :
           make_range(SC.Transitions.begin(), SC.Transitions.end())) {
      if (CGT.ProcIndex == ProcModel.Index) {
        HasVariants = true;
        break;
      }
    }
    if (HasVariants) {
      SCDesc.NumMicroOps = MCSchedClassDesc::VariantNumMicroOps;
      continue;
    }

    // Determine if the SchedClass is actually reachable on this processor. If
    // not don't try to locate the processor resources, it will fail.
    // If ProcIndices contains 0, this class applies to all processors.
    assert(!SC.ProcIndices.empty() && "expect at least one procidx");
    if (SC.ProcIndices[0] != 0) {
      if (!is_contained(SC.ProcIndices, ProcModel.Index))
        continue;
    }
    IdxVec Writes = SC.Writes;
    IdxVec Reads = SC.Reads;
    if (!SC.InstRWs.empty()) {
      // This class has a default ReadWrite list which can be overridden by
      // InstRW definitions.
      Record *RWDef = nullptr;
      for (Record *RW : SC.InstRWs) {
        Record *RWModelDef = RW->getValueAsDef("SchedModel");
        if (&ProcModel == &SchedModels.getProcModel(RWModelDef)) {
          RWDef = RW;
          break;
        }
      }
      if (RWDef) {
        Writes.clear();
        Reads.clear();
        SchedModels.findRWs(RWDef->getValueAsListOfDefs("OperandReadWrites"),
                            Writes, Reads);
      }
    }
    if (Writes.empty()) {
      // Check this processor's itinerary class resources.
      for (Record *I : ProcModel.ItinRWDefs) {
        RecVec Matched = I->getValueAsListOfDefs("MatchedItinClasses");
        if (is_contained(Matched, SC.ItinClassDef)) {
          SchedModels.findRWs(I->getValueAsListOfDefs("OperandReadWrites"),
                              Writes, Reads);
          break;
        }
      }
      if (Writes.empty()) {
        LLVM_DEBUG(dbgs() << ProcModel.ModelName
                          << " does not have resources for class " << SC.Name
                          << '\n');
        SCDesc.NumMicroOps = MCSchedClassDesc::InvalidNumMicroOps;
      }
    }
    // Sum resources across all operand writes.
    std::vector<MCWriteProcResEntry> WriteProcResources;
    std::vector<MCWriteLatencyEntry> WriteLatencies;
    std::vector<std::string> WriterNames;
    std::vector<MCReadAdvanceEntry> ReadAdvanceEntries;
    for (unsigned W : Writes) {
      IdxVec WriteSeq;
      SchedModels.expandRWSeqForProc(W, WriteSeq, /*IsRead=*/false,
                                     ProcModel);

      // For each operand, create a latency entry.
      MCWriteLatencyEntry WLEntry;
      WLEntry.Cycles = 0;
      unsigned WriteID = WriteSeq.back();
      WriterNames.push_back(SchedModels.getSchedWrite(WriteID).Name);
      // If this Write is not referenced by a ReadAdvance, don't distinguish it
      // from other WriteLatency entries.
      if (!SchedModels.hasReadOfWrite(
              SchedModels.getSchedWrite(WriteID).TheDef)) {
        WriteID = 0;
      }
      WLEntry.WriteResourceID = WriteID;

      for (unsigned WS : WriteSeq) {

        Record *WriteRes =
            FindWriteResources(SchedModels.getSchedWrite(WS), ProcModel);

        // Mark the parent class as invalid for unsupported write types.
        if (WriteRes->getValueAsBit("Unsupported")) {
          SCDesc.NumMicroOps = MCSchedClassDesc::InvalidNumMicroOps;
          break;
        }
        WLEntry.Cycles += WriteRes->getValueAsInt("Latency");
        SCDesc.NumMicroOps += WriteRes->getValueAsInt("NumMicroOps");
        SCDesc.BeginGroup |= WriteRes->getValueAsBit("BeginGroup");
        SCDesc.EndGroup |= WriteRes->getValueAsBit("EndGroup");
        // SingleIssue implies both group boundaries.
        SCDesc.BeginGroup |= WriteRes->getValueAsBit("SingleIssue");
        SCDesc.EndGroup |= WriteRes->getValueAsBit("SingleIssue");
        SCDesc.RetireOOO |= WriteRes->getValueAsBit("RetireOOO");

        // Create an entry for each ProcResource listed in WriteRes.
        RecVec PRVec = WriteRes->getValueAsListOfDefs("ProcResources");
        std::vector<int64_t> Cycles =
            WriteRes->getValueAsListOfInts("ResourceCycles");

        if (Cycles.empty()) {
          // If ResourceCycles is not provided, default to one cycle per
          // resource.
          Cycles.resize(PRVec.size(), 1);
        } else if (Cycles.size() != PRVec.size()) {
          // If ResourceCycles is provided, check consistency.
          PrintFatalError(
              WriteRes->getLoc(),
              Twine("Inconsistent resource cycles: !size(ResourceCycles) != "
                    "!size(ProcResources): ")
                  .concat(Twine(PRVec.size()))
                  .concat(" vs ")
                  .concat(Twine(Cycles.size())));
        }

        ExpandProcResources(PRVec, Cycles, ProcModel);

        for (unsigned PRIdx = 0, PREnd = PRVec.size();
             PRIdx != PREnd; ++PRIdx) {
          MCWriteProcResEntry WPREntry;
          WPREntry.ProcResourceIdx = ProcModel.getProcResourceIdx(PRVec[PRIdx]);
          assert(WPREntry.ProcResourceIdx && "Bad ProcResourceIdx");
          WPREntry.Cycles = Cycles[PRIdx];
          // If this resource is already used in this sequence, add the current
          // entry's cycles so that the same resource appears to be used
          // serially, rather than multiple parallel uses. This is important for
          // in-order machine where the resource consumption is a hazard.
          unsigned WPRIdx = 0, WPREnd = WriteProcResources.size();
          for( ; WPRIdx != WPREnd; ++WPRIdx) {
            if (WriteProcResources[WPRIdx].ProcResourceIdx
                == WPREntry.ProcResourceIdx) {
              WriteProcResources[WPRIdx].Cycles += WPREntry.Cycles;
              break;
            }
          }
          if (WPRIdx == WPREnd)
            WriteProcResources.push_back(WPREntry);
        }
      }
      WriteLatencies.push_back(WLEntry);
    }
    // Create an entry for each operand Read in this SchedClass.
    // Entries must be sorted first by UseIdx then by WriteResourceID.
    for (unsigned UseIdx = 0, EndIdx = Reads.size();
         UseIdx != EndIdx; ++UseIdx) {
      Record *ReadAdvance =
          FindReadAdvance(SchedModels.getSchedRead(Reads[UseIdx]), ProcModel);
      if (!ReadAdvance)
        continue;

      // Mark the parent class as invalid for unsupported write types.
      if (ReadAdvance->getValueAsBit("Unsupported")) {
        SCDesc.NumMicroOps = MCSchedClassDesc::InvalidNumMicroOps;
        break;
      }
      RecVec ValidWrites = ReadAdvance->getValueAsListOfDefs("ValidWrites");
      IdxVec WriteIDs;
      if (ValidWrites.empty())
        WriteIDs.push_back(0);
      else {
        for (Record *VW : ValidWrites) {
          WriteIDs.push_back(SchedModels.getSchedRWIdx(VW, /*IsRead=*/false));
        }
      }
      llvm::sort(WriteIDs);
      for(unsigned W : WriteIDs) {
        MCReadAdvanceEntry RAEntry;
        RAEntry.UseIdx = UseIdx;
        RAEntry.WriteResourceID = W;
        RAEntry.Cycles = ReadAdvance->getValueAsInt("Cycles");
        ReadAdvanceEntries.push_back(RAEntry);
      }
    }
    if (SCDesc.NumMicroOps == MCSchedClassDesc::InvalidNumMicroOps) {
      WriteProcResources.clear();
      WriteLatencies.clear();
      ReadAdvanceEntries.clear();
    }
    // Add the information for this SchedClass to the global tables using basic
    // compression.
    //
    // WritePrecRes entries are sorted by ProcResIdx.
    llvm::sort(WriteProcResources, LessWriteProcResources());

    SCDesc.NumWriteProcResEntries = WriteProcResources.size();
    std::vector<MCWriteProcResEntry>::iterator WPRPos =
        std::search(SchedTables.WriteProcResources.begin(),
                    SchedTables.WriteProcResources.end(),
                    WriteProcResources.begin(), WriteProcResources.end());
    if (WPRPos != SchedTables.WriteProcResources.end())
      SCDesc.WriteProcResIdx = WPRPos - SchedTables.WriteProcResources.begin();
    else {
      SCDesc.WriteProcResIdx = SchedTables.WriteProcResources.size();
      SchedTables.WriteProcResources.insert(WPRPos, WriteProcResources.begin(),
                                            WriteProcResources.end());
    }
    // Latency entries must remain in operand order.
    SCDesc.NumWriteLatencyEntries = WriteLatencies.size();
    std::vector<MCWriteLatencyEntry>::iterator WLPos =
        std::search(SchedTables.WriteLatencies.begin(),
                    SchedTables.WriteLatencies.end(),
                    WriteLatencies.begin(), WriteLatencies.end());
    if (WLPos != SchedTables.WriteLatencies.end()) {
      unsigned idx = WLPos - SchedTables.WriteLatencies.begin();
      SCDesc.WriteLatencyIdx = idx;
      // When reusing a latency slice, merge this class's writer names into
      // the existing comment strings (avoiding duplicates).
      for (unsigned i = 0, e = WriteLatencies.size(); i < e; ++i)
        if (SchedTables.WriterNames[idx + i].find(WriterNames[i]) ==
            std::string::npos) {
          SchedTables.WriterNames[idx + i] += std::string("_") + WriterNames[i];
        }
    }
    else {
      SCDesc.WriteLatencyIdx = SchedTables.WriteLatencies.size();
      llvm::append_range(SchedTables.WriteLatencies, WriteLatencies);
      llvm::append_range(SchedTables.WriterNames, WriterNames);
    }
    // ReadAdvanceEntries must remain in operand order.
    SCDesc.NumReadAdvanceEntries = ReadAdvanceEntries.size();
    std::vector<MCReadAdvanceEntry>::iterator RAPos =
        std::search(SchedTables.ReadAdvanceEntries.begin(),
                    SchedTables.ReadAdvanceEntries.end(),
                    ReadAdvanceEntries.begin(), ReadAdvanceEntries.end());
    if (RAPos != SchedTables.ReadAdvanceEntries.end())
      SCDesc.ReadAdvanceIdx = RAPos - SchedTables.ReadAdvanceEntries.begin();
    else {
      SCDesc.ReadAdvanceIdx = SchedTables.ReadAdvanceEntries.size();
      llvm::append_range(SchedTables.ReadAdvanceEntries, ReadAdvanceEntries);
    }
  }
}
+
// Emit SchedClass tables for all processors and associated global tables.
// The global WriteProcRes/WriteLatency/ReadAdvance tables are shared; each
// processor then gets its own MCSchedClassDesc array indexing into them.
void SubtargetEmitter::EmitSchedClassTables(SchedClassTables &SchedTables,
                                            raw_ostream &OS) {
  // Emit global WriteProcResTable.
  OS << "\n// {ProcResourceIdx, Cycles}\n"
     << "extern const llvm::MCWriteProcResEntry "
     << Target << "WriteProcResTable[] = {\n"
     << " { 0, 0}, // Invalid\n";
  for (unsigned WPRIdx = 1, WPREnd = SchedTables.WriteProcResources.size();
       WPRIdx != WPREnd; ++WPRIdx) {
    MCWriteProcResEntry &WPREntry = SchedTables.WriteProcResources[WPRIdx];
    OS << " {" << format("%2d", WPREntry.ProcResourceIdx) << ", "
       << format("%2d", WPREntry.Cycles) << "}";
    if (WPRIdx + 1 < WPREnd)
      OS << ',';
    OS << " // #" << WPRIdx << '\n';
  }
  OS << "}; // " << Target << "WriteProcResTable\n";

  // Emit global WriteLatencyTable.
  OS << "\n// {Cycles, WriteResourceID}\n"
     << "extern const llvm::MCWriteLatencyEntry "
     << Target << "WriteLatencyTable[] = {\n"
     << " { 0, 0}, // Invalid\n";
  for (unsigned WLIdx = 1, WLEnd = SchedTables.WriteLatencies.size();
       WLIdx != WLEnd; ++WLIdx) {
    MCWriteLatencyEntry &WLEntry = SchedTables.WriteLatencies[WLIdx];
    OS << " {" << format("%2d", WLEntry.Cycles) << ", "
       << format("%2d", WLEntry.WriteResourceID) << "}";
    if (WLIdx + 1 < WLEnd)
      OS << ',';
    OS << " // #" << WLIdx << " " << SchedTables.WriterNames[WLIdx] << '\n';
  }
  OS << "}; // " << Target << "WriteLatencyTable\n";

  // Emit global ReadAdvanceTable.
  OS << "\n// {UseIdx, WriteResourceID, Cycles}\n"
     << "extern const llvm::MCReadAdvanceEntry "
     << Target << "ReadAdvanceTable[] = {\n"
     << " {0, 0, 0}, // Invalid\n";
  for (unsigned RAIdx = 1, RAEnd = SchedTables.ReadAdvanceEntries.size();
       RAIdx != RAEnd; ++RAIdx) {
    MCReadAdvanceEntry &RAEntry = SchedTables.ReadAdvanceEntries[RAIdx];
    OS << " {" << RAEntry.UseIdx << ", "
       << format("%2d", RAEntry.WriteResourceID) << ", "
       << format("%2d", RAEntry.Cycles) << "}";
    if (RAIdx + 1 < RAEnd)
      OS << ',';
    OS << " // #" << RAIdx << '\n';
  }
  OS << "}; // " << Target << "ReadAdvanceTable\n";

  // Emit a SchedClass table for each processor.
  for (CodeGenSchedModels::ProcIter PI = SchedModels.procModelBegin(),
         PE = SchedModels.procModelEnd(); PI != PE; ++PI) {
    if (!PI->hasInstrSchedModel())
      continue;

    // ProcSchedClasses[0] is a dummy; per-processor tables start at index 1.
    std::vector<MCSchedClassDesc> &SCTab =
        SchedTables.ProcSchedClasses[1 + (PI - SchedModels.procModelBegin())];

    OS << "\n// {Name, NumMicroOps, BeginGroup, EndGroup, RetireOOO,"
       << " WriteProcResIdx,#, WriteLatencyIdx,#, ReadAdvanceIdx,#}\n";
    OS << "static const llvm::MCSchedClassDesc "
       << PI->ModelName << "SchedClasses[] = {\n";

    // The first class is always invalid. We have no way to distinguish it
    // except by name and position.
    assert(SchedModels.getSchedClass(0).Name == "NoInstrModel"
           && "invalid class not first");
    OS << " {DBGFIELD(\"InvalidSchedClass\") "
       << MCSchedClassDesc::InvalidNumMicroOps
       << ", false, false, false, 0, 0, 0, 0, 0, 0},\n";

    for (unsigned SCIdx = 1, SCEnd = SCTab.size(); SCIdx != SCEnd; ++SCIdx) {
      MCSchedClassDesc &MCDesc = SCTab[SCIdx];
      const CodeGenSchedClass &SchedClass = SchedModels.getSchedClass(SCIdx);
      OS << " {DBGFIELD(\"" << SchedClass.Name << "\") ";
      if (SchedClass.Name.size() < 18)
        OS.indent(18 - SchedClass.Name.size());
      OS << MCDesc.NumMicroOps
         << ", " << ( MCDesc.BeginGroup ? "true" : "false" )
         << ", " << ( MCDesc.EndGroup ? "true" : "false" )
         << ", " << ( MCDesc.RetireOOO ? "true" : "false" )
         << ", " << format("%2d", MCDesc.WriteProcResIdx)
         << ", " << MCDesc.NumWriteProcResEntries
         << ", " << format("%2d", MCDesc.WriteLatencyIdx)
         << ", " << MCDesc.NumWriteLatencyEntries
         << ", " << format("%2d", MCDesc.ReadAdvanceIdx)
         << ", " << MCDesc.NumReadAdvanceEntries
         << "}, // #" << SCIdx << '\n';
    }
    OS << "}; // " << PI->ModelName << "SchedClasses\n";
  }
}
+
+// EmitProcessorModels - Emit one static llvm::MCSchedModel definition per
+// processor, preceded by its extra-info and resource tables when the model
+// defines an instruction-level scheduling model.
+void SubtargetEmitter::EmitProcessorModels(raw_ostream &OS) {
+  // For each processor model.
+  for (const CodeGenProcModel &PM : SchedModels.procModels()) {
+    // Emit extra processor info if available.
+    if (PM.hasExtraProcessorInfo())
+      EmitExtraProcessorInfo(PM, OS);
+    // Emit processor resource table.
+    if (PM.hasInstrSchedModel())
+      EmitProcessorResources(PM, OS);
+    else if(!PM.ProcResourceDefs.empty())
+      // Declaring resources without any WriteRes using them is a user error.
+      PrintFatalError(PM.ModelDef->getLoc(), "SchedMachineModel defines "
+                      "ProcResources without defining WriteRes SchedWriteRes");
+
+    // Begin processor itinerary properties
+    OS << "\n";
+    OS << "static const llvm::MCSchedModel " << PM.ModelName << " = {\n";
+    EmitProcessorProp(OS, PM.ModelDef, "IssueWidth", ',');
+    EmitProcessorProp(OS, PM.ModelDef, "MicroOpBufferSize", ',');
+    EmitProcessorProp(OS, PM.ModelDef, "LoopMicroOpBufferSize", ',');
+    EmitProcessorProp(OS, PM.ModelDef, "LoadLatency", ',');
+    EmitProcessorProp(OS, PM.ModelDef, "HighLatency", ',');
+    EmitProcessorProp(OS, PM.ModelDef, "MispredictPenalty", ',');
+
+    // A null ModelDef means no explicit machine model; fall back to false.
+    bool PostRAScheduler =
+        (PM.ModelDef ? PM.ModelDef->getValueAsBit("PostRAScheduler") : false);
+
+    OS << "  " << (PostRAScheduler ? "true" : "false") << ", // "
+       << "PostRAScheduler\n";
+
+    bool CompleteModel =
+        (PM.ModelDef ? PM.ModelDef->getValueAsBit("CompleteModel") : false);
+
+    OS << "  " << (CompleteModel ? "true" : "false") << ", // "
+       << "CompleteModel\n";
+
+    OS << "  " << PM.Index << ", // Processor ID\n";
+    // Resource/class table pointers and counts, or nulls when absent.
+    if (PM.hasInstrSchedModel())
+      OS << "  " << PM.ModelName << "ProcResources" << ",\n"
+         << "  " << PM.ModelName << "SchedClasses" << ",\n"
+         << "  " << PM.ProcResourceDefs.size()+1 << ",\n"
+         << "  " << (SchedModels.schedClassEnd()
+                     - SchedModels.schedClassBegin()) << ",\n";
+    else
+      OS << "  nullptr, nullptr, 0, 0,"
+         << " // No instruction-level machine model.\n";
+    if (PM.hasItineraries())
+      OS << "  " << PM.ItinsDef->getName() << ",\n";
+    else
+      OS << "  nullptr, // No Itinerary\n";
+    if (PM.hasExtraProcessorInfo())
+      OS << "  &" << PM.ModelName << "ExtraInfo,\n";
+    else
+      OS << "  nullptr // No extra processor descriptor\n";
+    OS << "};\n";
+  }
+}
+
+//
+// EmitSchedModel - Emits all scheduling model tables, folding common patterns.
+//
+void SubtargetEmitter::EmitSchedModel(raw_ostream &OS) {
+  // DBGFIELD wraps debug-only string fields so they disappear from the
+  // emitted tables in NDEBUG builds (unless LLVM_ENABLE_DUMP is set).
+  OS << "#ifdef DBGFIELD\n"
+     << "#error \"<target>GenSubtargetInfo.inc requires a DBGFIELD macro\"\n"
+     << "#endif\n"
+     << "#if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)\n"
+     << "#define DBGFIELD(x) x,\n"
+     << "#else\n"
+     << "#define DBGFIELD(x)\n"
+     << "#endif\n";
+
+  if (SchedModels.hasItineraries()) {
+    std::vector<std::vector<InstrItinerary>> ProcItinLists;
+    // Emit the stage data
+    EmitStageAndOperandCycleData(OS, ProcItinLists);
+    EmitItineraries(OS, ProcItinLists);
+  }
+  OS << "\n// ===============================================================\n"
+     << "// Data tables for the new per-operand machine model.\n";
+
+  // Accumulate the per-processor scheduling class tables, then emit them.
+  SchedClassTables SchedTables;
+  for (const CodeGenProcModel &ProcModel : SchedModels.procModels()) {
+    GenSchedClassTables(ProcModel, SchedTables);
+  }
+  EmitSchedClassTables(SchedTables, OS);
+
+  OS << "\n#undef DBGFIELD\n";
+
+  // Emit the processor machine model
+  EmitProcessorModels(OS);
+}
+
+// Print the code carried by every PredicateProlog record, in a stable
+// (sorted) record order, ahead of any generated predicate functions.
+static void emitPredicateProlog(const RecordKeeper &Records, raw_ostream &OS) {
+  // Gather the PredicateProlog records and order them deterministically.
+  std::vector<Record *> PrologRecords =
+      Records.getAllDerivedDefinitions("PredicateProlog");
+  llvm::sort(PrologRecords, LessRecord());
+
+  // Accumulate the prolog code into a local buffer, then emit it at once.
+  std::string Code;
+  raw_string_ostream CodeStream(Code);
+  for (Record *Prolog : PrologRecords)
+    CodeStream << Prolog->getValueAsString("Code") << '\n';
+
+  OS << Code;
+}
+
+// Return true when Rec is an MCSchedPredicate whose predicate is MCTrue.
+static bool isTruePredicate(const Record *Rec) {
+  if (!Rec->isSubClassOf("MCSchedPredicate"))
+    return false;
+  return Rec->getValueAsDef("Pred")->isSubClassOf("MCTrue");
+}
+
+// Emit the guard (an if-statement over transition T's non-trivial predicates)
+// followed by "return <ToClassIdx>;". Output is staged in a local buffer so
+// the expander can interleave writes; PE's indent level tracks the generated
+// code's nesting and is restored (via two decrements matching the +2 above)
+// before returning.
+static void emitPredicates(const CodeGenSchedTransition &T,
+                           const CodeGenSchedClass &SC, PredicateExpander &PE,
+                           raw_ostream &OS) {
+  std::string Buffer;
+  raw_string_ostream SS(Buffer);
+
+  // If not all predicates are MCTrue, then we need an if-stmt.
+  unsigned NumNonTruePreds =
+      T.PredTerm.size() - count_if(T.PredTerm, isTruePredicate);
+
+  SS.indent(PE.getIndentLevel() * 2);
+
+  if (NumNonTruePreds) {
+    bool FirstNonTruePredicate = true;
+    SS << "if (";
+
+    PE.setIndentLevel(PE.getIndentLevel() + 2);
+
+    for (const Record *Rec : T.PredTerm) {
+      // Skip predicates that evaluate to "true".
+      if (isTruePredicate(Rec))
+        continue;
+
+      // Join successive conditions with a leading "&&" on a new line.
+      if (FirstNonTruePredicate) {
+        FirstNonTruePredicate = false;
+      } else {
+        SS << "\n";
+        SS.indent(PE.getIndentLevel() * 2);
+        SS << "&& ";
+      }
+
+      if (Rec->isSubClassOf("MCSchedPredicate")) {
+        PE.expandPredicate(SS, Rec->getValueAsDef("Pred"));
+        continue;
+      }
+
+      // Expand this legacy predicate and wrap it around braces if there is more
+      // than one predicate to expand.
+      SS << ((NumNonTruePreds > 1) ? "(" : "")
+         << Rec->getValueAsString("Predicate")
+         << ((NumNonTruePreds > 1) ? ")" : "");
+    }
+
+    SS << ")\n"; // end of if-stmt
+    PE.decreaseIndentLevel();
+    SS.indent(PE.getIndentLevel() * 2);
+    PE.decreaseIndentLevel();
+  }
+
+  SS << "return " << T.ToClassIdx << "; // " << SC.Name << '\n';
+  OS << Buffer;
+}
+
+// Used by method `SubtargetEmitter::emitSchedModelHelpersImpl()` to generate
+// epilogue code for the auto-generated helper.
+static void emitSchedModelHelperEpilogue(raw_ostream &OS,
+                                         bool ShouldReturnZero) {
+  if (!ShouldReturnZero) {
+    OS << "  report_fatal_error(\"Expected a variant SchedClass\");\n";
+    return;
+  }
+
+  // The MC variant of the helper falls back to class zero when unresolved.
+  OS << "  // Don't know how to resolve this scheduling class.\n"
+     << "  return 0;\n";
+}
+
+// True when every predicate term of transition T is an MCSchedPredicate.
+static bool hasMCSchedPredicates(const CodeGenSchedTransition &T) {
+  for (const Record *Rec : T.PredTerm)
+    if (!Rec->isSubClassOf("MCSchedPredicate"))
+      return false;
+  return true;
+}
+
+// Collect the indices of all variant scheduling classes (those that define at
+// least one transition). When OnlyExpandMCInstPredicates is set, variant
+// classes whose transitions never use MCSchedPredicate are excluded.
+static void collectVariantClasses(const CodeGenSchedModels &SchedModels,
+                                  IdxVec &VariantClasses,
+                                  bool OnlyExpandMCInstPredicates) {
+  for (const CodeGenSchedClass &SC : SchedModels.schedClasses()) {
+    // Ignore non-variant scheduling classes.
+    if (SC.Transitions.empty())
+      continue;
+
+    if (OnlyExpandMCInstPredicates) {
+      // Ignore this variant scheduling class if no transitions use any
+      // meaningful MCSchedPredicate definitions.
+      if (llvm::none_of(SC.Transitions, hasMCSchedPredicates))
+        continue;
+    }
+
+    VariantClasses.push_back(SC.Index);
+  }
+}
+
+static void collectProcessorIndices(const CodeGenSchedClass &SC,
+                                    IdxVec &ProcIndices) {
+  // A variant scheduling class may define transitions for multiple
+  // processors. This function identifies which processors are associated with
+  // transition rules specified by variant class `SC`.
+  for (const CodeGenSchedTransition &T : SC.Transitions) {
+    IdxVec PI;
+    // Merge this transition's single processor index into the sorted set,
+    // keeping ProcIndices sorted and duplicate-free.
+    std::set_union(&T.ProcIndex, &T.ProcIndex + 1, ProcIndices.begin(),
+                   ProcIndices.end(), std::back_inserter(PI));
+    ProcIndices.swap(PI);
+  }
+}
+
+// A transition is unconditionally taken when each of its predicate terms is
+// a trivially-true MCSchedPredicate.
+static bool isAlwaysTrue(const CodeGenSchedTransition &T) {
+  return !llvm::any_of(T.PredTerm, [](const Record *Rec) {
+    return !isTruePredicate(Rec);
+  });
+}
+
+// Emit the body of the scheduling-class resolution helper: a switch over the
+// variant scheduling classes, with per-processor guards and per-transition
+// predicate checks. When OnlyExpandMCInstPredicates is set, generate the MC
+// flavor (guarded by CPUID, MCSchedPredicate-only) used by llvm-mca.
+void SubtargetEmitter::emitSchedModelHelpersImpl(
+    raw_ostream &OS, bool OnlyExpandMCInstPredicates) {
+  IdxVec VariantClasses;
+  collectVariantClasses(SchedModels, VariantClasses,
+                        OnlyExpandMCInstPredicates);
+
+  // No variant classes: the helper body reduces to the epilogue alone.
+  if (VariantClasses.empty()) {
+    emitSchedModelHelperEpilogue(OS, OnlyExpandMCInstPredicates);
+    return;
+  }
+
+  // Construct a switch statement where the condition is a check on the
+  // scheduling class identifier. There is a `case` for every variant class
+  // defined by the processor models of this target.
+  // Each `case` implements a number of rules to resolve (i.e. to transition from)
+  // a variant scheduling class to another scheduling class. Rules are
+  // described by instances of CodeGenSchedTransition. Note that transitions may
+  // not be valid for all processors.
+  OS << "  switch (SchedClass) {\n";
+  for (unsigned VC : VariantClasses) {
+    IdxVec ProcIndices;
+    const CodeGenSchedClass &SC = SchedModels.getSchedClass(VC);
+    collectProcessorIndices(SC, ProcIndices);
+
+    OS << "  case " << VC << ": // " << SC.Name << '\n';
+
+    PredicateExpander PE(Target);
+    PE.setByRef(false);
+    PE.setExpandForMC(OnlyExpandMCInstPredicates);
+    for (unsigned PI : ProcIndices) {
+      OS << "    ";
+
+      // Emit a guard on the processor ID.
+      if (PI != 0) {
+        OS << (OnlyExpandMCInstPredicates
+                   ? "if (CPUID == "
+                   : "if (SchedModel->getProcessorID() == ");
+        OS << PI << ") ";
+        OS << "{ // " << (SchedModels.procModelBegin() + PI)->ModelName << '\n';
+      }
+
+      // Now emit transitions associated with processor PI.
+      const CodeGenSchedTransition *FinalT = nullptr;
+      for (const CodeGenSchedTransition &T : SC.Transitions) {
+        if (PI != 0 && T.ProcIndex != PI)
+          continue;
+
+        // Emit only transitions based on MCSchedPredicate, if it's the case.
+        // At least the transition specified by NoSchedPred is emitted,
+        // which becomes the default transition for those variants otherwise
+        // not based on MCSchedPredicate.
+        // FIXME: preferably, llvm-mca should instead assume a reasonable
+        // default when a variant transition is not based on MCSchedPredicate
+        // for a given processor.
+        if (OnlyExpandMCInstPredicates && !hasMCSchedPredicates(T))
+          continue;
+
+        // If transition is folded to 'return X' it should be the last one.
+        if (isAlwaysTrue(T)) {
+          FinalT = &T;
+          continue;
+        }
+        PE.setIndentLevel(3);
+        emitPredicates(T, SchedModels.getSchedClass(T.ToClassIdx), PE, OS);
+      }
+      // Emit the unconditional transition last so it acts as the fallback.
+      if (FinalT)
+        emitPredicates(*FinalT, SchedModels.getSchedClass(FinalT->ToClassIdx),
+                       PE, OS);
+
+      OS << "    }\n";
+
+      // Index 0 means "all processors"; no further per-processor guards.
+      if (PI == 0)
+        break;
+    }
+
+    if (SC.isInferred())
+      OS << "    return " << SC.Index << ";\n";
+    OS << "    break;\n";
+  }
+
+  OS << "  };\n";
+
+  emitSchedModelHelperEpilogue(OS, OnlyExpandMCInstPredicates);
+}
+
+// Emit <ClassName>::resolveSchedClass (for MachineInstr) and a
+// resolveVariantSchedClass that forwards to the shared <Target>_MC helper,
+// followed by out-of-line definitions of the target's STI predicates.
+void SubtargetEmitter::EmitSchedModelHelpers(const std::string &ClassName,
+                                             raw_ostream &OS) {
+  OS << "unsigned " << ClassName
+     << "\n::resolveSchedClass(unsigned SchedClass, const MachineInstr *MI,"
+     << " const TargetSchedModel *SchedModel) const {\n";
+
+  // Emit the predicate prolog code.
+  emitPredicateProlog(Records, OS);
+
+  // Emit target predicates.
+  emitSchedModelHelpersImpl(OS);
+
+  OS << "} // " << ClassName << "::resolveSchedClass\n\n";
+
+  OS << "unsigned " << ClassName
+     << "\n::resolveVariantSchedClass(unsigned SchedClass, const MCInst *MI,"
+     << " const MCInstrInfo *MCII, unsigned CPUID) const {\n"
+     << "  return " << Target << "_MC"
+     << "::resolveVariantSchedClassImpl(SchedClass, MI, MCII, CPUID);\n"
+     << "} // " << ClassName << "::resolveVariantSchedClass\n\n";
+
+  STIPredicateExpander PE(Target);
+  PE.setClassPrefix(ClassName);
+  PE.setExpandDefinition(true);
+  PE.setByRef(false);
+  PE.setIndentLevel(0);
+
+  for (const STIPredicateFunction &Fn : SchedModels.getSTIPredicates())
+    PE.expandSTIPredicate(OS, Fn);
+}
+
+// Emit <ClassName>::getHwMode(), returning the first hardware mode whose
+// feature string matches this subtarget. Targets with a single mode emit no
+// override at all (callers keep the base-class default).
+void SubtargetEmitter::EmitHwModeCheck(const std::string &ClassName,
+                                       raw_ostream &OS) {
+  const CodeGenHwModes &CGH = TGT.getHwModes();
+  assert(CGH.getNumModeIds() > 0);
+  if (CGH.getNumModeIds() == 1)
+    return;
+
+  OS << "unsigned " << ClassName << "::getHwMode() const {\n";
+  // Mode 0 is the default; test modes 1..N-1 in order.
+  for (unsigned M = 1, NumModes = CGH.getNumModeIds(); M != NumModes; ++M) {
+    const HwMode &HM = CGH.getMode(M);
+    OS << "  if (checkFeatures(\"" << HM.Features
+       << "\")) return " << M << ";\n";
+  }
+  OS << "  return 0;\n}\n";
+}
+
+// Produces a subtarget specific function for parsing
+// the subtarget features string.
+void SubtargetEmitter::ParseFeaturesFunction(raw_ostream &OS) {
+  std::vector<Record*> Features =
+      Records.getAllDerivedDefinitions("SubtargetFeature");
+  llvm::sort(Features, LessRecord());
+
+  OS << "// ParseSubtargetFeatures - Parses features string setting specified\n"
+     << "// subtarget options.\n"
+     << "void llvm::";
+  OS << Target;
+  OS << "Subtarget::ParseSubtargetFeatures(StringRef CPU, StringRef TuneCPU, "
+     << "StringRef FS) {\n"
+     << "  LLVM_DEBUG(dbgs() << \"\\nFeatures:\" << FS);\n"
+     << "  LLVM_DEBUG(dbgs() << \"\\nCPU:\" << CPU);\n"
+     << "  LLVM_DEBUG(dbgs() << \"\\nTuneCPU:\" << TuneCPU << \"\\n\\n\");\n";
+
+  // With no features the generated function only logs; close it and return.
+  if (Features.empty()) {
+    OS << "}\n";
+    return;
+  }
+
+  OS << "  InitMCProcessorInfo(CPU, TuneCPU, FS);\n"
+     << "  const FeatureBitset &Bits = getFeatureBits();\n";
+
+  for (Record *R : Features) {
+    // Next record
+    StringRef Instance = R->getName();
+    StringRef Value = R->getValueAsString("Value");
+    StringRef Attribute = R->getValueAsString("Attribute");
+
+    // Boolean features assign directly; other values only raise the
+    // attribute (the `<` test keeps the maximum value seen).
+    if (Value=="true" || Value=="false")
+      OS << "  if (Bits[" << Target << "::"
+         << Instance << "]) "
+         << Attribute << " = " << Value << ";\n";
+    else
+      OS << "  if (Bits[" << Target << "::"
+         << Instance << "] && "
+         << Attribute << " < " << Value << ") "
+         << Attribute << " = " << Value << ";\n";
+  }
+
+  OS << "}\n";
+}
+
+// Emit the <Target>_MC::resolveVariantSchedClassImpl helper and the
+// <Target>GenMCSubtargetInfo subclass of MCSubtargetInfo that delegates to it.
+void SubtargetEmitter::emitGenMCSubtargetInfo(raw_ostream &OS) {
+  OS << "namespace " << Target << "_MC {\n"
+     << "unsigned resolveVariantSchedClassImpl(unsigned SchedClass,\n"
+     << "    const MCInst *MI, const MCInstrInfo *MCII, unsigned CPUID) {\n";
+  emitSchedModelHelpersImpl(OS, /* OnlyExpandMCPredicates */ true);
+  OS << "}\n";
+  OS << "} // end namespace " << Target << "_MC\n\n";
+
+  OS << "struct " << Target
+     << "GenMCSubtargetInfo : public MCSubtargetInfo {\n";
+  OS << "  " << Target << "GenMCSubtargetInfo(const Triple &TT,\n"
+     << "    StringRef CPU, StringRef TuneCPU, StringRef FS,\n"
+     << "    ArrayRef<SubtargetFeatureKV> PF,\n"
+     << "    ArrayRef<SubtargetSubTypeKV> PD,\n"
+     << "    const MCWriteProcResEntry *WPR,\n"
+     << "    const MCWriteLatencyEntry *WL,\n"
+     << "    const MCReadAdvanceEntry *RA, const InstrStage *IS,\n"
+     << "    const unsigned *OC, const unsigned *FP) :\n"
+     << "      MCSubtargetInfo(TT, CPU, TuneCPU, FS, PF, PD,\n"
+     << "                      WPR, WL, RA, IS, OC, FP) { }\n\n"
+     << "  unsigned resolveVariantSchedClass(unsigned SchedClass,\n"
+     << "      const MCInst *MI, const MCInstrInfo *MCII,\n"
+     << "      unsigned CPUID) const override {\n"
+     << "    return " << Target << "_MC"
+     << "::resolveVariantSchedClassImpl(SchedClass, MI, MCII, CPUID);\n";
+  OS << "  }\n";
+  // getHwMode is only declared when the target defines multiple HW modes;
+  // EmitHwModeCheck below emits the matching definition (or nothing).
+  if (TGT.getHwModes().getNumModeIds() > 1)
+    OS << "  unsigned getHwMode() const override;\n";
+  OS << "};\n";
+  EmitHwModeCheck(Target + "GenMCSubtargetInfo", OS);
+}
+
+// Emit the STI predicate declarations and out-of-line definitions for the
+// <Target>MCInstrAnalysis class, each section guarded by its own GET_* macro.
+void SubtargetEmitter::EmitMCInstrAnalysisPredicateFunctions(raw_ostream &OS) {
+  OS << "\n#ifdef GET_STIPREDICATE_DECLS_FOR_MC_ANALYSIS\n";
+  OS << "#undef GET_STIPREDICATE_DECLS_FOR_MC_ANALYSIS\n\n";
+
+  STIPredicateExpander PE(Target);
+  PE.setExpandForMC(true);
+  PE.setByRef(true);
+  for (const STIPredicateFunction &Fn : SchedModels.getSTIPredicates())
+    PE.expandSTIPredicate(OS, Fn);
+
+  OS << "#endif // GET_STIPREDICATE_DECLS_FOR_MC_ANALYSIS\n\n";
+
+  OS << "\n#ifdef GET_STIPREDICATE_DEFS_FOR_MC_ANALYSIS\n";
+  OS << "#undef GET_STIPREDICATE_DEFS_FOR_MC_ANALYSIS\n\n";
+
+  // Re-use the same expander, switching it to definition mode.
+  std::string ClassPrefix = Target + "MCInstrAnalysis";
+  PE.setExpandDefinition(true);
+  PE.setClassPrefix(ClassPrefix);
+  PE.setIndentLevel(0);
+  for (const STIPredicateFunction &Fn : SchedModels.getSTIPredicates())
+    PE.expandSTIPredicate(OS, Fn);
+
+  OS << "#endif // GET_STIPREDICATE_DEFS_FOR_MC_ANALYSIS\n\n";
+}
+
+//
+// SubtargetEmitter::run - Main subtarget enumeration emitter.
+//
+void SubtargetEmitter::run(raw_ostream &OS) {
+  emitSourceFileHeader("Subtarget Enumeration Source Fragment", OS);
+
+  OS << "\n#ifdef GET_SUBTARGETINFO_ENUM\n";
+  OS << "#undef GET_SUBTARGETINFO_ENUM\n\n";
+
+  DenseMap<Record *, unsigned> FeatureMap;
+
+  OS << "namespace llvm {\n";
+  Enumeration(OS, FeatureMap);
+  OS << "} // end namespace llvm\n\n";
+  OS << "#endif // GET_SUBTARGETINFO_ENUM\n\n";
+
+  EmitSubtargetInfoMacroCalls(OS);
+
+  OS << "namespace llvm {\n";
+#if 0
+  OS << "namespace {\n";
+#endif
+  unsigned NumFeatures = FeatureKeyValues(OS, FeatureMap);
+  OS << "\n";
+  EmitSchedModel(OS);
+  OS << "\n";
+  unsigned NumProcs = CPUKeyValues(OS, FeatureMap);
+  OS << "\n";
+#if 0
+  OS << "} // end anonymous namespace\n\n";
+#endif
+
+  // MCInstrInfo initialization routine.
+  emitGenMCSubtargetInfo(OS);
+
+  OS << "\nstatic inline MCSubtargetInfo *create" << Target
+     << "MCSubtargetInfoImpl("
+     << "const Triple &TT, StringRef CPU, StringRef TuneCPU, StringRef FS) {\n";
+  OS << "  return new " << Target
+     << "GenMCSubtargetInfo(TT, CPU, TuneCPU, FS, ";
+  if (NumFeatures)
+    OS << Target << "FeatureKV, ";
+  else
+    OS << "std::nullopt, ";
+  if (NumProcs)
+    OS << Target << "SubTypeKV, ";
+  else
+    // NOTE(review): this arm emits "None" while the feature arm above emits
+    // "std::nullopt"; inconsistent spellings of the empty ArrayRef — verify
+    // against the LLVM version being targeted.
+    OS << "None, ";
+  OS << '\n'; OS.indent(22);
+  OS << Target << "WriteProcResTable, "
+     << Target << "WriteLatencyTable, "
+     << Target << "ReadAdvanceTable, ";
+  OS << '\n'; OS.indent(22);
+  if (SchedModels.hasItineraries()) {
+    OS << Target << "Stages, "
+       << Target << "OperandCycles, "
+       << Target << "ForwardingPaths";
+  } else
+    OS << "nullptr, nullptr, nullptr";
+  OS << ");\n}\n\n";
+
+  OS << "} // end namespace llvm\n\n";
+
+  OS << "#endif // GET_SUBTARGETINFO_MC_DESC\n\n";
+
+  OS << "\n#ifdef GET_SUBTARGETINFO_TARGET_DESC\n";
+  OS << "#undef GET_SUBTARGETINFO_TARGET_DESC\n\n";
+
+  OS << "#include \"llvm/Support/Debug.h\"\n";
+  OS << "#include \"llvm/Support/raw_ostream.h\"\n\n";
+  ParseFeaturesFunction(OS);
+
+  OS << "#endif // GET_SUBTARGETINFO_TARGET_DESC\n\n";
+
+  // Create a TargetSubtargetInfo subclass to hide the MC layer initialization.
+  OS << "\n#ifdef GET_SUBTARGETINFO_HEADER\n";
+  OS << "#undef GET_SUBTARGETINFO_HEADER\n\n";
+
+  std::string ClassName = Target + "GenSubtargetInfo";
+  OS << "namespace llvm {\n";
+  OS << "class DFAPacketizer;\n";
+  OS << "namespace " << Target << "_MC {\n"
+     << "unsigned resolveVariantSchedClassImpl(unsigned SchedClass,"
+     << " const MCInst *MI, const MCInstrInfo *MCII, unsigned CPUID);\n"
+     << "} // end namespace " << Target << "_MC\n\n";
+  OS << "struct " << ClassName << " : public TargetSubtargetInfo {\n"
+     << "  explicit " << ClassName << "(const Triple &TT, StringRef CPU, "
+     << "StringRef TuneCPU, StringRef FS);\n"
+     << "public:\n"
+     << "  unsigned resolveSchedClass(unsigned SchedClass, "
+     << " const MachineInstr *DefMI,"
+     << " const TargetSchedModel *SchedModel) const override;\n"
+     << "  unsigned resolveVariantSchedClass(unsigned SchedClass,"
+     << " const MCInst *MI, const MCInstrInfo *MCII,"
+     << " unsigned CPUID) const override;\n"
+     << "  DFAPacketizer *createDFAPacketizer(const InstrItineraryData *IID)"
+     << " const;\n";
+  if (TGT.getHwModes().getNumModeIds() > 1)
+    OS << "  unsigned getHwMode() const override;\n";
+
+  // Declare the target's STI predicate functions inside the class.
+  STIPredicateExpander PE(Target);
+  PE.setByRef(false);
+  for (const STIPredicateFunction &Fn : SchedModels.getSTIPredicates())
+    PE.expandSTIPredicate(OS, Fn);
+
+  OS << "};\n"
+     << "} // end namespace llvm\n\n";
+
+  OS << "#endif // GET_SUBTARGETINFO_HEADER\n\n";
+
+  OS << "\n#ifdef GET_SUBTARGETINFO_CTOR\n";
+  OS << "#undef GET_SUBTARGETINFO_CTOR\n\n";
+
+  OS << "#include \"llvm/CodeGen/TargetSchedule.h\"\n\n";
+  OS << "namespace llvm {\n";
+  OS << "extern const llvm::SubtargetFeatureKV " << Target << "FeatureKV[];\n";
+  OS << "extern const llvm::SubtargetSubTypeKV " << Target << "SubTypeKV[];\n";
+  OS << "extern const llvm::MCWriteProcResEntry "
+     << Target << "WriteProcResTable[];\n";
+  OS << "extern const llvm::MCWriteLatencyEntry "
+     << Target << "WriteLatencyTable[];\n";
+  OS << "extern const llvm::MCReadAdvanceEntry "
+     << Target << "ReadAdvanceTable[];\n";
+
+  if (SchedModels.hasItineraries()) {
+    OS << "extern const llvm::InstrStage " << Target << "Stages[];\n";
+    OS << "extern const unsigned " << Target << "OperandCycles[];\n";
+    OS << "extern const unsigned " << Target << "ForwardingPaths[];\n";
+  }
+
+  OS << ClassName << "::" << ClassName << "(const Triple &TT, StringRef CPU, "
+     << "StringRef TuneCPU, StringRef FS)\n"
+     << "  : TargetSubtargetInfo(TT, CPU, TuneCPU, FS, ";
+  if (NumFeatures)
+    OS << "ArrayRef(" << Target << "FeatureKV, " << NumFeatures << "), ";
+  else
+    OS << "std::nullopt, ";
+  if (NumProcs)
+    OS << "ArrayRef(" << Target << "SubTypeKV, " << NumProcs << "), ";
+  else
+    // NOTE(review): same "None" vs "std::nullopt" inconsistency as in the MC
+    // factory above — verify against the LLVM version being targeted.
+    OS << "None, ";
+  OS << '\n'; OS.indent(24);
+  OS << Target << "WriteProcResTable, "
+     << Target << "WriteLatencyTable, "
+     << Target << "ReadAdvanceTable, ";
+  OS << '\n'; OS.indent(24);
+  if (SchedModels.hasItineraries()) {
+    OS << Target << "Stages, "
+       << Target << "OperandCycles, "
+       << Target << "ForwardingPaths";
+  } else
+    OS << "nullptr, nullptr, nullptr";
+  OS << ") {}\n\n";
+
+  EmitSchedModelHelpers(ClassName, OS);
+  EmitHwModeCheck(ClassName, OS);
+
+  OS << "} // end namespace llvm\n\n";
+
+  OS << "#endif // GET_SUBTARGETINFO_CTOR\n\n";
+
+  EmitMCInstrAnalysisPredicateFunctions(OS);
+}
+
+namespace llvm {
+
+// Backend entry point: build the CodeGenTarget model from the parsed records
+// and run the subtarget emitter over it.
+void EmitSubtarget(RecordKeeper &RK, raw_ostream &OS) {
+  CodeGenTarget CGTarget(RK);
+  SubtargetEmitter(RK, CGTarget).run(OS);
+}
+
+} // end namespace llvm
diff --git a/contrib/libs/llvm16/utils/TableGen/SubtargetFeatureInfo.cpp b/contrib/libs/llvm16/utils/TableGen/SubtargetFeatureInfo.cpp
new file mode 100644
index 0000000000..2a63fc4903
--- /dev/null
+++ b/contrib/libs/llvm16/utils/TableGen/SubtargetFeatureInfo.cpp
@@ -0,0 +1,166 @@
+//===- SubtargetFeatureInfo.cpp - Helpers for subtarget features ----------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+#include "SubtargetFeatureInfo.h"
+#include "Types.h"
+#include "llvm/Config/llvm-config.h"
+#include "llvm/TableGen/Error.h"
+#include "llvm/TableGen/Record.h"
+#include <map>
+
+using namespace llvm;
+
+#if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
+// Debug helper: print this feature's enum name, its index, and the full
+// defining record.
+LLVM_DUMP_METHOD void SubtargetFeatureInfo::dump() const {
+  errs() << getEnumName() << " " << Index << "\n" << *TheDef;
+}
+#endif
+
+// Collect every assembler-relevant, non-trivial Predicate record, pairing
+// each with a SubtargetFeatureInfo whose Index is its discovery order.
+std::vector<std::pair<Record *, SubtargetFeatureInfo>>
+SubtargetFeatureInfo::getAll(const RecordKeeper &Records) {
+  std::vector<std::pair<Record *, SubtargetFeatureInfo>> Features;
+  for (Record *Pred : Records.getAllDerivedDefinitions("Predicate")) {
+    // Only predicates marked for the assembler matcher participate.
+    //
+    // The "AssemblerMatcherPredicate" string should be promoted to an argument
+    // if we re-use the machinery for non-assembler purposes in future.
+    if (!Pred->getValueAsBit("AssemblerMatcherPredicate"))
+      continue;
+
+    if (Pred->getName().empty())
+      PrintFatalError(Pred->getLoc(), "Predicate has no name!");
+
+    // A predicate with an empty condition string is always true; skip it.
+    if (Pred->getValueAsString("CondString").empty())
+      continue;
+
+    Features.emplace_back(Pred,
+                          SubtargetFeatureInfo(Pred, Features.size()));
+  }
+  return Features;
+}
+
+// Emit the SubtargetFeatureBits enum: one bit index per matcher feature,
+// sized to the smallest integer type that can hold all indices.
+void SubtargetFeatureInfo::emitSubtargetFeatureBitEnumeration(
+    SubtargetFeatureInfoMap &SubtargetFeatures, raw_ostream &OS) {
+  OS << "// Bits for subtarget features that participate in "
+     << "instruction matching.\n"
+     << "enum SubtargetFeatureBits : "
+     << getMinimalTypeForRange(SubtargetFeatures.size()) << " {\n";
+  for (const auto &Entry : SubtargetFeatures) {
+    const SubtargetFeatureInfo &Info = Entry.second;
+    OS << "  " << Info.getEnumBitName() << " = " << Info.Index << ",\n";
+  }
+  OS << "};\n\n";
+}
+
+// Emit the SubtargetFeatureNames[] table, ordered so the entry at position n
+// is the name of the feature whose bit index is n.
+void SubtargetFeatureInfo::emitNameTable(
+    SubtargetFeatureInfoMap &SubtargetFeatures, raw_ostream &OS) {
+  // Need to sort the name table so that lookup by the log of the enum value
+  // gives the proper name. More specifically, for a feature of value 1<<n,
+  // SubtargetFeatureNames[n] should be the name of the feature.
+  uint64_t IndexUB = 0;
+  for (const auto &SF : SubtargetFeatures)
+    if (IndexUB <= SF.second.Index)
+      IndexUB = SF.second.Index+1;
+
+  // Scatter the names into index order before emission.
+  std::vector<std::string> Names;
+  if (IndexUB > 0)
+    Names.resize(IndexUB);
+  for (const auto &SF : SubtargetFeatures)
+    Names[SF.second.Index] = SF.second.getEnumName();
+
+  OS << "static const char *SubtargetFeatureNames[] = {\n";
+  for (uint64_t I = 0; I < IndexUB; ++I)
+    OS << "  \"" << Names[I] << "\",\n";
+
+  // A small number of targets have no predicates. Null terminate the array to
+  // avoid a zero-length array.
+  OS << "  nullptr\n"
+     << "};\n\n";
+}
+
+// Emit <TargetName><ClassName>::<FuncName>: computes a PredicateBitset from a
+// Subtarget by evaluating each feature's CondString C++ expression.
+// ExtraParams, when non-empty, is appended verbatim to the parameter list.
+void SubtargetFeatureInfo::emitComputeAvailableFeatures(
+    StringRef TargetName, StringRef ClassName, StringRef FuncName,
+    SubtargetFeatureInfoMap &SubtargetFeatures, raw_ostream &OS,
+    StringRef ExtraParams) {
+  OS << "PredicateBitset " << TargetName << ClassName << "::\n"
+     << FuncName << "(const " << TargetName << "Subtarget *Subtarget";
+  if (!ExtraParams.empty())
+    OS << ", " << ExtraParams;
+  OS << ") const {\n";
+  OS << "  PredicateBitset Features;\n";
+  for (const auto &SF : SubtargetFeatures) {
+    const SubtargetFeatureInfo &SFI = SF.second;
+    // getAll() already dropped predicates with an empty CondString.
+    StringRef CondStr = SFI.TheDef->getValueAsString("CondString");
+    assert(!CondStr.empty() && "true predicate should have been filtered");
+
+    OS << "  if (" << CondStr << ")\n";
+    OS << "    Features.set(" << SFI.getEnumBitName() << ");\n";
+  }
+  OS << "  return Features;\n";
+  OS << "}\n\n";
+}
+
+// If ParenIfBinOp is true, print a surrounding () if Val uses && or ||.
+// Returns true on failure (Val is not a form this emitter understands) and
+// false on success; on failure the output may already be partially written.
+static bool emitFeaturesAux(StringRef TargetName, const Init &Val,
+                            bool ParenIfBinOp, raw_ostream &OS) {
+  // Leaf: a SubtargetFeature def becomes an FB[] bit test.
+  if (auto *D = dyn_cast<DefInit>(&Val)) {
+    if (!D->getDef()->isSubClassOf("SubtargetFeature"))
+      return true;
+    OS << "FB[" << TargetName << "::" << D->getAsString() << "]";
+    return false;
+  }
+  if (auto *D = dyn_cast<DagInit>(&Val)) {
+    std::string Op = D->getOperator()->getAsString();
+    if (Op == "not" && D->getNumArgs() == 1) {
+      OS << '!';
+      // The negated operand always needs parens if it is a binary op.
+      return emitFeaturesAux(TargetName, *D->getArg(0), true, OS);
+    }
+    if ((Op == "any_of" || Op == "all_of") && D->getNumArgs() > 0) {
+      // std::exchange both reads the caller's request and forces parens for
+      // any nested multi-operand expression passed down below.
+      bool Paren = D->getNumArgs() > 1 && std::exchange(ParenIfBinOp, true);
+      if (Paren)
+        OS << '(';
+      ListSeparator LS(Op == "any_of" ? " || " : " && ");
+      for (auto *Arg : D->getArgs()) {
+        OS << LS;
+        if (emitFeaturesAux(TargetName, *Arg, ParenIfBinOp, OS))
+          return true;
+      }
+      if (Paren)
+        OS << ')';
+      return false;
+    }
+  }
+  // Any other Init kind is unsupported.
+  return true;
+}
+
+// Emit <FuncName>: maps a raw FeatureBitset onto the matcher's feature bits
+// by evaluating each feature's AssemblerCondDag against the input bits. When
+// ClassName is non-empty the function is emitted as a const member of
+// <TargetName><ClassName>.
+void SubtargetFeatureInfo::emitComputeAssemblerAvailableFeatures(
+    StringRef TargetName, StringRef ClassName, StringRef FuncName,
+    SubtargetFeatureInfoMap &SubtargetFeatures, raw_ostream &OS) {
+  const bool IsMember = !ClassName.empty();
+
+  // Signature: qualify with the class name (and const) when a member.
+  OS << "FeatureBitset ";
+  if (IsMember)
+    OS << TargetName << ClassName << "::\n";
+  OS << FuncName << "(const FeatureBitset &FB) ";
+  if (IsMember)
+    OS << "const ";
+  OS << "{\n"
+     << "  FeatureBitset Features;\n";
+
+  for (const auto &Entry : SubtargetFeatures) {
+    const SubtargetFeatureInfo &Info = Entry.second;
+    OS << "  if (";
+    emitFeaturesAux(TargetName,
+                    *Info.TheDef->getValueAsDag("AssemblerCondDag"),
+                    /*ParenIfBinOp=*/false, OS);
+    OS << ")\n"
+       << "    Features.set(" << Info.getEnumBitName() << ");\n";
+  }
+  OS << "  return Features;\n"
+     << "}\n\n";
+}
diff --git a/contrib/libs/llvm16/utils/TableGen/SubtargetFeatureInfo.h b/contrib/libs/llvm16/utils/TableGen/SubtargetFeatureInfo.h
new file mode 100644
index 0000000000..8c8a448793
--- /dev/null
+++ b/contrib/libs/llvm16/utils/TableGen/SubtargetFeatureInfo.h
@@ -0,0 +1,101 @@
+//===- SubtargetFeatureInfo.h - Helpers for subtarget features --*- C++ -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_UTIL_TABLEGEN_SUBTARGETFEATUREINFO_H
+#define LLVM_UTIL_TABLEGEN_SUBTARGETFEATUREINFO_H
+
+#include "llvm/TableGen/Record.h"
+#include <map>
+#include <string>
+#include <vector>
+
+namespace llvm {
+struct SubtargetFeatureInfo;
+using SubtargetFeatureInfoMap = std::map<Record *, SubtargetFeatureInfo, LessRecordByID>;
+
+/// Helper class for storing information on a subtarget feature which
+/// participates in instruction matching.
+struct SubtargetFeatureInfo {
+  /// The predicate record for this feature.
+  Record *TheDef;
+
+  /// A unique index assigned to represent this feature.
+  uint64_t Index;
+
+  SubtargetFeatureInfo(Record *D, uint64_t Idx) : TheDef(D), Index(Idx) {}
+
+  /// The name of the enumerated constant identifying this feature.
+  std::string getEnumName() const {
+    return "Feature_" + TheDef->getName().str();
+  }
+
+  /// The name of the enumerated constant identifying the bitnumber for
+  /// this feature.
+  std::string getEnumBitName() const {
+    return "Feature_" + TheDef->getName().str() + "Bit";
+  }
+
+  /// Whether this feature's availability must be recomputed per function
+  /// (reads the record's RecomputePerFunction bit).
+  bool mustRecomputePerFunction() const {
+    return TheDef->getValueAsBit("RecomputePerFunction");
+  }
+
+  void dump() const;
+  static std::vector<std::pair<Record *, SubtargetFeatureInfo>>
+  getAll(const RecordKeeper &Records);
+
+  /// Emit the subtarget feature flag definitions.
+  ///
+  /// This version emits the bit index for the feature and can therefore support
+  /// more than 64 feature bits.
+  static void
+  emitSubtargetFeatureBitEnumeration(SubtargetFeatureInfoMap &SubtargetFeatures,
+                                     raw_ostream &OS);
+
+  static void emitNameTable(SubtargetFeatureInfoMap &SubtargetFeatures,
+                            raw_ostream &OS);
+
+  /// Emit the function to compute the list of available features given a
+  /// subtarget.
+  ///
+  /// This version is used for subtarget features defined using Predicate<>
+  /// and supports more than 64 feature bits.
+  ///
+  /// \param TargetName The name of the target as used in class prefixes (e.g.
+  ///                   <TargetName>Subtarget)
+  /// \param ClassName  The name of the class (without the <Target> prefix)
+  ///                   that will contain the generated functions.
+  /// \param FuncName   The name of the function to emit.
+  /// \param SubtargetFeatures A map of TableGen records to the
+  ///                          SubtargetFeatureInfo equivalent.
+  /// \param ExtraParams Additional arguments to the generated function.
+  static void
+  emitComputeAvailableFeatures(StringRef TargetName, StringRef ClassName,
+                               StringRef FuncName,
+                               SubtargetFeatureInfoMap &SubtargetFeatures,
+                               raw_ostream &OS, StringRef ExtraParams = "");
+
+  /// Emit the function to compute the list of available features given a
+  /// subtarget.
+  ///
+  /// This version is used for subtarget features defined using
+  /// AssemblerPredicate<> and supports up to 64 feature bits.
+  ///
+  /// \param TargetName The name of the target as used in class prefixes (e.g.
+  ///                   <TargetName>Subtarget)
+  /// \param ClassName  The name of the class (without the <Target> prefix)
+  ///                   that will contain the generated functions.
+  /// \param FuncName   The name of the function to emit.
+  /// \param SubtargetFeatures A map of TableGen records to the
+  ///                          SubtargetFeatureInfo equivalent.
+  static void emitComputeAssemblerAvailableFeatures(
+      StringRef TargetName, StringRef ClassName, StringRef FuncName,
+      SubtargetFeatureInfoMap &SubtargetFeatures, raw_ostream &OS);
+};
+} // end namespace llvm
+
+#endif // LLVM_UTIL_TABLEGEN_SUBTARGETFEATUREINFO_H
diff --git a/contrib/libs/llvm16/utils/TableGen/TableGen.cpp b/contrib/libs/llvm16/utils/TableGen/TableGen.cpp
new file mode 100644
index 0000000000..746e2dd1db
--- /dev/null
+++ b/contrib/libs/llvm16/utils/TableGen/TableGen.cpp
@@ -0,0 +1,312 @@
+//===- TableGen.cpp - Top-Level TableGen implementation for LLVM ----------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// This file contains the main function for LLVM's TableGen.
+//
+//===----------------------------------------------------------------------===//
+
+#include "TableGenBackends.h" // Declares all backends.
+#include "llvm/Support/CommandLine.h"
+#include "llvm/Support/InitLLVM.h"
+#include "llvm/TableGen/Main.h"
+#include "llvm/TableGen/Record.h"
+#include "llvm/TableGen/SetTheory.h"
+
+using namespace llvm;
+
+enum ActionType {
+ PrintRecords,
+ PrintDetailedRecords,
+ NullBackend,
+ DumpJSON,
+ GenEmitter,
+ GenRegisterInfo,
+ GenInstrInfo,
+ GenInstrDocs,
+ GenAsmWriter,
+ GenAsmMatcher,
+ GenDisassembler,
+ GenPseudoLowering,
+ GenCompressInst,
+ GenCallingConv,
+ GenDAGISel,
+ GenDFAPacketizer,
+ GenFastISel,
+ GenSubtarget,
+ GenIntrinsicEnums,
+ GenIntrinsicImpl,
+ PrintEnums,
+ PrintSets,
+ GenOptParserDefs,
+ GenOptRST,
+ GenCTags,
+ GenAttributes,
+ GenSearchableTables,
+ GenGlobalISel,
+ GenGICombiner,
+ GenX86EVEX2VEXTables,
+ GenX86FoldTables,
+ GenX86MnemonicTables,
+ GenRegisterBank,
+ GenExegesis,
+ GenAutomata,
+ GenDirectivesEnumDecl,
+ GenDirectivesEnumImpl,
+ GenDXILOperation,
+ GenRISCVTargetDef,
+};
+
+namespace llvm {
+cl::opt<bool> EmitLongStrLiterals(
+ "long-string-literals",
+ cl::desc("when emitting large string tables, prefer string literals over "
+ "comma-separated char literals. This can be a readability and "
+ "compile-time performance win, but upsets some compilers"),
+ cl::Hidden, cl::init(true));
+} // end namespace llvm
+
+namespace {
+cl::opt<ActionType> Action(
+ cl::desc("Action to perform:"),
+ cl::values(
+ clEnumValN(PrintRecords, "print-records",
+ "Print all records to stdout (default)"),
+ clEnumValN(PrintDetailedRecords, "print-detailed-records",
+ "Print full details of all records to stdout"),
+ clEnumValN(NullBackend, "null-backend",
+ "Do nothing after parsing (useful for timing)"),
+ clEnumValN(DumpJSON, "dump-json",
+ "Dump all records as machine-readable JSON"),
+ clEnumValN(GenEmitter, "gen-emitter", "Generate machine code emitter"),
+ clEnumValN(GenRegisterInfo, "gen-register-info",
+ "Generate registers and register classes info"),
+ clEnumValN(GenInstrInfo, "gen-instr-info",
+ "Generate instruction descriptions"),
+ clEnumValN(GenInstrDocs, "gen-instr-docs",
+ "Generate instruction documentation"),
+ clEnumValN(GenCallingConv, "gen-callingconv",
+ "Generate calling convention descriptions"),
+ clEnumValN(GenAsmWriter, "gen-asm-writer", "Generate assembly writer"),
+ clEnumValN(GenDisassembler, "gen-disassembler",
+ "Generate disassembler"),
+ clEnumValN(GenPseudoLowering, "gen-pseudo-lowering",
+ "Generate pseudo instruction lowering"),
+ clEnumValN(GenCompressInst, "gen-compress-inst-emitter",
+ "Generate RISCV compressed instructions."),
+ clEnumValN(GenAsmMatcher, "gen-asm-matcher",
+ "Generate assembly instruction matcher"),
+ clEnumValN(GenDAGISel, "gen-dag-isel",
+ "Generate a DAG instruction selector"),
+ clEnumValN(GenDFAPacketizer, "gen-dfa-packetizer",
+ "Generate DFA Packetizer for VLIW targets"),
+ clEnumValN(GenFastISel, "gen-fast-isel",
+ "Generate a \"fast\" instruction selector"),
+ clEnumValN(GenSubtarget, "gen-subtarget",
+ "Generate subtarget enumerations"),
+ clEnumValN(GenIntrinsicEnums, "gen-intrinsic-enums",
+ "Generate intrinsic enums"),
+ clEnumValN(GenIntrinsicImpl, "gen-intrinsic-impl",
+ "Generate intrinsic information"),
+ clEnumValN(PrintEnums, "print-enums", "Print enum values for a class"),
+ clEnumValN(PrintSets, "print-sets",
+ "Print expanded sets for testing DAG exprs"),
+ clEnumValN(GenOptParserDefs, "gen-opt-parser-defs",
+ "Generate option definitions"),
+ clEnumValN(GenOptRST, "gen-opt-rst", "Generate option RST"),
+ clEnumValN(GenCTags, "gen-ctags", "Generate ctags-compatible index"),
+ clEnumValN(GenAttributes, "gen-attrs", "Generate attributes"),
+ clEnumValN(GenSearchableTables, "gen-searchable-tables",
+ "Generate generic binary-searchable table"),
+ clEnumValN(GenGlobalISel, "gen-global-isel",
+ "Generate GlobalISel selector"),
+ clEnumValN(GenGICombiner, "gen-global-isel-combiner",
+ "Generate GlobalISel combiner"),
+ clEnumValN(GenX86EVEX2VEXTables, "gen-x86-EVEX2VEX-tables",
+ "Generate X86 EVEX to VEX compress tables"),
+ clEnumValN(GenX86FoldTables, "gen-x86-fold-tables",
+ "Generate X86 fold tables"),
+ clEnumValN(GenX86MnemonicTables, "gen-x86-mnemonic-tables",
+ "Generate X86 mnemonic tables"),
+ clEnumValN(GenRegisterBank, "gen-register-bank",
+ "Generate registers bank descriptions"),
+ clEnumValN(GenExegesis, "gen-exegesis",
+ "Generate llvm-exegesis tables"),
+ clEnumValN(GenAutomata, "gen-automata", "Generate generic automata"),
+ clEnumValN(GenDirectivesEnumDecl, "gen-directive-decl",
+ "Generate directive related declaration code (header file)"),
+ clEnumValN(GenDirectivesEnumImpl, "gen-directive-impl",
+ "Generate directive related implementation code"),
+ clEnumValN(GenDXILOperation, "gen-dxil-operation",
+ "Generate DXIL operation information"),
+ clEnumValN(GenRISCVTargetDef, "gen-riscv-target-def",
+ "Generate the list of CPU for RISCV")));
+cl::OptionCategory PrintEnumsCat("Options for -print-enums");
+cl::opt<std::string> Class("class", cl::desc("Print Enum list for this class"),
+ cl::value_desc("class name"),
+ cl::cat(PrintEnumsCat));
+
+// Dispatch the action selected on the command line (`-gen-*` / `-print-*`)
+// to the corresponding TableGen backend.
+//
+// \param OS      Stream receiving the generated output.
+// \param Records Parsed records of the input .td file.
+// \returns false unconditionally (success); backends report their own
+//          failures via PrintFatalError and never return an error here.
+bool LLVMTableGenMain(raw_ostream &OS, RecordKeeper &Records) {
+ switch (Action) {
+ case PrintRecords:
+ OS << Records; // No argument, dump all contents
+ break;
+ case PrintDetailedRecords:
+ EmitDetailedRecords(Records, OS);
+ break;
+ case NullBackend: // No backend at all.
+ break;
+ case DumpJSON:
+ EmitJSON(Records, OS);
+ break;
+ case GenEmitter:
+ EmitCodeEmitter(Records, OS);
+ break;
+ case GenRegisterInfo:
+ EmitRegisterInfo(Records, OS);
+ break;
+ case GenInstrInfo:
+ EmitInstrInfo(Records, OS);
+ break;
+ case GenInstrDocs:
+ EmitInstrDocs(Records, OS);
+ break;
+ case GenCallingConv:
+ EmitCallingConv(Records, OS);
+ break;
+ case GenAsmWriter:
+ EmitAsmWriter(Records, OS);
+ break;
+ case GenAsmMatcher:
+ EmitAsmMatcher(Records, OS);
+ break;
+ case GenDisassembler:
+ EmitDisassembler(Records, OS);
+ break;
+ case GenPseudoLowering:
+ EmitPseudoLowering(Records, OS);
+ break;
+ case GenCompressInst:
+ EmitCompressInst(Records, OS);
+ break;
+ case GenDAGISel:
+ EmitDAGISel(Records, OS);
+ break;
+ case GenDFAPacketizer:
+ EmitDFAPacketizer(Records, OS);
+ break;
+ case GenFastISel:
+ EmitFastISel(Records, OS);
+ break;
+ case GenSubtarget:
+ EmitSubtarget(Records, OS);
+ break;
+ case GenIntrinsicEnums:
+ EmitIntrinsicEnums(Records, OS);
+ break;
+ case GenIntrinsicImpl:
+ EmitIntrinsicImpl(Records, OS);
+ break;
+ case GenOptParserDefs:
+ EmitOptParser(Records, OS);
+ break;
+ case GenOptRST:
+ EmitOptRST(Records, OS);
+ break;
+ // Debugging aid: list the names of all defs derived from -class=<name>.
+ case PrintEnums:
+ {
+ for (Record *Rec : Records.getAllDerivedDefinitions(Class))
+ OS << Rec->getName() << ", ";
+ OS << "\n";
+ break;
+ }
+ // Debugging aid: expand SetTheory "Set" defs and print their elements.
+ case PrintSets:
+ {
+ SetTheory Sets;
+ Sets.addFieldExpander("Set", "Elements");
+ for (Record *Rec : Records.getAllDerivedDefinitions("Set")) {
+ OS << Rec->getName() << " = [";
+ const std::vector<Record*> *Elts = Sets.expand(Rec);
+ assert(Elts && "Couldn't expand Set instance");
+ for (Record *Elt : *Elts)
+ OS << ' ' << Elt->getName();
+ OS << " ]\n";
+ }
+ break;
+ }
+ case GenCTags:
+ EmitCTags(Records, OS);
+ break;
+ case GenAttributes:
+ EmitAttributes(Records, OS);
+ break;
+ case GenSearchableTables:
+ EmitSearchableTables(Records, OS);
+ break;
+ case GenGlobalISel:
+ EmitGlobalISel(Records, OS);
+ break;
+ case GenGICombiner:
+ EmitGICombiner(Records, OS);
+ break;
+ case GenRegisterBank:
+ EmitRegisterBank(Records, OS);
+ break;
+ case GenX86EVEX2VEXTables:
+ EmitX86EVEX2VEXTables(Records, OS);
+ break;
+ case GenX86MnemonicTables:
+ EmitX86MnemonicTables(Records, OS);
+ break;
+ case GenX86FoldTables:
+ EmitX86FoldTables(Records, OS);
+ break;
+ case GenExegesis:
+ EmitExegesis(Records, OS);
+ break;
+ case GenAutomata:
+ EmitAutomata(Records, OS);
+ break;
+ case GenDirectivesEnumDecl:
+ EmitDirectivesDecl(Records, OS);
+ break;
+ case GenDirectivesEnumImpl:
+ EmitDirectivesImpl(Records, OS);
+ break;
+ case GenDXILOperation:
+ EmitDXILOperation(Records, OS);
+ break;
+ case GenRISCVTargetDef:
+ EmitRISCVTargetDef(Records, OS);
+ break;
+ }
+
+ return false;
+}
+}
+
+// Process entry point: set up LLVM (stack traces, argv normalization),
+// parse command-line options, then run the TableGen driver with our
+// backend dispatcher. InitLLVM must be constructed before option parsing.
+int main(int argc, char **argv) {
+ InitLLVM X(argc, argv);
+ cl::ParseCommandLineOptions(argc, argv);
+
+ return TableGenMain(argv[0], &LLVMTableGenMain);
+}
+
+#ifndef __has_feature
+#define __has_feature(x) 0
+#endif
+
+#if __has_feature(address_sanitizer) || \
+ (defined(__SANITIZE_ADDRESS__) && defined(__GNUC__)) || \
+ __has_feature(leak_sanitizer)
+
+#include <sanitizer/lsan_interface.h>
+// Disable LeakSanitizer for this binary as it has too many leaks that are not
+// very interesting to fix. See compiler-rt/include/sanitizer/lsan_interface.h .
+LLVM_ATTRIBUTE_USED int __lsan_is_turned_off() { return 1; }
+
+#endif
diff --git a/contrib/libs/llvm16/utils/TableGen/TableGenBackends.h b/contrib/libs/llvm16/utils/TableGen/TableGenBackends.h
new file mode 100644
index 0000000000..ac44babb12
--- /dev/null
+++ b/contrib/libs/llvm16/utils/TableGen/TableGenBackends.h
@@ -0,0 +1,101 @@
+//===- TableGenBackends.h - Declarations for LLVM TableGen Backends -------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// This file contains the declarations for all of the LLVM TableGen
+// backends. A "TableGen backend" is just a function. See below for a
+// precise description.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_UTILS_TABLEGEN_TABLEGENBACKENDS_H
+#define LLVM_UTILS_TABLEGEN_TABLEGENBACKENDS_H
+
+// A TableGen backend is a function that looks like
+//
+// EmitFoo(RecordKeeper &RK, raw_ostream &OS /*, anything else you need */ )
+//
+// What you do inside of that function is up to you, but it will usually
+// involve generating C++ code to the provided raw_ostream.
+//
+// The RecordKeeper is just a top-level container for an in-memory
+// representation of the data encoded in the TableGen file. What a TableGen
+// backend does is walk around that in-memory representation and generate
+// stuff based on the information it contains.
+//
+// The in-memory representation is a node-graph (think of it like JSON but
+// with a richer ontology of types), where the nodes are subclasses of
+// Record. The methods `getClass`, `getDef` are the basic interface to
+// access the node-graph. RecordKeeper also provides a handy method
+// `getAllDerivedDefinitions`. Consult "include/llvm/TableGen/Record.h" for
+// the exact interfaces provided by Record's and RecordKeeper.
+//
+// A common pattern for TableGen backends is for the EmitFoo function to
+// instantiate a class which holds some context for the generation process,
+// and then have most of the work happen in that class's methods. This
+// pattern partly has historical roots in the previous TableGen backend API
+// that involved a class and an invocation like `FooEmitter(RK).run(OS)`.
+//
+// Remember to wrap private things in an anonymous namespace. For most
+// backends, this means that the EmitFoo function is the only thing not in
+// the anonymous namespace.
+
+
+// FIXME: Reorganize TableGen so that build dependencies can be more
+// accurately expressed. Currently, touching any of the emitters (or
+// anything that they transitively depend on) causes everything dependent
+// on TableGen to be rebuilt (this includes all the targets!). Perhaps have
+// a standalone TableGen binary and have the backends be loadable modules
+// of some sort; then the dependency could be expressed as being on the
+// module, and all the modules would have a common dependency on the
+// TableGen binary with as few dependencies as possible on the rest of
+// LLVM.
+
+
+namespace llvm {
+
+class raw_ostream;
+class RecordKeeper;
+
+void EmitIntrinsicEnums(RecordKeeper &RK, raw_ostream &OS);
+void EmitIntrinsicImpl(RecordKeeper &RK, raw_ostream &OS);
+void EmitAsmMatcher(RecordKeeper &RK, raw_ostream &OS);
+void EmitAsmWriter(RecordKeeper &RK, raw_ostream &OS);
+void EmitCallingConv(RecordKeeper &RK, raw_ostream &OS);
+void EmitCodeEmitter(RecordKeeper &RK, raw_ostream &OS);
+void EmitDAGISel(RecordKeeper &RK, raw_ostream &OS);
+void EmitDFAPacketizer(RecordKeeper &RK, raw_ostream &OS);
+void EmitDisassembler(RecordKeeper &RK, raw_ostream &OS);
+void EmitFastISel(RecordKeeper &RK, raw_ostream &OS);
+void EmitInstrInfo(RecordKeeper &RK, raw_ostream &OS);
+void EmitInstrDocs(RecordKeeper &RK, raw_ostream &OS);
+void EmitPseudoLowering(RecordKeeper &RK, raw_ostream &OS);
+void EmitCompressInst(RecordKeeper &RK, raw_ostream &OS);
+void EmitRegisterInfo(RecordKeeper &RK, raw_ostream &OS);
+void EmitSubtarget(RecordKeeper &RK, raw_ostream &OS);
+void EmitMapTable(RecordKeeper &RK, raw_ostream &OS);
+void EmitOptParser(RecordKeeper &RK, raw_ostream &OS);
+void EmitOptRST(RecordKeeper &RK, raw_ostream &OS);
+void EmitCTags(RecordKeeper &RK, raw_ostream &OS);
+void EmitAttributes(RecordKeeper &RK, raw_ostream &OS);
+void EmitSearchableTables(RecordKeeper &RK, raw_ostream &OS);
+void EmitGlobalISel(RecordKeeper &RK, raw_ostream &OS);
+void EmitGICombiner(RecordKeeper &RK, raw_ostream &OS);
+void EmitX86EVEX2VEXTables(RecordKeeper &RK, raw_ostream &OS);
+void EmitX86FoldTables(RecordKeeper &RK, raw_ostream &OS);
+void EmitX86MnemonicTables(RecordKeeper &RK, raw_ostream &OS);
+void EmitRegisterBank(RecordKeeper &RK, raw_ostream &OS);
+void EmitExegesis(RecordKeeper &RK, raw_ostream &OS);
+void EmitAutomata(RecordKeeper &RK, raw_ostream &OS);
+void EmitDirectivesDecl(RecordKeeper &RK, raw_ostream &OS);
+void EmitDirectivesImpl(RecordKeeper &RK, raw_ostream &OS);
+void EmitDXILOperation(RecordKeeper &RK, raw_ostream &OS);
+void EmitRISCVTargetDef(const RecordKeeper &RK, raw_ostream &OS);
+
+} // End llvm namespace
+
+#endif
diff --git a/contrib/libs/llvm16/utils/TableGen/Types.cpp b/contrib/libs/llvm16/utils/TableGen/Types.cpp
new file mode 100644
index 0000000000..a6682da90e
--- /dev/null
+++ b/contrib/libs/llvm16/utils/TableGen/Types.cpp
@@ -0,0 +1,44 @@
+//===- Types.cpp - Helper for the selection of C++ data types. ------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+#include "Types.h"
+
+// For LLVM_ATTRIBUTE_UNUSED
+#include "llvm/Support/Compiler.h"
+
+#include <cassert>
+
+using namespace llvm;
+
+// Return the name of the smallest unsigned fixed-width C++ type able to
+// represent values in [0, Range].
+//
+// \param Range   Largest value that must be representable.
+// \param MaxSize Largest permitted type width in bits; only 32 and 64 are
+//                accepted (asserted below). Unused in release builds.
+// \returns A string literal naming a <cstdint> type ("uint8_t" ..
+//          "uint64_t").
+const char *llvm::getMinimalTypeForRange(uint64_t Range,
+                                         unsigned MaxSize LLVM_ATTRIBUTE_UNUSED) {
+  // TODO: The original callers only used 32 and 64 so these are the only
+  // values permitted. Rather than widen the supported values we should
+  // allow 64 for the callers that currently use 32 and remove the
+  // argument altogether.
+  assert((MaxSize == 32 || MaxSize == 64) && "Unexpected size");
+  // Fix: the previous checks also asserted `MaxSize <= 64` (subsumed by the
+  // assert above) and `Range <= 0xFFFFFFFFFFFFFFFFULL` for the 64-bit case,
+  // which is tautologically true for a uint64_t and triggers
+  // tautological-compare warnings. Only the 32-bit cap can be violated.
+  assert((MaxSize > 32 || Range <= 0xFFFFFFFFULL) && "Enum too large");
+
+  if (Range > 0xFFFFFFFFULL)
+    return "uint64_t";
+  if (Range > 0xFFFF)
+    return "uint32_t";
+  if (Range > 0xFF)
+    return "uint16_t";
+  return "uint8_t";
+}
+
+// Return the smallest unsigned fixed-width C++ type whose bit pattern can
+// hold a bitfield of `Size` distinct bit positions.
+//
+// \param Size Number of bits in the bitfield (0..64 supported).
+// \returns A string literal naming a <cstdint> type.
+const char *llvm::getMinimalTypeForEnumBitfield(uint64_t Size) {
+  // Highest bit index a field of `Size` bits can set.
+  uint64_t MaxIndex = Size;
+  if (MaxIndex > 0)
+    MaxIndex--;
+  assert(MaxIndex <= 64 && "Too many bits");
+  // Fix: the assert above permits MaxIndex == 64, but `1ULL << 64` is
+  // undefined behavior (shift count equals the type width). A 64-bit-wide
+  // field needs the full uint64_t range anyway, so answer directly.
+  if (MaxIndex >= 64)
+    return "uint64_t";
+  return getMinimalTypeForRange(1ULL << MaxIndex);
+}
diff --git a/contrib/libs/llvm16/utils/TableGen/Types.h b/contrib/libs/llvm16/utils/TableGen/Types.h
new file mode 100644
index 0000000000..17c7742cca
--- /dev/null
+++ b/contrib/libs/llvm16/utils/TableGen/Types.h
@@ -0,0 +1,24 @@
+//===- Types.h - Helper for the selection of C++ types. ---------*- C++ -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_UTILS_TABLEGEN_TYPES_H
+#define LLVM_UTILS_TABLEGEN_TYPES_H
+
+#include <cstdint>
+
+namespace llvm {
+/// Returns the smallest unsigned integer type that can hold the given range.
+/// MaxSize indicates the largest size of integer to consider (in bits) and only
+/// supports values of at least 32.
+const char *getMinimalTypeForRange(uint64_t Range, unsigned MaxSize = 64);
+
+/// Returns the smallest unsigned integer type that can hold the given bitfield.
+const char *getMinimalTypeForEnumBitfield(uint64_t Size);
+}
+
+#endif
diff --git a/contrib/libs/llvm16/utils/TableGen/VarLenCodeEmitterGen.cpp b/contrib/libs/llvm16/utils/TableGen/VarLenCodeEmitterGen.cpp
new file mode 100644
index 0000000000..2c1acd8d91
--- /dev/null
+++ b/contrib/libs/llvm16/utils/TableGen/VarLenCodeEmitterGen.cpp
@@ -0,0 +1,513 @@
+//===- VarLenCodeEmitterGen.cpp - CEG for variable-length insts -----------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// The CodeEmitterGen component for variable-length instructions.
+//
+// The basic CodeEmitterGen is almost exclusively designed for fixed-
+// length instructions. A good analogy for its encoding scheme is how printf
+// works: The (immutable) formatting string represent the fixed values in the
+// encoded instruction. Placeholders (i.e. %something), on the other hand,
+// represent encoding for instruction operands.
+// ```
+// printf("1101 %src 1001 %dst", <encoded value for operand `src`>,
+// <encoded value for operand `dst`>);
+// ```
+// VarLenCodeEmitterGen in this file provides an alternative encoding scheme
+// that works more like a C++ stream operator:
+// ```
+// OS << 0b1101;
+// if (Cond)
+// OS << OperandEncoding0;
+// OS << 0b1001 << OperandEncoding1;
+// ```
+// You are free to concatenate arbitrary types (and sizes) of encoding
+// fragments on any bit position, bringing more flexibilities on defining
+// encoding for variable-length instructions.
+//
+// In a more specific way, instruction encoding is represented by a DAG type
+// `Inst` field. Here is an example:
+// ```
+// dag Inst = (descend 0b1101, (operand "$src", 4), 0b1001,
+// (operand "$dst", 4));
+// ```
+// It represents the following instruction encoding:
+// ```
+// MSB LSB
+// 1101<encoding for operand src>1001<encoding for operand dst>
+// ```
+// For more details about DAG operators in the above snippet, please
+// refer to \file include/llvm/Target/Target.td.
+//
+// VarLenCodeEmitter will convert the above DAG into the same helper function
+// generated by CodeEmitter, `MCCodeEmitter::getBinaryCodeForInstr` (except
+// for few details).
+//
+//===----------------------------------------------------------------------===//
+
+#include "VarLenCodeEmitterGen.h"
+#include "CodeGenHwModes.h"
+#include "CodeGenInstruction.h"
+#include "CodeGenTarget.h"
+#include "InfoByHwMode.h"
+#include "llvm/ADT/ArrayRef.h"
+#include "llvm/ADT/DenseMap.h"
+#include "llvm/Support/raw_ostream.h"
+#include "llvm/TableGen/Error.h"
+
+using namespace llvm;
+
+namespace {
+
+// Backend driver: parses a VarLenInst for every encoding of every
+// non-pseudo target instruction and emits
+// <Target>MCCodeEmitter::getBinaryCodeForInstr (see run()).
+class VarLenCodeEmitterGen {
+ RecordKeeper &Records;
+
+ // Parsed `Inst` DAG keyed by the record that owns it: the instruction
+ // record itself, or the per-HwMode encoding record when EncodingInfos
+ // is present.
+ DenseMap<Record *, VarLenInst> VarLenInsts;
+
+ // Emit based values (i.e. fixed bits in the encoded instructions)
+ void emitInstructionBaseValues(
+ raw_ostream &OS,
+ ArrayRef<const CodeGenInstruction *> NumberedInstructions,
+ CodeGenTarget &Target, int HwMode = -1);
+
+ // Build the C++ `case` body that encodes instruction R; dispatches on
+ // HwMode first when R has per-mode encodings.
+ std::string getInstructionCase(Record *R, CodeGenTarget &Target);
+ // Encode R using the segments parsed from EncodingDef (== R when there
+ // are no per-HwMode encodings).
+ std::string getInstructionCaseForEncoding(Record *R, Record *EncodingDef,
+ CodeGenTarget &Target);
+
+public:
+ explicit VarLenCodeEmitterGen(RecordKeeper &R) : Records(R) {}
+
+ void run(raw_ostream &OS);
+};
+} // end anonymous namespace
+
+// Scan the trailing DAG arguments of an `(operand ...)` / `(slice ...)`
+// node for `(encoder "fn")` / `(decoder "fn")` directives.
+// Returns `{encoder name, decoder name}`; either entry is empty when the
+// corresponding directive is absent.
+static std::pair<StringRef, StringRef> getCustomCoders(ArrayRef<Init *> Args) {
+  std::pair<StringRef, StringRef> Coders;
+  for (const Init *Candidate : Args) {
+    const auto *Dag = dyn_cast<DagInit>(Candidate);
+    if (!Dag)
+      continue;
+    const auto *OpDef = dyn_cast<DefInit>(Dag->getOperator());
+    if (!OpDef)
+      continue;
+    // syntax: `(<encoder | decoder> "function name")`
+    StringRef Directive = OpDef->getDef()->getName();
+    bool IsEncoder = (Directive == "encoder");
+    if (!IsEncoder && Directive != "decoder")
+      continue;
+    if (Dag->getNumArgs() == 0 || !isa<StringInit>(Dag->getArg(0)))
+      PrintFatalError("expected '" + Directive +
+                      "' directive to be followed by a custom function name.");
+    StringRef Callback = cast<StringInit>(Dag->getArg(0))->getValue();
+    if (IsEncoder)
+      Coders.first = Callback;
+    else
+      Coders.second = Callback;
+  }
+  return Coders;
+}
+
+// Parse the `Inst` DAG of an encoding record into segments and compute the
+// total instruction width in bits.
+//
+// Fix: this constructor left HasDynamicSegment uninitialized — buildRec
+// only ever *sets* it (to true) when it meets an `operand`/`slice` node,
+// so a purely fixed-bits encoding made isFixedValueOnly() read an
+// indeterminate value. Initialize it to false like the default constructor.
+VarLenInst::VarLenInst(const DagInit *DI, const RecordVal *TheDef)
+    : TheDef(TheDef), NumBits(0U), HasDynamicSegment(false) {
+  buildRec(DI);
+  for (const auto &S : Segments)
+    NumBits += S.BitWidth;
+}
+
+// Recursively translate one DAG node of the `Inst` field into encoding
+// segments, appended to `Segments` in LSB-to-MSB order.
+// Recognized operators: `ascend`, `descend` (fixed-bit containers),
+// `operand` and `slice` (dynamic operand references).
+void VarLenInst::buildRec(const DagInit *DI) {
+ assert(TheDef && "The def record is nullptr ?");
+
+ std::string Op = DI->getOperator()->getAsString();
+
+ if (Op == "ascend" || Op == "descend") {
+ // Segments are stored LSB-first, so `descend` (MSB written first in the
+ // .td file) walks its arguments in reverse.
+ bool Reverse = Op == "descend";
+ int i = Reverse ? DI->getNumArgs() - 1 : 0;
+ int e = Reverse ? -1 : DI->getNumArgs();
+ int s = Reverse ? -1 : 1;
+ for (; i != e; i += s) {
+ const Init *Arg = DI->getArg(i);
+ if (const auto *BI = dyn_cast<BitsInit>(Arg)) {
+ if (!BI->isComplete())
+ PrintFatalError(TheDef->getLoc(),
+ "Expecting complete bits init in `" + Op + "`");
+ Segments.push_back({BI->getNumBits(), BI});
+ } else if (const auto *BI = dyn_cast<BitInit>(Arg)) {
+ if (!BI->isConcrete())
+ PrintFatalError(TheDef->getLoc(),
+ "Expecting concrete bit init in `" + Op + "`");
+ Segments.push_back({1, BI});
+ } else if (const auto *SubDI = dyn_cast<DagInit>(Arg)) {
+ // Nested DAG: recurse so arbitrary compositions are flattened.
+ buildRec(SubDI);
+ } else {
+ PrintFatalError(TheDef->getLoc(), "Unrecognized type of argument in `" +
+ Op + "`: " + Arg->getAsString());
+ }
+ }
+ } else if (Op == "operand") {
+ // (operand <operand name>, <# of bits>,
+ // [(encoder <custom encoder>)][, (decoder <custom decoder>)])
+ if (DI->getNumArgs() < 2)
+ PrintFatalError(TheDef->getLoc(),
+ "Expecting at least 2 arguments for `operand`");
+ HasDynamicSegment = true;
+ const Init *OperandName = DI->getArg(0), *NumBits = DI->getArg(1);
+ if (!isa<StringInit>(OperandName) || !isa<IntInit>(NumBits))
+ PrintFatalError(TheDef->getLoc(), "Invalid argument types for `operand`");
+
+ auto NumBitsVal = cast<IntInit>(NumBits)->getValue();
+ if (NumBitsVal <= 0)
+ PrintFatalError(TheDef->getLoc(), "Invalid number of bits for `operand`");
+
+ auto [CustomEncoder, CustomDecoder] =
+ getCustomCoders(DI->getArgs().slice(2));
+ Segments.push_back({static_cast<unsigned>(NumBitsVal), OperandName,
+ CustomEncoder, CustomDecoder});
+ } else if (Op == "slice") {
+ // (slice <operand name>, <high / low bit>, <low / high bit>,
+ // [(encoder <custom encoder>)][, (decoder <custom decoder>)])
+ if (DI->getNumArgs() < 3)
+ PrintFatalError(TheDef->getLoc(),
+ "Expecting at least 3 arguments for `slice`");
+ HasDynamicSegment = true;
+ Init *OperandName = DI->getArg(0), *HiBit = DI->getArg(1),
+ *LoBit = DI->getArg(2);
+ if (!isa<StringInit>(OperandName) || !isa<IntInit>(HiBit) ||
+ !isa<IntInit>(LoBit))
+ PrintFatalError(TheDef->getLoc(), "Invalid argument types for `slice`");
+
+ auto HiBitVal = cast<IntInit>(HiBit)->getValue(),
+ LoBitVal = cast<IntInit>(LoBit)->getValue();
+ if (HiBitVal < 0 || LoBitVal < 0)
+ PrintFatalError(TheDef->getLoc(), "Invalid bit range for `slice`");
+ // A slice may be written in either bit order; remember which way and
+ // compute the width either way.
+ bool NeedSwap = false;
+ unsigned NumBits = 0U;
+ if (HiBitVal < LoBitVal) {
+ NeedSwap = true;
+ NumBits = static_cast<unsigned>(LoBitVal - HiBitVal + 1);
+ } else {
+ NumBits = static_cast<unsigned>(HiBitVal - LoBitVal + 1);
+ }
+
+ auto [CustomEncoder, CustomDecoder] =
+ getCustomCoders(DI->getArgs().slice(3));
+
+ if (NeedSwap) {
+ // Normalization: Hi bit should always be the second argument.
+ Init *const NewArgs[] = {OperandName, LoBit, HiBit};
+ Segments.push_back({NumBits,
+ DagInit::get(DI->getOperator(), nullptr, NewArgs, {}),
+ CustomEncoder, CustomDecoder});
+ } else {
+ Segments.push_back({NumBits, DI, CustomEncoder, CustomDecoder});
+ }
+ }
+ // NOTE(review): any operator other than the four handled above is
+ // silently ignored at the top level (no trailing else) — confirm this is
+ // intended rather than a missed PrintFatalError.
+}
+
+// Top-level emission: parse every instruction's encoding DAG, then write
+// the complete body of <Target>MCCodeEmitter::getBinaryCodeForInstr —
+// fixed-bit base-value tables (optionally per HwMode) followed by one
+// switch case per distinct encoding routine.
+void VarLenCodeEmitterGen::run(raw_ostream &OS) {
+ CodeGenTarget Target(Records);
+ auto Insts = Records.getAllDerivedDefinitions("Instruction");
+
+ auto NumberedInstructions = Target.getInstructionsByEnumValue();
+ const CodeGenHwModes &HWM = Target.getHwModes();
+
+ // The set of HwModes used by instruction encodings.
+ std::set<unsigned> HwModes;
+ for (const CodeGenInstruction *CGI : NumberedInstructions) {
+ Record *R = CGI->TheDef;
+
+ // Create the corresponding VarLenInst instance.
+ if (R->getValueAsString("Namespace") == "TargetOpcode" ||
+ R->getValueAsBit("isPseudo"))
+ continue;
+
+ // Per-HwMode encodings: parse each mode's encoding record separately.
+ // (The inner RV/DI deliberately shadow the outer ones.)
+ if (const RecordVal *RV = R->getValue("EncodingInfos")) {
+ if (auto *DI = dyn_cast_or_null<DefInit>(RV->getValue())) {
+ EncodingInfoByHwMode EBM(DI->getDef(), HWM);
+ for (auto &KV : EBM) {
+ HwModes.insert(KV.first);
+ Record *EncodingDef = KV.second;
+ RecordVal *RV = EncodingDef->getValue("Inst");
+ DagInit *DI = cast<DagInit>(RV->getValue());
+ VarLenInsts.insert({EncodingDef, VarLenInst(DI, RV)});
+ }
+ continue;
+ }
+ }
+ RecordVal *RV = R->getValue("Inst");
+ DagInit *DI = cast<DagInit>(RV->getValue());
+ VarLenInsts.insert({R, VarLenInst(DI, RV)});
+ }
+
+ // Emit function declaration
+ OS << "void " << Target.getName()
+ << "MCCodeEmitter::getBinaryCodeForInstr(const MCInst &MI,\n"
+ << " SmallVectorImpl<MCFixup> &Fixups,\n"
+ << " APInt &Inst,\n"
+ << " APInt &Scratch,\n"
+ << " const MCSubtargetInfo &STI) const {\n";
+
+ // Emit instruction base values
+ if (HwModes.empty()) {
+ emitInstructionBaseValues(OS, NumberedInstructions, Target);
+ } else {
+ for (unsigned HwMode : HwModes)
+ emitInstructionBaseValues(OS, NumberedInstructions, Target, (int)HwMode);
+ }
+
+ // When several HwModes exist, pick the right table pair at runtime.
+ if (!HwModes.empty()) {
+ OS << " const unsigned **Index;\n";
+ OS << " const uint64_t *InstBits;\n";
+ OS << " unsigned HwMode = STI.getHwMode();\n";
+ OS << " switch (HwMode) {\n";
+ OS << " default: llvm_unreachable(\"Unknown hardware mode!\"); break;\n";
+ for (unsigned I : HwModes) {
+ OS << " case " << I << ": InstBits = InstBits_" << HWM.getMode(I).Name
+ << "; Index = Index_" << HWM.getMode(I).Name << "; break;\n";
+ }
+ OS << " };\n";
+ }
+
+ // Emit helper function to retrieve base values.
+ OS << " auto getInstBits = [&](unsigned Opcode) -> APInt {\n"
+ << " unsigned NumBits = Index[Opcode][0];\n"
+ << " if (!NumBits)\n"
+ << " return APInt::getZeroWidth();\n"
+ << " unsigned Idx = Index[Opcode][1];\n"
+ << " ArrayRef<uint64_t> Data(&InstBits[Idx], "
+ << "APInt::getNumWords(NumBits));\n"
+ << " return APInt(NumBits, Data);\n"
+ << " };\n";
+
+ // Map to accumulate all the cases. Keyed by the case body so that
+ // instructions sharing an identical encoding routine share one case.
+ std::map<std::string, std::vector<std::string>> CaseMap;
+
+ // Construct all cases statement for each opcode
+ for (Record *R : Insts) {
+ if (R->getValueAsString("Namespace") == "TargetOpcode" ||
+ R->getValueAsBit("isPseudo"))
+ continue;
+ std::string InstName =
+ (R->getValueAsString("Namespace") + "::" + R->getName()).str();
+ std::string Case = getInstructionCase(R, Target);
+
+ CaseMap[Case].push_back(std::move(InstName));
+ }
+
+ // Emit initial function code
+ OS << " const unsigned opcode = MI.getOpcode();\n"
+ << " switch (opcode) {\n";
+
+ // Emit each case statement
+ for (const auto &C : CaseMap) {
+ const std::string &Case = C.first;
+ const auto &InstList = C.second;
+
+ ListSeparator LS("\n");
+ for (const auto &InstName : InstList)
+ OS << LS << " case " << InstName << ":";
+
+ OS << " {\n";
+ OS << Case;
+ OS << " break;\n"
+ << " }\n";
+ }
+ // Default case: unhandled opcode
+ OS << " default:\n"
+ << " std::string msg;\n"
+ << " raw_string_ostream Msg(msg);\n"
+ << " Msg << \"Not supported instr: \" << MI;\n"
+ << " report_fatal_error(Msg.str().c_str());\n"
+ << " }\n";
+ OS << "}\n\n";
+}
+
+// Append one {NumBits, Index} entry to the index stream IS and, when the
+// value is non-empty, its raw 64-bit storage words to the storage stream
+// SS. `Index` is advanced by the number of words written so the next
+// entry knows its offset into InstBits[]. Neither stream gets a trailing
+// newline here; the caller appends a "// <InstName>" comment after it.
+static void emitInstBits(raw_ostream &IS, raw_ostream &SS, const APInt &Bits,
+ unsigned &Index) {
+ // Zero-width value (e.g. pseudo): index entry only, no storage words.
+ if (!Bits.getNumWords()) {
+ IS.indent(4) << "{/*NumBits*/0, /*Index*/0},";
+ return;
+ }
+
+ IS.indent(4) << "{/*NumBits*/" << Bits.getBitWidth() << ", "
+ << "/*Index*/" << Index << "},";
+
+ SS.indent(4);
+ for (unsigned I = 0; I < Bits.getNumWords(); ++I, ++Index)
+ SS << "UINT64_C(" << utostr(Bits.getRawData()[I]) << "),";
+}
+
+// Emit the fixed-bits ("base value") tables for all instructions: an
+// Index[][2] table mapping opcode -> {bit width, word offset} and a flat
+// InstBits[] word array. When HwMode >= 0 the table names are suffixed
+// with that mode's name; HwMode == -1 emits the single unsuffixed pair.
+void VarLenCodeEmitterGen::emitInstructionBaseValues(
+ raw_ostream &OS, ArrayRef<const CodeGenInstruction *> NumberedInstructions,
+ CodeGenTarget &Target, int HwMode) {
+ // Both tables are accumulated in strings so they can be emitted together
+ // at the end.
+ std::string IndexArray, StorageArray;
+ raw_string_ostream IS(IndexArray), SS(StorageArray);
+
+ const CodeGenHwModes &HWM = Target.getHwModes();
+ if (HwMode == -1) {
+ IS << " static const unsigned Index[][2] = {\n";
+ SS << " static const uint64_t InstBits[] = {\n";
+ } else {
+ StringRef Name = HWM.getMode(HwMode).Name;
+ IS << " static const unsigned Index_" << Name << "[][2] = {\n";
+ SS << " static const uint64_t InstBits_" << Name << "[] = {\n";
+ }
+
+ unsigned NumFixedValueWords = 0U;
+ for (const CodeGenInstruction *CGI : NumberedInstructions) {
+ Record *R = CGI->TheDef;
+
+ // Pseudos get a zero entry so the table stays indexed by opcode.
+ if (R->getValueAsString("Namespace") == "TargetOpcode" ||
+ R->getValueAsBit("isPseudo")) {
+ IS.indent(4) << "{/*NumBits*/0, /*Index*/0},\n";
+ continue;
+ }
+
+ // Prefer the per-HwMode encoding record when one exists for this mode.
+ Record *EncodingDef = R;
+ if (const RecordVal *RV = R->getValue("EncodingInfos")) {
+ if (auto *DI = dyn_cast_or_null<DefInit>(RV->getValue())) {
+ EncodingInfoByHwMode EBM(DI->getDef(), HWM);
+ if (EBM.hasMode(HwMode))
+ EncodingDef = EBM.get(HwMode);
+ }
+ }
+
+ auto It = VarLenInsts.find(EncodingDef);
+ if (It == VarLenInsts.end())
+ PrintFatalError(EncodingDef, "VarLenInst not found for this record");
+ const VarLenInst &VLI = It->second;
+
+ unsigned i = 0U, BitWidth = VLI.size();
+
+ // Start by filling in fixed values.
+ APInt Value(BitWidth, 0);
+ auto SI = VLI.begin(), SE = VLI.end();
+ // Scan through all the segments that have fixed-bits values.
+ while (i < BitWidth && SI != SE) {
+ unsigned SegmentNumBits = SI->BitWidth;
+ if (const auto *BI = dyn_cast<BitsInit>(SI->Value)) {
+ for (unsigned Idx = 0U; Idx != SegmentNumBits; ++Idx) {
+ auto *B = cast<BitInit>(BI->getBit(Idx));
+ Value.setBitVal(i + Idx, B->getValue());
+ }
+ }
+ if (const auto *BI = dyn_cast<BitInit>(SI->Value))
+ Value.setBitVal(i, BI->getValue());
+
+ // Dynamic segments leave zeros here; they are filled in at encode time.
+ i += SegmentNumBits;
+ ++SI;
+ }
+
+ emitInstBits(IS, SS, Value, NumFixedValueWords);
+ IS << '\t' << "// " << R->getName() << "\n";
+ if (Value.getNumWords())
+ SS << '\t' << "// " << R->getName() << "\n";
+ }
+ // Terminating entries keep the initializer lists valid after the
+ // trailing commas above.
+ IS.indent(4) << "{/*NumBits*/0, /*Index*/0}\n };\n";
+ SS.indent(4) << "UINT64_C(0)\n };\n";
+
+ OS << IS.str() << SS.str();
+}
+
+// Build the C++ case body for instruction R. Instructions carrying
+// per-HwMode encodings get a nested `switch (HwMode)`; everything else
+// encodes through its single encoding record directly.
+std::string VarLenCodeEmitterGen::getInstructionCase(Record *R,
+ CodeGenTarget &Target) {
+ const RecordVal *RV = R->getValue("EncodingInfos");
+ DefInit *DI = RV ? dyn_cast_or_null<DefInit>(RV->getValue()) : nullptr;
+ if (!DI)
+ return getInstructionCaseForEncoding(R, R, Target);
+
+ const CodeGenHwModes &HWM = Target.getHwModes();
+ EncodingInfoByHwMode EBM(DI->getDef(), HWM);
+ std::string Case;
+ Case += " switch (HwMode) {\n";
+ Case += " default: llvm_unreachable(\"Unhandled HwMode\");\n";
+ for (auto &KV : EBM) {
+ Case += " case " + itostr(KV.first) + ": {\n";
+ Case += getInstructionCaseForEncoding(R, KV.second, Target);
+ Case += " break;\n";
+ Case += " }\n";
+ }
+ Case += " }\n";
+ return Case;
+}
+
+// Generate the statements that encode instruction R using the segments
+// parsed from EncodingDef: load the fixed-bit base value, then for each
+// dynamic segment encode the operand into Scratch and splice the relevant
+// bits into Inst at the segment's offset.
+std::string VarLenCodeEmitterGen::getInstructionCaseForEncoding(
+ Record *R, Record *EncodingDef, CodeGenTarget &Target) {
+ auto It = VarLenInsts.find(EncodingDef);
+ if (It == VarLenInsts.end())
+ PrintFatalError(EncodingDef, "Parsed encoding record not found");
+ const VarLenInst &VLI = It->second;
+ size_t BitWidth = VLI.size();
+
+ CodeGenInstruction &CGI = Target.getInstruction(R);
+
+ std::string Case;
+ raw_string_ostream SS(Case);
+ // Resize the scratch buffer.
+ if (BitWidth && !VLI.isFixedValueOnly())
+ SS.indent(6) << "Scratch = Scratch.zext(" << BitWidth << ");\n";
+ // Populate based value.
+ SS.indent(6) << "Inst = getInstBits(opcode);\n";
+
+ // Process each segment in VLI. Offset tracks the segment's LSB position
+ // within the full instruction.
+ size_t Offset = 0U;
+ for (const auto &ES : VLI) {
+ unsigned NumBits = ES.BitWidth;
+ const Init *Val = ES.Value;
+ // If it's a StringInit or DagInit, it's a reference to an operand
+ // or part of an operand.
+ if (isa<StringInit>(Val) || isa<DagInit>(Val)) {
+ StringRef OperandName;
+ unsigned LoBit = 0U;
+ if (const auto *SV = dyn_cast<StringInit>(Val)) {
+ OperandName = SV->getValue();
+ } else {
+ // Normalized: (slice <operand name>, <high bit>, <low bit>)
+ const auto *DV = cast<DagInit>(Val);
+ OperandName = cast<StringInit>(DV->getArg(0))->getValue();
+ LoBit = static_cast<unsigned>(cast<IntInit>(DV->getArg(2))->getValue());
+ }
+
+ auto OpIdx = CGI.Operands.ParseOperandName(OperandName);
+ unsigned FlatOpIdx = CGI.Operands.getFlattenedOperandNumber(OpIdx);
+ // A segment-level (encoder ...) directive overrides the operand's
+ // EncoderMethod.
+ StringRef CustomEncoder =
+ CGI.Operands[OpIdx.first].EncoderMethodNames[OpIdx.second];
+ if (ES.CustomEncoder.size())
+ CustomEncoder = ES.CustomEncoder;
+
+ SS.indent(6) << "Scratch.clearAllBits();\n";
+ SS.indent(6) << "// op: " << OperandName.drop_front(1) << "\n";
+ if (CustomEncoder.empty())
+ SS.indent(6) << "getMachineOpValue(MI, MI.getOperand("
+ << utostr(FlatOpIdx) << ")";
+ else
+ SS.indent(6) << CustomEncoder << "(MI, /*OpIdx=*/" << utostr(FlatOpIdx);
+
+ SS << ", /*Pos=*/" << utostr(Offset) << ", Scratch, Fixups, STI);\n";
+
+ SS.indent(6) << "Inst.insertBits("
+ << "Scratch.extractBits(" << utostr(NumBits) << ", "
+ << utostr(LoBit) << ")"
+ << ", " << Offset << ");\n";
+ }
+ // Fixed segments are already part of the base value; just advance.
+ Offset += NumBits;
+ }
+
+ // Give the target a chance to post-process the fully assembled encoding.
+ StringRef PostEmitter = R->getValueAsString("PostEncoderMethod");
+ if (!PostEmitter.empty())
+ SS.indent(6) << "Inst = " << PostEmitter << "(MI, Inst, STI);\n";
+
+ return Case;
+}
+
+namespace llvm {
+
+// Public entry point (declared in VarLenCodeEmitterGen.h) used when a
+// target opts into variable-length instruction encodings.
+void emitVarLenCodeEmitter(RecordKeeper &R, raw_ostream &OS) {
+ VarLenCodeEmitterGen(R).run(OS);
+}
+
+} // end namespace llvm
diff --git a/contrib/libs/llvm16/utils/TableGen/VarLenCodeEmitterGen.h b/contrib/libs/llvm16/utils/TableGen/VarLenCodeEmitterGen.h
new file mode 100644
index 0000000000..2b55fd1720
--- /dev/null
+++ b/contrib/libs/llvm16/utils/TableGen/VarLenCodeEmitterGen.h
@@ -0,0 +1,59 @@
+//===- VarLenCodeEmitterGen.h - CEG for variable-length insts ---*- C++ -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// This file declare the CodeEmitterGen component for variable-length
+// instructions. See the .cpp file for more details.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_UTILS_TABLEGEN_VARLENCODEEMITTERGEN_H
+#define LLVM_UTILS_TABLEGEN_VARLENCODEEMITTERGEN_H
+
+#include "llvm/TableGen/Record.h"
+
+namespace llvm {
+
+/// One contiguous run of bits in a variable-length instruction encoding.
+struct EncodingSegment {
+  // Width of this segment in bits.
+  unsigned BitWidth;
+  // The segment's content: either a fixed value, or a StringInit/DagInit
+  // referencing (part of) an operand (see the emitter .cpp file).
+  const Init *Value;
+  // Optional per-segment custom encoder/decoder method names; empty means
+  // the default getMachineOpValue-style handling is used.
+  StringRef CustomEncoder = "";
+  StringRef CustomDecoder = "";
+};
+
+/// Parsed representation of a variable-length instruction encoding: an
+/// ordered list of EncodingSegments built from a TableGen dag.
+class VarLenInst {
+  // The record value the encoding dag was taken from (null for the
+  // default-constructed empty instance).
+  const RecordVal *TheDef;
+  // Total encoding width: the sum of all segment bit widths.
+  size_t NumBits;
+
+  // Set if any of the segment is not fixed value.
+  bool HasDynamicSegment;
+
+  SmallVector<EncodingSegment, 4> Segments;
+
+  // Recursively flattens the encoding dag \p DI into Segments.
+  void buildRec(const DagInit *DI);
+
+public:
+  VarLenInst() : TheDef(nullptr), NumBits(0U), HasDynamicSegment(false) {}
+
+  explicit VarLenInst(const DagInit *DI, const RecordVal *TheDef);
+
+  /// Number of bits
+  size_t size() const { return NumBits; }
+
+  using const_iterator = decltype(Segments)::const_iterator;
+
+  // Iteration over the encoding segments.
+  const_iterator begin() const { return Segments.begin(); }
+  const_iterator end() const { return Segments.end(); }
+  size_t getNumSegments() const { return Segments.size(); }
+
+  /// True when every segment is a fixed value (no operand references).
+  bool isFixedValueOnly() const { return !HasDynamicSegment; }
+};
+
+void emitVarLenCodeEmitter(RecordKeeper &R, raw_ostream &OS);
+
+} // end namespace llvm
+#endif
diff --git a/contrib/libs/llvm16/utils/TableGen/WebAssemblyDisassemblerEmitter.cpp b/contrib/libs/llvm16/utils/TableGen/WebAssemblyDisassemblerEmitter.cpp
new file mode 100644
index 0000000000..dc037e4409
--- /dev/null
+++ b/contrib/libs/llvm16/utils/TableGen/WebAssemblyDisassemblerEmitter.cpp
@@ -0,0 +1,175 @@
+//===- WebAssemblyDisassemblerEmitter.cpp - Disassembler tables -*- C++ -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// This file is part of the WebAssembly Disassembler Emitter.
+// It contains the implementation of the disassembler tables.
+// Documentation for the disassembler emitter in general can be found in
+// WebAssemblyDisassemblerEmitter.h.
+//
+//===----------------------------------------------------------------------===//
+
+#include "WebAssemblyDisassemblerEmitter.h"
+#include "CodeGenInstruction.h"
+#include "llvm/ADT/STLExtras.h"
+#include "llvm/Support/raw_ostream.h"
+#include "llvm/TableGen/Record.h"
+
+namespace llvm {
+
+static constexpr int WebAssemblyInstructionTableSize = 256;
+
+/// Emits the WebAssembly disassembler lookup tables: one 256-entry
+/// instruction table per opcode prefix, a shared operand-type table, and a
+/// prefix dispatch table, all written as C++ source to \p OS.
+void emitWebAssemblyDisassemblerTables(
+    raw_ostream &OS,
+    const ArrayRef<const CodeGenInstruction *> &NumberedInstructions) {
+  // First lets organize all opcodes by (prefix) byte. Prefix 0 is the
+  // starting table.
+  std::map<unsigned,
+           std::map<unsigned, std::pair<unsigned, const CodeGenInstruction *>>>
+      OpcodeTable;
+  for (unsigned I = 0; I != NumberedInstructions.size(); ++I) {
+    auto &CGI = *NumberedInstructions[I];
+    auto &Def = *CGI.TheDef;
+    if (!Def.getValue("Inst"))
+      continue;
+    auto &Inst = *Def.getValueAsBitsInit("Inst");
+    RecordKeeper &RK = Inst.getRecordKeeper();
+    unsigned Opc = static_cast<unsigned>(
+        cast<IntInit>(Inst.convertInitializerTo(IntRecTy::get(RK)))
+            ->getValue());
+    if (Opc == 0xFFFFFFFF)
+      continue; // No opcode defined.
+    assert(Opc <= 0xFFFFFF);
+    // Split the encoded value into a prefix and the in-table opcode:
+    // two-byte encodings put the prefix in the high byte, three-byte
+    // encodings in the high 8 bits above a 16-bit opcode.
+    unsigned Prefix;
+    if (Opc <= 0xFFFF) {
+      Prefix = Opc >> 8;
+      Opc = Opc & 0xFF;
+    } else {
+      Prefix = Opc >> 16;
+      Opc = Opc & 0xFFFF;
+    }
+    // NOTE(review): operator[] inserts an (empty) slot here even when the
+    // instruction is skipped below as non-stack-based; the slot's pointer
+    // stays null, so it is emitted as ET_Unused.
+    auto &CGIP = OpcodeTable[Prefix][Opc];
+    // All wasm instructions have a StackBased field of type string, we only
+    // want the instructions for which this is "true".
+    bool IsStackBased = Def.getValueAsBit("StackBased");
+    if (!IsStackBased)
+      continue;
+    if (CGIP.second) {
+      // We already have an instruction for this slot, so decide which one
+      // should be the canonical one. This determines which variant gets
+      // printed in a disassembly. We want e.g. "call" not "i32.call", and
+      // "end" when we don't know if its "end_loop" or "end_block" etc.
+      bool IsCanonicalExisting = CGIP.second->TheDef->getValueAsBit("IsCanonical");
+      // We already have one marked explicitly as canonical, so keep it.
+      if (IsCanonicalExisting)
+        continue;
+      bool IsCanonicalNew = Def.getValueAsBit("IsCanonical");
+      // If the new one is explicitly marked as canonical, take it.
+      if (!IsCanonicalNew) {
+        // Neither the existing or new instruction is canonical.
+        // Pick the one with the shortest name as heuristic.
+        // Though ideally IsCanonical is always defined for at least one
+        // variant so this never has to apply.
+        if (CGIP.second->AsmString.size() <= CGI.AsmString.size())
+          continue;
+      }
+    }
+    // Set this instruction as the one to use.
+    CGIP = std::make_pair(I, &CGI);
+  }
+  // Emit the fixed preamble: include, table size, entry type enum and the
+  // per-entry struct definition.
+  OS << "#include \"MCTargetDesc/WebAssemblyMCTargetDesc.h\"\n";
+  OS << "\n";
+  OS << "namespace llvm {\n\n";
+  OS << "static constexpr int WebAssemblyInstructionTableSize = ";
+  OS << WebAssemblyInstructionTableSize << ";\n\n";
+  OS << "enum EntryType : uint8_t { ";
+  OS << "ET_Unused, ET_Prefix, ET_Instruction };\n\n";
+  OS << "struct WebAssemblyInstruction {\n";
+  OS << "  uint16_t Opcode;\n";
+  OS << "  EntryType ET;\n";
+  OS << "  uint8_t NumOperands;\n";
+  OS << "  uint16_t OperandStart;\n";
+  OS << "};\n\n";
+  std::vector<std::string> OperandTable, CurOperandList;
+  // Output one table per prefix.
+  for (auto &PrefixPair : OpcodeTable) {
+    if (PrefixPair.second.empty())
+      continue;
+    OS << "WebAssemblyInstruction InstructionTable" << PrefixPair.first;
+    OS << "[] = {\n";
+    for (unsigned I = 0; I < WebAssemblyInstructionTableSize; I++) {
+      auto InstIt = PrefixPair.second.find(I);
+      if (InstIt != PrefixPair.second.end()) {
+        // Regular instruction.
+        assert(InstIt->second.second);
+        auto &CGI = *InstIt->second.second;
+        OS << "  // 0x";
+        OS.write_hex(static_cast<unsigned long long>(I));
+        OS << ": " << CGI.AsmString << "\n";
+        OS << "  { " << InstIt->second.first << ", ET_Instruction, ";
+        OS << CGI.Operands.OperandList.size() << ", ";
+        // Collect operand types for storage in a shared list.
+        CurOperandList.clear();
+        for (auto &Op : CGI.Operands.OperandList) {
+          assert(Op.OperandType != "MCOI::OPERAND_UNKNOWN");
+          CurOperandList.push_back(Op.OperandType);
+        }
+        // See if we already have stored this sequence before. This is not
+        // strictly necessary but makes the table really small.
+        size_t OperandStart = OperandTable.size();
+        if (CurOperandList.size() <= OperandTable.size()) {
+          // Linear scan for an existing occurrence of the operand sequence.
+          for (size_t J = 0; J <= OperandTable.size() - CurOperandList.size();
+               ++J) {
+            size_t K = 0;
+            for (; K < CurOperandList.size(); ++K) {
+              if (OperandTable[J + K] != CurOperandList[K]) break;
+            }
+            if (K == CurOperandList.size()) {
+              OperandStart = J;
+              break;
+            }
+          }
+        }
+        // Store operands if no prior occurrence.
+        if (OperandStart == OperandTable.size()) {
+          llvm::append_range(OperandTable, CurOperandList);
+        }
+        OS << OperandStart;
+      } else {
+        auto PrefixIt = OpcodeTable.find(I);
+        // If we have a non-empty table for it that's not 0, this is a prefix.
+        if (PrefixIt != OpcodeTable.end() && I && !PrefixPair.first) {
+          OS << "  { 0, ET_Prefix, 0, 0";
+        } else {
+          OS << "  { 0, ET_Unused, 0, 0";
+        }
+      }
+      OS << " },\n";
+    }
+    OS << "};\n\n";
+  }
+  // Create a table of all operands:
+  OS << "const uint8_t OperandTable[] = {\n";
+  for (auto &Op : OperandTable) {
+    OS << "  " << Op << ",\n";
+  }
+  OS << "};\n\n";
+  // Create a table of all extension tables:
+  OS << "struct { uint8_t Prefix; const WebAssemblyInstruction *Table; }\n";
+  OS << "PrefixTable[] = {\n";
+  for (auto &PrefixPair : OpcodeTable) {
+    if (PrefixPair.second.empty() || !PrefixPair.first)
+      continue;
+    OS << "  { " << PrefixPair.first << ", InstructionTable"
+       << PrefixPair.first;
+    OS << " },\n";
+  }
+  OS << "  { 0, nullptr }\n};\n\n";
+  OS << "} // end namespace llvm\n";
+}
+
+} // namespace llvm
diff --git a/contrib/libs/llvm16/utils/TableGen/WebAssemblyDisassemblerEmitter.h b/contrib/libs/llvm16/utils/TableGen/WebAssemblyDisassemblerEmitter.h
new file mode 100644
index 0000000000..aba3a4bfd3
--- /dev/null
+++ b/contrib/libs/llvm16/utils/TableGen/WebAssemblyDisassemblerEmitter.h
@@ -0,0 +1,30 @@
+//===- WebAssemblyDisassemblerEmitter.h - Disassembler tables ---*- C++ -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// This file is part of the WebAssembly Disassembler Emitter.
+// It contains the interface of the disassembler tables.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_UTILS_TABLEGEN_WEBASSEMBLYDISASSEMBLEREMITTER_H
+#define LLVM_UTILS_TABLEGEN_WEBASSEMBLYDISASSEMBLEREMITTER_H
+
+#include "llvm/ADT/ArrayRef.h"
+
+namespace llvm {
+
+class CodeGenInstruction;
+class raw_ostream;
+
+void emitWebAssemblyDisassemblerTables(
+ raw_ostream &OS,
+ const ArrayRef<const CodeGenInstruction *> &NumberedInstructions);
+
+} // namespace llvm
+
+#endif
diff --git a/contrib/libs/llvm16/utils/TableGen/X86DisassemblerShared.h b/contrib/libs/llvm16/utils/TableGen/X86DisassemblerShared.h
new file mode 100644
index 0000000000..093f220fda
--- /dev/null
+++ b/contrib/libs/llvm16/utils/TableGen/X86DisassemblerShared.h
@@ -0,0 +1,57 @@
+//===- X86DisassemblerShared.h - Emitter shared header ----------*- C++ -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_UTILS_TABLEGEN_X86DISASSEMBLERSHARED_H
+#define LLVM_UTILS_TABLEGEN_X86DISASSEMBLERSHARED_H
+
+#include <cstring>
+#include <string>
+
+#include "llvm/Support/X86DisassemblerDecoderCommon.h"
+
+/// Fully describes one decoded instruction for the emitter: its operand
+/// specifiers, the instruction context it belongs to, and its name.
+struct InstructionSpecifier {
+  llvm::X86Disassembler::OperandSpecifier
+      operands[llvm::X86Disassembler::X86_MAX_OPERANDS];
+  llvm::X86Disassembler::InstructionContext insnContext;
+  std::string name;
+
+  // Default-initialize to context IC with an empty name and zeroed operands.
+  InstructionSpecifier() {
+    insnContext = llvm::X86Disassembler::IC;
+    name = "";
+    memset(operands, 0, sizeof(operands));
+  }
+};
+
+/// Specifies whether a ModR/M byte is needed and (if so) which
+/// instruction each possible value of the ModR/M byte corresponds to. Once
+/// this information is known, we have narrowed down to a single instruction.
+struct ModRMDecision {
+  // How the ModR/M byte is interpreted (one of the MODRM_* kinds).
+  uint8_t modrm_type;
+  // Instruction UID for each of the 256 possible ModR/M byte values.
+  llvm::X86Disassembler::InstrUID instructionIDs[256];
+};
+
+/// Specifies which set of ModR/M->instruction tables to look at
+/// given a particular opcode.
+struct OpcodeDecision {
+  // One ModR/M decision per possible opcode byte value.
+  ModRMDecision modRMDecisions[256];
+};
+
+/// Specifies which opcode->instruction tables to look at given
+/// a particular context (set of attributes). Since there are many possible
+/// contexts, the decoder first uses CONTEXTS_SYM to determine which context
+/// applies given a specific set of attributes. Hence there are only IC_max
+/// entries in this table, rather than 2^(ATTR_max).
+struct ContextDecision {
+  OpcodeDecision opcodeDecisions[llvm::X86Disassembler::IC_max];
+
+  // Zero-initialize all nested decision tables.
+  ContextDecision() {
+    memset(opcodeDecisions, 0, sizeof(opcodeDecisions));
+  }
+};
+
+#endif
diff --git a/contrib/libs/llvm16/utils/TableGen/X86DisassemblerTables.cpp b/contrib/libs/llvm16/utils/TableGen/X86DisassemblerTables.cpp
new file mode 100644
index 0000000000..601591d9f5
--- /dev/null
+++ b/contrib/libs/llvm16/utils/TableGen/X86DisassemblerTables.cpp
@@ -0,0 +1,1089 @@
+//===- X86DisassemblerTables.cpp - Disassembler tables ----------*- C++ -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// This file is part of the X86 Disassembler Emitter.
+// It contains the implementation of the disassembler tables.
+// Documentation for the disassembler emitter in general can be found in
+// X86DisassemblerEmitter.h.
+//
+//===----------------------------------------------------------------------===//
+
+#include "X86DisassemblerTables.h"
+#include "X86DisassemblerShared.h"
+#include "X86ModRMFilters.h"
+#include "llvm/ADT/SmallVector.h"
+#include "llvm/Support/ErrorHandling.h"
+#include "llvm/Support/Format.h"
+#include "llvm/Support/raw_ostream.h"
+#include <map>
+
+using namespace llvm;
+using namespace X86Disassembler;
+
+/// stringForContext - Returns a string containing the name of a particular
+/// InstructionContext, usually for diagnostic purposes.
+///
+/// @param insnContext - The instruction class to transform to a string.
+/// @return - A statically-allocated string constant that contains the
+/// name of the instruction class.
+static inline const char* stringForContext(InstructionContext insnContext) {
+  switch (insnContext) {
+  default:
+    llvm_unreachable("Unhandled instruction class");
+// Each entry in INSTRUCTION_CONTEXTS expands to a case returning the
+// stringified enumerator name.
+#define ENUM_ENTRY(n, r, d) case n: return #n; break;
+#define ENUM_ENTRY_K_B(n, r, d) ENUM_ENTRY(n, r, d) ENUM_ENTRY(n##_K_B, r, d)\
+  ENUM_ENTRY(n##_KZ, r, d) ENUM_ENTRY(n##_K, r, d) ENUM_ENTRY(n##_B, r, d)\
+  ENUM_ENTRY(n##_KZ_B, r, d)
+// ENUM_ENTRY_K_B also emits cases for the _K/_KZ/_B/_K_B/_KZ_B variants
+// of each base context.
+  INSTRUCTION_CONTEXTS
+#undef ENUM_ENTRY
+#undef ENUM_ENTRY_K_B
+  }
+}
+
+/// stringForOperandType - Like stringForContext, but for OperandTypes.
+/// stringForOperandType - Like stringForContext, but for OperandTypes.
+static inline const char* stringForOperandType(OperandType type) {
+  switch (type) {
+  default:
+    llvm_unreachable("Unhandled type");
+// TYPES expands to one case per operand type, returning its name.
+#define ENUM_ENTRY(i, d) case i: return #i;
+  TYPES
+#undef ENUM_ENTRY
+  }
+}
+
+/// stringForOperandEncoding - like stringForContext, but for
+/// OperandEncodings.
+static inline const char* stringForOperandEncoding(OperandEncoding encoding) {
+  switch (encoding) {
+  default:
+    llvm_unreachable("Unhandled encoding");
+// ENCODINGS expands to one case per operand encoding, returning its name.
+#define ENUM_ENTRY(i, d) case i: return #i;
+  ENCODINGS
+#undef ENUM_ENTRY
+  }
+}
+
+/// inheritsFrom - Indicates whether all instructions in one class also belong
+/// to another class.
+///
+/// @param child - The class that may be the subset
+/// @param parent - The class that may be the superset
+/// @return - True if child is a subset of parent, false otherwise.
+static inline bool inheritsFrom(InstructionContext child,
+ InstructionContext parent, bool noPrefix = true,
+ bool VEX_LIG = false, bool VEX_WIG = false,
+ bool AdSize64 = false) {
+ if (child == parent)
+ return true;
+
+ switch (parent) {
+ case IC:
+ return(inheritsFrom(child, IC_64BIT, AdSize64) ||
+ (noPrefix && inheritsFrom(child, IC_OPSIZE, noPrefix)) ||
+ inheritsFrom(child, IC_ADSIZE) ||
+ (noPrefix && inheritsFrom(child, IC_XD, noPrefix)) ||
+ (noPrefix && inheritsFrom(child, IC_XS, noPrefix)));
+ case IC_64BIT:
+ return(inheritsFrom(child, IC_64BIT_REXW) ||
+ (noPrefix && inheritsFrom(child, IC_64BIT_OPSIZE, noPrefix)) ||
+ (!AdSize64 && inheritsFrom(child, IC_64BIT_ADSIZE)) ||
+ (noPrefix && inheritsFrom(child, IC_64BIT_XD, noPrefix)) ||
+ (noPrefix && inheritsFrom(child, IC_64BIT_XS, noPrefix)));
+ case IC_OPSIZE:
+ return inheritsFrom(child, IC_64BIT_OPSIZE) ||
+ inheritsFrom(child, IC_OPSIZE_ADSIZE);
+ case IC_ADSIZE:
+ return (noPrefix && inheritsFrom(child, IC_OPSIZE_ADSIZE, noPrefix));
+ case IC_OPSIZE_ADSIZE:
+ return false;
+ case IC_64BIT_ADSIZE:
+ return (noPrefix && inheritsFrom(child, IC_64BIT_OPSIZE_ADSIZE, noPrefix));
+ case IC_64BIT_OPSIZE_ADSIZE:
+ return false;
+ case IC_XD:
+ return inheritsFrom(child, IC_64BIT_XD);
+ case IC_XS:
+ return inheritsFrom(child, IC_64BIT_XS);
+ case IC_XD_OPSIZE:
+ return inheritsFrom(child, IC_64BIT_XD_OPSIZE);
+ case IC_XS_OPSIZE:
+ return inheritsFrom(child, IC_64BIT_XS_OPSIZE);
+ case IC_XD_ADSIZE:
+ return inheritsFrom(child, IC_64BIT_XD_ADSIZE);
+ case IC_XS_ADSIZE:
+ return inheritsFrom(child, IC_64BIT_XS_ADSIZE);
+ case IC_64BIT_REXW:
+ return((noPrefix && inheritsFrom(child, IC_64BIT_REXW_XS, noPrefix)) ||
+ (noPrefix && inheritsFrom(child, IC_64BIT_REXW_XD, noPrefix)) ||
+ (noPrefix && inheritsFrom(child, IC_64BIT_REXW_OPSIZE, noPrefix)) ||
+ (!AdSize64 && inheritsFrom(child, IC_64BIT_REXW_ADSIZE)));
+ case IC_64BIT_OPSIZE:
+ return inheritsFrom(child, IC_64BIT_REXW_OPSIZE) ||
+ (!AdSize64 && inheritsFrom(child, IC_64BIT_OPSIZE_ADSIZE)) ||
+ (!AdSize64 && inheritsFrom(child, IC_64BIT_REXW_ADSIZE));
+ case IC_64BIT_XD:
+ return(inheritsFrom(child, IC_64BIT_REXW_XD) ||
+ (!AdSize64 && inheritsFrom(child, IC_64BIT_XD_ADSIZE)));
+ case IC_64BIT_XS:
+ return(inheritsFrom(child, IC_64BIT_REXW_XS) ||
+ (!AdSize64 && inheritsFrom(child, IC_64BIT_XS_ADSIZE)));
+ case IC_64BIT_XD_OPSIZE:
+ case IC_64BIT_XS_OPSIZE:
+ return false;
+ case IC_64BIT_XD_ADSIZE:
+ case IC_64BIT_XS_ADSIZE:
+ return false;
+ case IC_64BIT_REXW_XD:
+ case IC_64BIT_REXW_XS:
+ case IC_64BIT_REXW_OPSIZE:
+ case IC_64BIT_REXW_ADSIZE:
+ return false;
+ case IC_VEX:
+ return (VEX_LIG && VEX_WIG && inheritsFrom(child, IC_VEX_L_W)) ||
+ (VEX_WIG && inheritsFrom(child, IC_VEX_W)) ||
+ (VEX_LIG && inheritsFrom(child, IC_VEX_L));
+ case IC_VEX_XS:
+ return (VEX_LIG && VEX_WIG && inheritsFrom(child, IC_VEX_L_W_XS)) ||
+ (VEX_WIG && inheritsFrom(child, IC_VEX_W_XS)) ||
+ (VEX_LIG && inheritsFrom(child, IC_VEX_L_XS));
+ case IC_VEX_XD:
+ return (VEX_LIG && VEX_WIG && inheritsFrom(child, IC_VEX_L_W_XD)) ||
+ (VEX_WIG && inheritsFrom(child, IC_VEX_W_XD)) ||
+ (VEX_LIG && inheritsFrom(child, IC_VEX_L_XD));
+ case IC_VEX_OPSIZE:
+ return (VEX_LIG && VEX_WIG && inheritsFrom(child, IC_VEX_L_W_OPSIZE)) ||
+ (VEX_WIG && inheritsFrom(child, IC_VEX_W_OPSIZE)) ||
+ (VEX_LIG && inheritsFrom(child, IC_VEX_L_OPSIZE));
+ case IC_VEX_W:
+ return VEX_LIG && inheritsFrom(child, IC_VEX_L_W);
+ case IC_VEX_W_XS:
+ return VEX_LIG && inheritsFrom(child, IC_VEX_L_W_XS);
+ case IC_VEX_W_XD:
+ return VEX_LIG && inheritsFrom(child, IC_VEX_L_W_XD);
+ case IC_VEX_W_OPSIZE:
+ return VEX_LIG && inheritsFrom(child, IC_VEX_L_W_OPSIZE);
+ case IC_VEX_L:
+ return VEX_WIG && inheritsFrom(child, IC_VEX_L_W);
+ case IC_VEX_L_XS:
+ return VEX_WIG && inheritsFrom(child, IC_VEX_L_W_XS);
+ case IC_VEX_L_XD:
+ return VEX_WIG && inheritsFrom(child, IC_VEX_L_W_XD);
+ case IC_VEX_L_OPSIZE:
+ return VEX_WIG && inheritsFrom(child, IC_VEX_L_W_OPSIZE);
+ case IC_VEX_L_W:
+ case IC_VEX_L_W_XS:
+ case IC_VEX_L_W_XD:
+ case IC_VEX_L_W_OPSIZE:
+ return false;
+ case IC_EVEX:
+ return (VEX_LIG && VEX_WIG && inheritsFrom(child, IC_EVEX_L_W)) ||
+ (VEX_LIG && VEX_WIG && inheritsFrom(child, IC_EVEX_L2_W)) ||
+ (VEX_WIG && inheritsFrom(child, IC_EVEX_W)) ||
+ (VEX_LIG && inheritsFrom(child, IC_EVEX_L)) ||
+ (VEX_LIG && inheritsFrom(child, IC_EVEX_L2));
+ case IC_EVEX_XS:
+ return (VEX_LIG && VEX_WIG && inheritsFrom(child, IC_EVEX_L_W_XS)) ||
+ (VEX_LIG && VEX_WIG && inheritsFrom(child, IC_EVEX_L2_W_XS)) ||
+ (VEX_WIG && inheritsFrom(child, IC_EVEX_W_XS)) ||
+ (VEX_LIG && inheritsFrom(child, IC_EVEX_L_XS)) ||
+ (VEX_LIG && inheritsFrom(child, IC_EVEX_L2_XS));
+ case IC_EVEX_XD:
+ return (VEX_LIG && VEX_WIG && inheritsFrom(child, IC_EVEX_L_W_XD)) ||
+ (VEX_LIG && VEX_WIG && inheritsFrom(child, IC_EVEX_L2_W_XD)) ||
+ (VEX_WIG && inheritsFrom(child, IC_EVEX_W_XD)) ||
+ (VEX_LIG && inheritsFrom(child, IC_EVEX_L_XD)) ||
+ (VEX_LIG && inheritsFrom(child, IC_EVEX_L2_XD));
+ case IC_EVEX_OPSIZE:
+ return (VEX_LIG && VEX_WIG && inheritsFrom(child, IC_EVEX_L_W_OPSIZE)) ||
+ (VEX_LIG && VEX_WIG && inheritsFrom(child, IC_EVEX_L2_W_OPSIZE)) ||
+ (VEX_WIG && inheritsFrom(child, IC_EVEX_W_OPSIZE)) ||
+ (VEX_LIG && inheritsFrom(child, IC_EVEX_L_OPSIZE)) ||
+ (VEX_LIG && inheritsFrom(child, IC_EVEX_L2_OPSIZE));
+ case IC_EVEX_K:
+ return (VEX_LIG && VEX_WIG && inheritsFrom(child, IC_EVEX_L_W_K)) ||
+ (VEX_LIG && VEX_WIG && inheritsFrom(child, IC_EVEX_L2_W_K)) ||
+ (VEX_WIG && inheritsFrom(child, IC_EVEX_W_K)) ||
+ (VEX_LIG && inheritsFrom(child, IC_EVEX_L_K)) ||
+ (VEX_LIG && inheritsFrom(child, IC_EVEX_L2_K));
+ case IC_EVEX_XS_K:
+ return (VEX_LIG && VEX_WIG && inheritsFrom(child, IC_EVEX_L_W_XS_K)) ||
+ (VEX_LIG && VEX_WIG && inheritsFrom(child, IC_EVEX_L2_W_XS_K)) ||
+ (VEX_WIG && inheritsFrom(child, IC_EVEX_W_XS_K)) ||
+ (VEX_LIG && inheritsFrom(child, IC_EVEX_L_XS_K)) ||
+ (VEX_LIG && inheritsFrom(child, IC_EVEX_L2_XS_K));
+ case IC_EVEX_XD_K:
+ return (VEX_LIG && VEX_WIG && inheritsFrom(child, IC_EVEX_L_W_XD_K)) ||
+ (VEX_LIG && VEX_WIG && inheritsFrom(child, IC_EVEX_L2_W_XD_K)) ||
+ (VEX_WIG && inheritsFrom(child, IC_EVEX_W_XD_K)) ||
+ (VEX_LIG && inheritsFrom(child, IC_EVEX_L_XD_K)) ||
+ (VEX_LIG && inheritsFrom(child, IC_EVEX_L2_XD_K));
+ case IC_EVEX_OPSIZE_K:
+ return (VEX_LIG && VEX_WIG && inheritsFrom(child, IC_EVEX_L_W_OPSIZE_K)) ||
+ (VEX_LIG && VEX_WIG && inheritsFrom(child, IC_EVEX_L2_W_OPSIZE_K)) ||
+ (VEX_WIG && inheritsFrom(child, IC_EVEX_W_OPSIZE_K)) ||
+ (VEX_LIG && inheritsFrom(child, IC_EVEX_L_OPSIZE_K)) ||
+ (VEX_LIG && inheritsFrom(child, IC_EVEX_L2_OPSIZE_K));
+ case IC_EVEX_KZ:
+ return (VEX_LIG && VEX_WIG && inheritsFrom(child, IC_EVEX_L_W_KZ)) ||
+ (VEX_LIG && VEX_WIG && inheritsFrom(child, IC_EVEX_L2_W_KZ)) ||
+ (VEX_WIG && inheritsFrom(child, IC_EVEX_W_KZ)) ||
+ (VEX_LIG && inheritsFrom(child, IC_EVEX_L_KZ)) ||
+ (VEX_LIG && inheritsFrom(child, IC_EVEX_L2_KZ));
+ case IC_EVEX_XS_KZ:
+ return (VEX_LIG && VEX_WIG && inheritsFrom(child, IC_EVEX_L_W_XS_KZ)) ||
+ (VEX_LIG && VEX_WIG && inheritsFrom(child, IC_EVEX_L2_W_XS_KZ)) ||
+ (VEX_WIG && inheritsFrom(child, IC_EVEX_W_XS_KZ)) ||
+ (VEX_LIG && inheritsFrom(child, IC_EVEX_L_XS_KZ)) ||
+ (VEX_LIG && inheritsFrom(child, IC_EVEX_L2_XS_KZ));
+ case IC_EVEX_XD_KZ:
+ return (VEX_LIG && VEX_WIG && inheritsFrom(child, IC_EVEX_L_W_XD_KZ)) ||
+ (VEX_LIG && VEX_WIG && inheritsFrom(child, IC_EVEX_L2_W_XD_KZ)) ||
+ (VEX_WIG && inheritsFrom(child, IC_EVEX_W_XD_KZ)) ||
+ (VEX_LIG && inheritsFrom(child, IC_EVEX_L_XD_KZ)) ||
+ (VEX_LIG && inheritsFrom(child, IC_EVEX_L2_XD_KZ));
+ case IC_EVEX_OPSIZE_KZ:
+ return (VEX_LIG && VEX_WIG && inheritsFrom(child, IC_EVEX_L_W_OPSIZE_KZ)) ||
+ (VEX_LIG && VEX_WIG && inheritsFrom(child, IC_EVEX_L2_W_OPSIZE_KZ)) ||
+ (VEX_WIG && inheritsFrom(child, IC_EVEX_W_OPSIZE_KZ)) ||
+ (VEX_LIG && inheritsFrom(child, IC_EVEX_L_OPSIZE_KZ)) ||
+ (VEX_LIG && inheritsFrom(child, IC_EVEX_L2_OPSIZE_KZ));
+ case IC_EVEX_W:
+ return (VEX_LIG && inheritsFrom(child, IC_EVEX_L_W)) ||
+ (VEX_LIG && inheritsFrom(child, IC_EVEX_L2_W));
+ case IC_EVEX_W_XS:
+ return (VEX_LIG && inheritsFrom(child, IC_EVEX_L_W_XS)) ||
+ (VEX_LIG && inheritsFrom(child, IC_EVEX_L2_W_XS));
+ case IC_EVEX_W_XD:
+ return (VEX_LIG && inheritsFrom(child, IC_EVEX_L_W_XD)) ||
+ (VEX_LIG && inheritsFrom(child, IC_EVEX_L2_W_XD));
+ case IC_EVEX_W_OPSIZE:
+ return (VEX_LIG && inheritsFrom(child, IC_EVEX_L_W_OPSIZE)) ||
+ (VEX_LIG && inheritsFrom(child, IC_EVEX_L2_W_OPSIZE));
+ case IC_EVEX_W_K:
+ return (VEX_LIG && inheritsFrom(child, IC_EVEX_L_W_K)) ||
+ (VEX_LIG && inheritsFrom(child, IC_EVEX_L2_W_K));
+ case IC_EVEX_W_XS_K:
+ return (VEX_LIG && inheritsFrom(child, IC_EVEX_L_W_XS_K)) ||
+ (VEX_LIG && inheritsFrom(child, IC_EVEX_L2_W_XS_K));
+ case IC_EVEX_W_XD_K:
+ return (VEX_LIG && inheritsFrom(child, IC_EVEX_L_W_XD_K)) ||
+ (VEX_LIG && inheritsFrom(child, IC_EVEX_L2_W_XD_K));
+ case IC_EVEX_W_OPSIZE_K:
+ return (VEX_LIG && inheritsFrom(child, IC_EVEX_L_W_OPSIZE_K)) ||
+ (VEX_LIG && inheritsFrom(child, IC_EVEX_L2_W_OPSIZE_K));
+ case IC_EVEX_W_KZ:
+ return (VEX_LIG && inheritsFrom(child, IC_EVEX_L_W_KZ)) ||
+ (VEX_LIG && inheritsFrom(child, IC_EVEX_L2_W_KZ));
+ case IC_EVEX_W_XS_KZ:
+ return (VEX_LIG && inheritsFrom(child, IC_EVEX_L_W_XS_KZ)) ||
+ (VEX_LIG && inheritsFrom(child, IC_EVEX_L2_W_XS_KZ));
+ case IC_EVEX_W_XD_KZ:
+ return (VEX_LIG && inheritsFrom(child, IC_EVEX_L_W_XD_KZ)) ||
+ (VEX_LIG && inheritsFrom(child, IC_EVEX_L2_W_XD_KZ));
+ case IC_EVEX_W_OPSIZE_KZ:
+ return (VEX_LIG && inheritsFrom(child, IC_EVEX_L_W_OPSIZE_KZ)) ||
+ (VEX_LIG && inheritsFrom(child, IC_EVEX_L2_W_OPSIZE_KZ));
+ case IC_EVEX_L:
+ return VEX_WIG && inheritsFrom(child, IC_EVEX_L_W);
+ case IC_EVEX_L_XS:
+ return VEX_WIG && inheritsFrom(child, IC_EVEX_L_W_XS);
+ case IC_EVEX_L_XD:
+ return VEX_WIG && inheritsFrom(child, IC_EVEX_L_W_XD);
+ case IC_EVEX_L_OPSIZE:
+ return VEX_WIG && inheritsFrom(child, IC_EVEX_L_W_OPSIZE);
+ case IC_EVEX_L_K:
+ return VEX_WIG && inheritsFrom(child, IC_EVEX_L_W_K);
+ case IC_EVEX_L_XS_K:
+ return VEX_WIG && inheritsFrom(child, IC_EVEX_L_W_XS_K);
+ case IC_EVEX_L_XD_K:
+ return VEX_WIG && inheritsFrom(child, IC_EVEX_L_W_XD_K);
+ case IC_EVEX_L_OPSIZE_K:
+ return VEX_WIG && inheritsFrom(child, IC_EVEX_L_W_OPSIZE_K);
+ case IC_EVEX_L_KZ:
+ return VEX_WIG && inheritsFrom(child, IC_EVEX_L_W_KZ);
+ case IC_EVEX_L_XS_KZ:
+ return VEX_WIG && inheritsFrom(child, IC_EVEX_L_W_XS_KZ);
+ case IC_EVEX_L_XD_KZ:
+ return VEX_WIG && inheritsFrom(child, IC_EVEX_L_W_XD_KZ);
+ case IC_EVEX_L_OPSIZE_KZ:
+ return VEX_WIG && inheritsFrom(child, IC_EVEX_L_W_OPSIZE_KZ);
+ case IC_EVEX_L_W:
+ case IC_EVEX_L_W_XS:
+ case IC_EVEX_L_W_XD:
+ case IC_EVEX_L_W_OPSIZE:
+ return false;
+ case IC_EVEX_L_W_K:
+ case IC_EVEX_L_W_XS_K:
+ case IC_EVEX_L_W_XD_K:
+ case IC_EVEX_L_W_OPSIZE_K:
+ return false;
+ case IC_EVEX_L_W_KZ:
+ case IC_EVEX_L_W_XS_KZ:
+ case IC_EVEX_L_W_XD_KZ:
+ case IC_EVEX_L_W_OPSIZE_KZ:
+ return false;
+ case IC_EVEX_L2:
+ return VEX_WIG && inheritsFrom(child, IC_EVEX_L2_W);
+ case IC_EVEX_L2_XS:
+ return VEX_WIG && inheritsFrom(child, IC_EVEX_L2_W_XS);
+ case IC_EVEX_L2_XD:
+ return VEX_WIG && inheritsFrom(child, IC_EVEX_L2_W_XD);
+ case IC_EVEX_L2_OPSIZE:
+ return VEX_WIG && inheritsFrom(child, IC_EVEX_L2_W_OPSIZE);
+ case IC_EVEX_L2_K:
+ return VEX_WIG && inheritsFrom(child, IC_EVEX_L2_W_K);
+ case IC_EVEX_L2_XS_K:
+ return VEX_WIG && inheritsFrom(child, IC_EVEX_L2_W_XS_K);
+ case IC_EVEX_L2_XD_K:
+ return VEX_WIG && inheritsFrom(child, IC_EVEX_L2_W_XD_K);
+ case IC_EVEX_L2_OPSIZE_K:
+ return VEX_WIG && inheritsFrom(child, IC_EVEX_L2_W_OPSIZE_K);
+ case IC_EVEX_L2_KZ:
+ return VEX_WIG && inheritsFrom(child, IC_EVEX_L2_W_KZ);
+ case IC_EVEX_L2_XS_KZ:
+ return VEX_WIG && inheritsFrom(child, IC_EVEX_L2_W_XS_KZ);
+ case IC_EVEX_L2_XD_KZ:
+ return VEX_WIG && inheritsFrom(child, IC_EVEX_L2_W_XD_KZ);
+ case IC_EVEX_L2_OPSIZE_KZ:
+ return VEX_WIG && inheritsFrom(child, IC_EVEX_L2_W_OPSIZE_KZ);
+ case IC_EVEX_L2_W:
+ case IC_EVEX_L2_W_XS:
+ case IC_EVEX_L2_W_XD:
+ case IC_EVEX_L2_W_OPSIZE:
+ return false;
+ case IC_EVEX_L2_W_K:
+ case IC_EVEX_L2_W_XS_K:
+ case IC_EVEX_L2_W_XD_K:
+ case IC_EVEX_L2_W_OPSIZE_K:
+ return false;
+ case IC_EVEX_L2_W_KZ:
+ case IC_EVEX_L2_W_XS_KZ:
+ case IC_EVEX_L2_W_XD_KZ:
+ case IC_EVEX_L2_W_OPSIZE_KZ:
+ return false;
+ case IC_EVEX_B:
+ return (VEX_LIG && VEX_WIG && inheritsFrom(child, IC_EVEX_L_W_B)) ||
+ (VEX_LIG && VEX_WIG && inheritsFrom(child, IC_EVEX_L2_W_B)) ||
+ (VEX_WIG && inheritsFrom(child, IC_EVEX_W_B)) ||
+ (VEX_LIG && inheritsFrom(child, IC_EVEX_L_B)) ||
+ (VEX_LIG && inheritsFrom(child, IC_EVEX_L2_B));
+ case IC_EVEX_XS_B:
+ return (VEX_LIG && VEX_WIG && inheritsFrom(child, IC_EVEX_L_W_XS_B)) ||
+ (VEX_LIG && VEX_WIG && inheritsFrom(child, IC_EVEX_L2_W_XS_B)) ||
+ (VEX_WIG && inheritsFrom(child, IC_EVEX_W_XS_B)) ||
+ (VEX_LIG && inheritsFrom(child, IC_EVEX_L_XS_B)) ||
+ (VEX_LIG && inheritsFrom(child, IC_EVEX_L2_XS_B));
+ case IC_EVEX_XD_B:
+ return (VEX_LIG && VEX_WIG && inheritsFrom(child, IC_EVEX_L_W_XD_B)) ||
+ (VEX_LIG && VEX_WIG && inheritsFrom(child, IC_EVEX_L2_W_XD_B)) ||
+ (VEX_WIG && inheritsFrom(child, IC_EVEX_W_XD_B)) ||
+ (VEX_LIG && inheritsFrom(child, IC_EVEX_L_XD_B)) ||
+ (VEX_LIG && inheritsFrom(child, IC_EVEX_L2_XD_B));
+ case IC_EVEX_OPSIZE_B:
+ return (VEX_LIG && VEX_WIG && inheritsFrom(child, IC_EVEX_L_W_OPSIZE_B)) ||
+ (VEX_LIG && VEX_WIG && inheritsFrom(child, IC_EVEX_L2_W_OPSIZE_B)) ||
+ (VEX_WIG && inheritsFrom(child, IC_EVEX_W_OPSIZE_B)) ||
+ (VEX_LIG && inheritsFrom(child, IC_EVEX_L_OPSIZE_B)) ||
+ (VEX_LIG && inheritsFrom(child, IC_EVEX_L2_OPSIZE_B));
+ case IC_EVEX_K_B:
+ return (VEX_LIG && VEX_WIG && inheritsFrom(child, IC_EVEX_L_W_K_B)) ||
+ (VEX_LIG && VEX_WIG && inheritsFrom(child, IC_EVEX_L2_W_K_B)) ||
+ (VEX_WIG && inheritsFrom(child, IC_EVEX_W_K_B)) ||
+ (VEX_LIG && inheritsFrom(child, IC_EVEX_L_K_B)) ||
+ (VEX_LIG && inheritsFrom(child, IC_EVEX_L2_K_B));
+ case IC_EVEX_XS_K_B:
+ return (VEX_LIG && VEX_WIG && inheritsFrom(child, IC_EVEX_L_W_XS_K_B)) ||
+ (VEX_LIG && VEX_WIG && inheritsFrom(child, IC_EVEX_L2_W_XS_K_B)) ||
+ (VEX_WIG && inheritsFrom(child, IC_EVEX_W_XS_K_B)) ||
+ (VEX_LIG && inheritsFrom(child, IC_EVEX_L_XS_K_B)) ||
+ (VEX_LIG && inheritsFrom(child, IC_EVEX_L2_XS_K_B));
+ case IC_EVEX_XD_K_B:
+ return (VEX_LIG && VEX_WIG && inheritsFrom(child, IC_EVEX_L_W_XD_K_B)) ||
+ (VEX_LIG && VEX_WIG && inheritsFrom(child, IC_EVEX_L2_W_XD_K_B)) ||
+ (VEX_WIG && inheritsFrom(child, IC_EVEX_W_XD_K_B)) ||
+ (VEX_LIG && inheritsFrom(child, IC_EVEX_L_XD_K_B)) ||
+ (VEX_LIG && inheritsFrom(child, IC_EVEX_L2_XD_K_B));
+ case IC_EVEX_OPSIZE_K_B:
+ return (VEX_LIG && VEX_WIG &&
+ inheritsFrom(child, IC_EVEX_L_W_OPSIZE_K_B)) ||
+ (VEX_LIG && VEX_WIG &&
+ inheritsFrom(child, IC_EVEX_L2_W_OPSIZE_K_B)) ||
+ (VEX_WIG && inheritsFrom(child, IC_EVEX_W_OPSIZE_K_B)) ||
+ (VEX_LIG && inheritsFrom(child, IC_EVEX_L_OPSIZE_K_B)) ||
+ (VEX_LIG && inheritsFrom(child, IC_EVEX_L2_OPSIZE_K_B));
+ case IC_EVEX_KZ_B:
+ return (VEX_LIG && VEX_WIG && inheritsFrom(child, IC_EVEX_L_W_KZ_B)) ||
+ (VEX_LIG && VEX_WIG && inheritsFrom(child, IC_EVEX_L2_W_KZ_B)) ||
+ (VEX_WIG && inheritsFrom(child, IC_EVEX_W_KZ_B)) ||
+ (VEX_LIG && inheritsFrom(child, IC_EVEX_L_KZ_B)) ||
+ (VEX_LIG && inheritsFrom(child, IC_EVEX_L2_KZ_B));
+ case IC_EVEX_XS_KZ_B:
+ return (VEX_LIG && VEX_WIG && inheritsFrom(child, IC_EVEX_L_W_XS_KZ_B)) ||
+ (VEX_LIG && VEX_WIG && inheritsFrom(child, IC_EVEX_L2_W_XS_KZ_B)) ||
+ (VEX_WIG && inheritsFrom(child, IC_EVEX_W_XS_KZ_B)) ||
+ (VEX_LIG && inheritsFrom(child, IC_EVEX_L_XS_KZ_B)) ||
+ (VEX_LIG && inheritsFrom(child, IC_EVEX_L2_XS_KZ_B));
+ case IC_EVEX_XD_KZ_B:
+ return (VEX_LIG && VEX_WIG && inheritsFrom(child, IC_EVEX_L_W_XD_KZ_B)) ||
+ (VEX_LIG && VEX_WIG && inheritsFrom(child, IC_EVEX_L2_W_XD_KZ_B)) ||
+ (VEX_WIG && inheritsFrom(child, IC_EVEX_W_XD_KZ_B)) ||
+ (VEX_LIG && inheritsFrom(child, IC_EVEX_L_XD_KZ_B)) ||
+ (VEX_LIG && inheritsFrom(child, IC_EVEX_L2_XD_KZ_B));
+ case IC_EVEX_OPSIZE_KZ_B:
+ return (VEX_LIG && VEX_WIG &&
+ inheritsFrom(child, IC_EVEX_L_W_OPSIZE_KZ_B)) ||
+ (VEX_LIG && VEX_WIG &&
+ inheritsFrom(child, IC_EVEX_L2_W_OPSIZE_KZ_B)) ||
+ (VEX_WIG && inheritsFrom(child, IC_EVEX_W_OPSIZE_KZ_B)) ||
+ (VEX_LIG && inheritsFrom(child, IC_EVEX_L_OPSIZE_KZ_B)) ||
+ (VEX_LIG && inheritsFrom(child, IC_EVEX_L2_OPSIZE_KZ_B));
+ case IC_EVEX_W_B:
+ return (VEX_LIG && inheritsFrom(child, IC_EVEX_L_W_B)) ||
+ (VEX_LIG && inheritsFrom(child, IC_EVEX_L2_W_B));
+ case IC_EVEX_W_XS_B:
+ return (VEX_LIG && inheritsFrom(child, IC_EVEX_L_W_XS_B)) ||
+ (VEX_LIG && inheritsFrom(child, IC_EVEX_L2_W_XS_B));
+ case IC_EVEX_W_XD_B:
+ return (VEX_LIG && inheritsFrom(child, IC_EVEX_L_W_XD_B)) ||
+ (VEX_LIG && inheritsFrom(child, IC_EVEX_L2_W_XD_B));
+ case IC_EVEX_W_OPSIZE_B:
+ return (VEX_LIG && inheritsFrom(child, IC_EVEX_L_W_OPSIZE_B)) ||
+ (VEX_LIG && inheritsFrom(child, IC_EVEX_L2_W_OPSIZE_B));
+ case IC_EVEX_W_K_B:
+ return (VEX_LIG && inheritsFrom(child, IC_EVEX_L_W_K_B)) ||
+ (VEX_LIG && inheritsFrom(child, IC_EVEX_L2_W_K_B));
+ case IC_EVEX_W_XS_K_B:
+ return (VEX_LIG && inheritsFrom(child, IC_EVEX_L_W_XS_K_B)) ||
+ (VEX_LIG && inheritsFrom(child, IC_EVEX_L2_W_XS_K_B));
+ case IC_EVEX_W_XD_K_B:
+ return (VEX_LIG && inheritsFrom(child, IC_EVEX_L_W_XD_K_B)) ||
+ (VEX_LIG && inheritsFrom(child, IC_EVEX_L2_W_XD_K_B));
+ case IC_EVEX_W_OPSIZE_K_B:
+ return (VEX_LIG && inheritsFrom(child, IC_EVEX_L_W_OPSIZE_K_B)) ||
+ (VEX_LIG && inheritsFrom(child, IC_EVEX_L2_W_OPSIZE_K_B));
+ case IC_EVEX_W_KZ_B:
+ return (VEX_LIG && inheritsFrom(child, IC_EVEX_L_W_KZ_B)) ||
+ (VEX_LIG && inheritsFrom(child, IC_EVEX_L2_W_KZ_B));
+ case IC_EVEX_W_XS_KZ_B:
+ return (VEX_LIG && inheritsFrom(child, IC_EVEX_L_W_XS_KZ_B)) ||
+ (VEX_LIG && inheritsFrom(child, IC_EVEX_L2_W_XS_KZ_B));
+ case IC_EVEX_W_XD_KZ_B:
+ return (VEX_LIG && inheritsFrom(child, IC_EVEX_L_W_XD_KZ_B)) ||
+ (VEX_LIG && inheritsFrom(child, IC_EVEX_L2_W_XD_KZ_B));
+ case IC_EVEX_W_OPSIZE_KZ_B:
+ return (VEX_LIG && inheritsFrom(child, IC_EVEX_L_W_OPSIZE_KZ_B)) ||
+ (VEX_LIG && inheritsFrom(child, IC_EVEX_L2_W_OPSIZE_KZ_B));
+ case IC_EVEX_L_B:
+ return VEX_WIG && inheritsFrom(child, IC_EVEX_L_W_B);
+ case IC_EVEX_L_XS_B:
+ return VEX_WIG && inheritsFrom(child, IC_EVEX_L_W_XS_B);
+ case IC_EVEX_L_XD_B:
+ return VEX_WIG && inheritsFrom(child, IC_EVEX_L_W_XD_B);
+ case IC_EVEX_L_OPSIZE_B:
+ return VEX_WIG && inheritsFrom(child, IC_EVEX_L_W_OPSIZE_B);
+ case IC_EVEX_L_K_B:
+ return VEX_WIG && inheritsFrom(child, IC_EVEX_L_W_K_B);
+ case IC_EVEX_L_XS_K_B:
+ return VEX_WIG && inheritsFrom(child, IC_EVEX_L_W_XS_K_B);
+ case IC_EVEX_L_XD_K_B:
+ return VEX_WIG && inheritsFrom(child, IC_EVEX_L_W_XD_K_B);
+ case IC_EVEX_L_OPSIZE_K_B:
+ return VEX_WIG && inheritsFrom(child, IC_EVEX_L_W_OPSIZE_K_B);
+ case IC_EVEX_L_KZ_B:
+ return VEX_WIG && inheritsFrom(child, IC_EVEX_L_W_KZ_B);
+ case IC_EVEX_L_XS_KZ_B:
+ return VEX_WIG && inheritsFrom(child, IC_EVEX_L_W_XS_KZ_B);
+ case IC_EVEX_L_XD_KZ_B:
+ return VEX_WIG && inheritsFrom(child, IC_EVEX_L_W_XD_KZ_B);
+ case IC_EVEX_L_OPSIZE_KZ_B:
+ return VEX_WIG && inheritsFrom(child, IC_EVEX_L_W_OPSIZE_KZ_B);
+ case IC_EVEX_L_W_B:
+ case IC_EVEX_L_W_XS_B:
+ case IC_EVEX_L_W_XD_B:
+ case IC_EVEX_L_W_OPSIZE_B:
+ return false;
+ case IC_EVEX_L_W_K_B:
+ case IC_EVEX_L_W_XS_K_B:
+ case IC_EVEX_L_W_XD_K_B:
+ case IC_EVEX_L_W_OPSIZE_K_B:
+ return false;
+ case IC_EVEX_L_W_KZ_B:
+ case IC_EVEX_L_W_XS_KZ_B:
+ case IC_EVEX_L_W_XD_KZ_B:
+ case IC_EVEX_L_W_OPSIZE_KZ_B:
+ return false;
+ case IC_EVEX_L2_B:
+ return VEX_WIG && inheritsFrom(child, IC_EVEX_L2_W_B);
+ case IC_EVEX_L2_XS_B:
+ return VEX_WIG && inheritsFrom(child, IC_EVEX_L2_W_XS_B);
+ case IC_EVEX_L2_XD_B:
+ return VEX_WIG && inheritsFrom(child, IC_EVEX_L2_W_XD_B);
+ case IC_EVEX_L2_OPSIZE_B:
+ return VEX_WIG && inheritsFrom(child, IC_EVEX_L2_W_OPSIZE_B);
+ case IC_EVEX_L2_K_B:
+ return VEX_WIG && inheritsFrom(child, IC_EVEX_L2_W_K_B);
+ case IC_EVEX_L2_XS_K_B:
+ return VEX_WIG && inheritsFrom(child, IC_EVEX_L2_W_XS_K_B);
+ case IC_EVEX_L2_XD_K_B:
+ return VEX_WIG && inheritsFrom(child, IC_EVEX_L2_W_XD_K_B);
+ case IC_EVEX_L2_OPSIZE_K_B:
+ return VEX_WIG && inheritsFrom(child, IC_EVEX_L2_W_OPSIZE_K_B);
+ case IC_EVEX_L2_KZ_B:
+ return VEX_WIG && inheritsFrom(child, IC_EVEX_L2_W_KZ_B);
+ case IC_EVEX_L2_XS_KZ_B:
+ return VEX_WIG && inheritsFrom(child, IC_EVEX_L2_W_XS_KZ_B);
+ case IC_EVEX_L2_XD_KZ_B:
+ return VEX_WIG && inheritsFrom(child, IC_EVEX_L2_W_XD_KZ_B);
+ case IC_EVEX_L2_OPSIZE_KZ_B:
+ return VEX_WIG && inheritsFrom(child, IC_EVEX_L2_W_OPSIZE_KZ_B);
+ case IC_EVEX_L2_W_B:
+ case IC_EVEX_L2_W_XS_B:
+ case IC_EVEX_L2_W_XD_B:
+ case IC_EVEX_L2_W_OPSIZE_B:
+ return false;
+ case IC_EVEX_L2_W_K_B:
+ case IC_EVEX_L2_W_XS_K_B:
+ case IC_EVEX_L2_W_XD_K_B:
+ case IC_EVEX_L2_W_OPSIZE_K_B:
+ return false;
+ case IC_EVEX_L2_W_KZ_B:
+ case IC_EVEX_L2_W_XS_KZ_B:
+ case IC_EVEX_L2_W_XD_KZ_B:
+ case IC_EVEX_L2_W_OPSIZE_KZ_B:
+ return false;
+ default:
+ errs() << "Unknown instruction class: " <<
+ stringForContext((InstructionContext)parent) << "\n";
+ llvm_unreachable("Unknown instruction class");
+ }
+}
+
+/// outranks - Determines which of two applicable instruction classes should
+/// be preferred when performing decode. This
+/// imposes a total ordering (ties are resolved toward "lower")
+///
+/// @param upper - The class that may be preferable
+/// @param lower - The class that may be less preferable
+/// @return - True if upper is to be preferred, false otherwise.
+static inline bool outranks(InstructionContext upper,
+                            InstructionContext lower) {
+  assert(upper < IC_max);
+  assert(lower < IC_max);
+
+  // Expand INSTRUCTION_CONTEXTS into a rank-per-context lookup table; the
+  // second X-macro argument (r) is the numeric rank of each context.
+#define ENUM_ENTRY(n, r, d) r,
+#define ENUM_ENTRY_K_B(n, r, d) ENUM_ENTRY(n, r, d) \
+  ENUM_ENTRY(n##_K_B, r, d) ENUM_ENTRY(n##_KZ_B, r, d) \
+  ENUM_ENTRY(n##_KZ, r, d) ENUM_ENTRY(n##_K, r, d) ENUM_ENTRY(n##_B, r, d)
+  static int ranks[IC_max] = {
+    INSTRUCTION_CONTEXTS
+  };
+#undef ENUM_ENTRY
+#undef ENUM_ENTRY_K_B
+
+  return (ranks[upper] > ranks[lower]);
+}
+
+/// getDecisionType - Determines whether a ModRM decision with 256 entries can
+/// be compacted by eliminating redundant information.
+///
+/// @param decision - The decision to be compacted.
+/// @return - The most compact available representation for the decision.
+static ModRMDecisionType getDecisionType(ModRMDecision &decision) {
+  bool satisfiesOneEntry = true;
+  bool satisfiesSplitRM = true;
+  bool satisfiesSplitReg = true;
+  bool satisfiesSplitMisc = true;
+
+  // Scan all 256 ModRM byte values, knocking out each candidate encoding
+  // whose invariant the table fails to satisfy.
+  for (unsigned index = 0; index < 256; ++index) {
+    // ONEENTRY: every entry is identical.
+    if (decision.instructionIDs[index] != decision.instructionIDs[0])
+      satisfiesOneEntry = false;
+
+    // SPLITRM: one ID for register forms (mod == 0b11, bytes 0xc0-0xff) and
+    // one for memory forms (everything else).
+    if (((index & 0xc0) == 0xc0) &&
+        (decision.instructionIDs[index] != decision.instructionIDs[0xc0]))
+      satisfiesSplitRM = false;
+
+    if (((index & 0xc0) != 0xc0) &&
+        (decision.instructionIDs[index] != decision.instructionIDs[0x00]))
+      satisfiesSplitRM = false;
+
+    // SPLITREG / SPLITMISC: the entry depends only on the reg field
+    // (bits 3-5), for register and memory forms respectively.
+    if (((index & 0xc0) == 0xc0) &&
+        (decision.instructionIDs[index] != decision.instructionIDs[index&0xf8]))
+      satisfiesSplitReg = false;
+
+    if (((index & 0xc0) != 0xc0) &&
+        (decision.instructionIDs[index] != decision.instructionIDs[index&0x38]))
+      satisfiesSplitMisc = false;
+  }
+
+  if (satisfiesOneEntry)
+    return MODRM_ONEENTRY;
+
+  if (satisfiesSplitRM)
+    return MODRM_SPLITRM;
+
+  if (satisfiesSplitReg && satisfiesSplitMisc)
+    return MODRM_SPLITREG;
+
+  if (satisfiesSplitMisc)
+    return MODRM_SPLITMISC;
+
+  return MODRM_FULL;
+}
+
+/// stringForDecisionType - Returns a statically-allocated string corresponding
+/// to a particular decision type.
+///
+/// @param dt - The decision type.
+/// @return - A pointer to the statically-allocated string (e.g.,
+///           "MODRM_ONEENTRY" for MODRM_ONEENTRY).
+static const char* stringForDecisionType(ModRMDecisionType dt) {
+  // MODRMTYPES expands to one ENUM_ENTRY per ModRMDecisionType value, so the
+  // temporary macro below stringizes each enumerator into its own case.
+#define ENUM_ENTRY(n) case n: return #n;
+  switch (dt) {
+  default:
+    llvm_unreachable("Unknown decision type");
+  MODRMTYPES
+  }
+#undef ENUM_ENTRY
+}
+
+DisassemblerTables::DisassemblerTables() {
+  // Allocate one zero-initialized ContextDecision per opcode map (see the
+  // Tables array documentation in X86DisassemblerTables.h).
+  for (unsigned i = 0; i < std::size(Tables); i++)
+    Tables[i] = std::make_unique<ContextDecision>();
+
+  HasConflicts = false;
+}
+
+// Out-of-line destructor; nothing to release beyond what the members'
+// own destructors already handle.
+DisassemblerTables::~DisassemblerTables() {
+}
+
+void DisassemblerTables::emitModRMDecision(raw_ostream &o1, raw_ostream &o2,
+                                           unsigned &i1, unsigned &i2,
+                                           unsigned &ModRMTableNum,
+                                           ModRMDecision &decision) const {
+  // Running total of entries emitted across all calls; used only to assert
+  // that indices still fit into uint16_t.
+  static uint32_t sEntryNumber = 1;
+  ModRMDecisionType dt = getDecisionType(decision);
+
+  if (dt == MODRM_ONEENTRY && decision.instructionIDs[0] == 0) {
+    // Empty table.
+    o2 << "{" << stringForDecisionType(dt) << ", 0}";
+    return;
+  }
+
+  // Collect the minimal list of instruction IDs this decision needs, per
+  // the compaction scheme chosen above.
+  std::vector<unsigned> ModRMDecision;
+
+  switch (dt) {
+  default:
+    llvm_unreachable("Unknown decision type");
+  case MODRM_ONEENTRY:
+    ModRMDecision.push_back(decision.instructionIDs[0]);
+    break;
+  case MODRM_SPLITRM:
+    ModRMDecision.push_back(decision.instructionIDs[0x00]);
+    ModRMDecision.push_back(decision.instructionIDs[0xc0]);
+    break;
+  case MODRM_SPLITREG:
+    for (unsigned index = 0; index < 64; index += 8)
+      ModRMDecision.push_back(decision.instructionIDs[index]);
+    for (unsigned index = 0xc0; index < 256; index += 8)
+      ModRMDecision.push_back(decision.instructionIDs[index]);
+    break;
+  case MODRM_SPLITMISC:
+    for (unsigned index = 0; index < 64; index += 8)
+      ModRMDecision.push_back(decision.instructionIDs[index]);
+    for (unsigned index = 0xc0; index < 256; ++index)
+      ModRMDecision.push_back(decision.instructionIDs[index]);
+    break;
+  case MODRM_FULL:
+    for (unsigned short InstructionID : decision.instructionIDs)
+      ModRMDecision.push_back(InstructionID);
+    break;
+  }
+
+  // Intern the ID list in ModRMTable so identical tables are emitted only
+  // once; an entry value of 0 means this list has not been emitted yet.
+  unsigned &EntryNumber = ModRMTable[ModRMDecision];
+  if (EntryNumber == 0) {
+    EntryNumber = ModRMTableNum;
+
+    ModRMTableNum += ModRMDecision.size();
+    o1 << "/*Table" << EntryNumber << "*/\n";
+    i1++;
+    for (unsigned I : ModRMDecision) {
+      o1.indent(i1 * 2) << format("0x%hx", I) << ", /*"
+                        << InstructionSpecifiers[I].name << "*/\n";
+    }
+    i1--;
+  }
+
+  o2 << "{" << stringForDecisionType(dt) << ", " << EntryNumber << "}";
+
+  // Account for how many entries this decision type logically contributes.
+  switch (dt) {
+  default:
+    llvm_unreachable("Unknown decision type");
+  case MODRM_ONEENTRY:
+    sEntryNumber += 1;
+    break;
+  case MODRM_SPLITRM:
+    sEntryNumber += 2;
+    break;
+  case MODRM_SPLITREG:
+    sEntryNumber += 16;
+    break;
+  case MODRM_SPLITMISC:
+    sEntryNumber += 8 + 64;
+    break;
+  case MODRM_FULL:
+    sEntryNumber += 256;
+    break;
+  }
+
+  // We assume that the index can fit into uint16_t.
+  assert(sEntryNumber < 65536U &&
+         "Index into ModRMDecision is too large for uint16_t!");
+  (void)sEntryNumber;
+}
+
+void DisassemblerTables::emitOpcodeDecision(raw_ostream &o1, raw_ostream &o2,
+                                            unsigned &i1, unsigned &i2,
+                                            unsigned &ModRMTableNum,
+                                            OpcodeDecision &opDecision) const {
+  o2 << "{";
+  ++i2;
+
+  // Probe whether all 256 opcodes map to the empty one-entry decision; if so
+  // the whole OpcodeDecision collapses to an empty initializer.
+  unsigned index;
+  for (index = 0; index < 256; ++index) {
+    auto &decision = opDecision.modRMDecisions[index];
+    ModRMDecisionType dt = getDecisionType(decision);
+    if (!(dt == MODRM_ONEENTRY && decision.instructionIDs[0] == 0))
+      break;
+  }
+  if (index == 256) {
+    // If all 256 entries are MODRM_ONEENTRY, omit output.
+    static_assert(MODRM_ONEENTRY == 0);
+    --i2;
+    o2 << "},\n";
+  } else {
+    o2 << " /* struct OpcodeDecision */ {\n";
+    for (index = 0; index < 256; ++index) {
+      o2.indent(i2);
+
+      o2 << "/*0x" << format("%02hhx", index) << "*/";
+
+      emitModRMDecision(o1, o2, i1, i2, ModRMTableNum,
+                        opDecision.modRMDecisions[index]);
+
+      // No trailing comma after the last element.
+      if (index < 255)
+        o2 << ",";
+
+      o2 << "\n";
+    }
+    o2.indent(i2) << "}\n";
+    --i2;
+    o2.indent(i2) << "},\n";
+  }
+}
+
+void DisassemblerTables::emitContextDecision(raw_ostream &o1, raw_ostream &o2,
+                                             unsigned &i1, unsigned &i2,
+                                             unsigned &ModRMTableNum,
+                                             ContextDecision &decision,
+                                             const char* name) const {
+  o2.indent(i2) << "static const struct ContextDecision " << name << " = {{/* opcodeDecisions */\n";
+  i2++;
+
+  // One OpcodeDecision per instruction context, labeled with the context
+  // name so the generated table stays human-readable.
+  for (unsigned index = 0; index < IC_max; ++index) {
+    o2.indent(i2) << "/*";
+    o2 << stringForContext((InstructionContext)index);
+    o2 << "*/ ";
+
+    emitOpcodeDecision(o1, o2, i1, i2, ModRMTableNum,
+                       decision.opcodeDecisions[index]);
+  }
+
+  i2--;
+  o2.indent(i2) << "}};" << "\n";
+}
+
+void DisassemblerTables::emitInstructionInfo(raw_ostream &o,
+                                             unsigned &i) const {
+  unsigned NumInstructions = InstructionSpecifiers.size();
+
+  // Emit the deduplicated operand-set table first; each instruction below
+  // refers to one of these sets by index.
+  o << "static const struct OperandSpecifier x86OperandSets[]["
+    << X86_MAX_OPERANDS << "] = {\n";
+
+  typedef SmallVector<std::pair<OperandEncoding, OperandType>,
+                      X86_MAX_OPERANDS> OperandListTy;
+  std::map<OperandListTy, unsigned> OperandSets;
+
+  unsigned OperandSetNum = 0;
+  for (unsigned Index = 0; Index < NumInstructions; ++Index) {
+    OperandListTy OperandList;
+
+    for (auto Operand : InstructionSpecifiers[Index].operands) {
+      OperandEncoding Encoding = (OperandEncoding)Operand.encoding;
+      OperandType Type = (OperandType)Operand.type;
+      OperandList.push_back(std::make_pair(Encoding, Type));
+    }
+    // Set IDs are interned biased by one so that zero means "not seen yet".
+    unsigned &N = OperandSets[OperandList];
+    if (N != 0) continue;
+
+    N = ++OperandSetNum;
+
+    o << "  { /* " << (OperandSetNum - 1) << " */\n";
+    // OpIdx (renamed from 'i') avoids shadowing the indentation parameter.
+    for (unsigned OpIdx = 0, e = OperandList.size(); OpIdx != e; ++OpIdx) {
+      const char *Encoding = stringForOperandEncoding(OperandList[OpIdx].first);
+      const char *Type = stringForOperandType(OperandList[OpIdx].second);
+      o << "    { " << Encoding << ", " << Type << " },\n";
+    }
+    o << "  },\n";
+  }
+  o << "};" << "\n\n";
+
+  // Now emit the instruction specifier table itself.
+  o.indent(i * 2) << "static const struct InstructionSpecifier ";
+  o << INSTRUCTIONS_STR "[" << InstructionSpecifiers.size() << "] = {\n";
+
+  i++;
+
+  for (unsigned index = 0; index < NumInstructions; ++index) {
+    o.indent(i * 2) << "{ /* " << index << " */\n";
+    i++;
+
+    OperandListTy OperandList;
+    for (auto Operand : InstructionSpecifiers[index].operands) {
+      OperandEncoding Encoding = (OperandEncoding)Operand.encoding;
+      OperandType Type = (OperandType)Operand.type;
+      OperandList.push_back(std::make_pair(Encoding, Type));
+    }
+    // Undo the one-bias applied when the operand set was interned above.
+    o.indent(i * 2) << (OperandSets[OperandList] - 1) << ",\n";
+
+    o.indent(i * 2) << "/* " << InstructionSpecifiers[index].name << " */\n";
+
+    i--;
+    o.indent(i * 2) << "},\n";
+  }
+
+  i--;
+  o.indent(i * 2) << "};" << "\n";
+}
+
+void DisassemblerTables::emitContextTable(raw_ostream &o, unsigned &i) const {
+  o.indent(i * 2) << "static const uint8_t " CONTEXTS_STR
+                     "[" << ATTR_max << "] = {\n";
+  i++;
+
+  // Map every possible attribute-bit mask to the best-fitting
+  // InstructionContext name.
+  for (unsigned index = 0; index < ATTR_max; ++index) {
+    o.indent(i * 2);
+
+    // VEX/EVEX contexts are assembled piecewise from the attribute bits.
+    if ((index & ATTR_EVEX) || (index & ATTR_VEX) || (index & ATTR_VEXL)) {
+      if (index & ATTR_EVEX)
+        o << "IC_EVEX";
+      else
+        o << "IC_VEX";
+
+      if ((index & ATTR_EVEX) && (index & ATTR_EVEXL2))
+        o << "_L2";
+      else if (index & ATTR_VEXL)
+        o << "_L";
+
+      if (index & ATTR_REXW)
+        o << "_W";
+
+      if (index & ATTR_OPSIZE)
+        o << "_OPSIZE";
+      else if (index & ATTR_XD)
+        o << "_XD";
+      else if (index & ATTR_XS)
+        o << "_XS";
+
+      // Masking (K/KZ) and broadcast (B) suffixes only exist for EVEX.
+      if ((index & ATTR_EVEX)) {
+        if (index & ATTR_EVEXKZ)
+          o << "_KZ";
+        else if (index & ATTR_EVEXK)
+          o << "_K";
+
+        if (index & ATTR_EVEXB)
+          o << "_B";
+      }
+    }
+    // Non-VEX contexts: this chain is ordered most-specific first, so the
+    // first matching attribute combination wins.
+    else if ((index & ATTR_64BIT) && (index & ATTR_REXW) && (index & ATTR_XS))
+      o << "IC_64BIT_REXW_XS";
+    else if ((index & ATTR_64BIT) && (index & ATTR_REXW) && (index & ATTR_XD))
+      o << "IC_64BIT_REXW_XD";
+    else if ((index & ATTR_64BIT) && (index & ATTR_REXW) &&
+             (index & ATTR_OPSIZE))
+      o << "IC_64BIT_REXW_OPSIZE";
+    else if ((index & ATTR_64BIT) && (index & ATTR_REXW) &&
+             (index & ATTR_ADSIZE))
+      o << "IC_64BIT_REXW_ADSIZE";
+    else if ((index & ATTR_64BIT) && (index & ATTR_XD) && (index & ATTR_OPSIZE))
+      o << "IC_64BIT_XD_OPSIZE";
+    else if ((index & ATTR_64BIT) && (index & ATTR_XD) && (index & ATTR_ADSIZE))
+      o << "IC_64BIT_XD_ADSIZE";
+    else if ((index & ATTR_64BIT) && (index & ATTR_XS) && (index & ATTR_OPSIZE))
+      o << "IC_64BIT_XS_OPSIZE";
+    else if ((index & ATTR_64BIT) && (index & ATTR_XS) && (index & ATTR_ADSIZE))
+      o << "IC_64BIT_XS_ADSIZE";
+    else if ((index & ATTR_64BIT) && (index & ATTR_XS))
+      o << "IC_64BIT_XS";
+    else if ((index & ATTR_64BIT) && (index & ATTR_XD))
+      o << "IC_64BIT_XD";
+    else if ((index & ATTR_64BIT) && (index & ATTR_OPSIZE) &&
+             (index & ATTR_ADSIZE))
+      o << "IC_64BIT_OPSIZE_ADSIZE";
+    else if ((index & ATTR_64BIT) && (index & ATTR_OPSIZE))
+      o << "IC_64BIT_OPSIZE";
+    else if ((index & ATTR_64BIT) && (index & ATTR_ADSIZE))
+      o << "IC_64BIT_ADSIZE";
+    else if ((index & ATTR_64BIT) && (index & ATTR_REXW))
+      o << "IC_64BIT_REXW";
+    else if ((index & ATTR_64BIT))
+      o << "IC_64BIT";
+    else if ((index & ATTR_XS) && (index & ATTR_OPSIZE))
+      o << "IC_XS_OPSIZE";
+    else if ((index & ATTR_XD) && (index & ATTR_OPSIZE))
+      o << "IC_XD_OPSIZE";
+    else if ((index & ATTR_XS) && (index & ATTR_ADSIZE))
+      o << "IC_XS_ADSIZE";
+    else if ((index & ATTR_XD) && (index & ATTR_ADSIZE))
+      o << "IC_XD_ADSIZE";
+    else if (index & ATTR_XS)
+      o << "IC_XS";
+    else if (index & ATTR_XD)
+      o << "IC_XD";
+    else if ((index & ATTR_OPSIZE) && (index & ATTR_ADSIZE))
+      o << "IC_OPSIZE_ADSIZE";
+    else if (index & ATTR_OPSIZE)
+      o << "IC_OPSIZE";
+    else if (index & ATTR_ADSIZE)
+      o << "IC_ADSIZE";
+    else
+      o << "IC";
+
+    o << ", // " << index << "\n";
+  }
+
+  i--;
+  o.indent(i * 2) << "};" << "\n";
+}
+
+void DisassemblerTables::emitContextDecisions(raw_ostream &o1, raw_ostream &o2,
+                                              unsigned &i1, unsigned &i2,
+                                              unsigned &ModRMTableNum) const {
+  // Order must match the documented layout of the Tables array in
+  // X86DisassemblerTables.h (one table per opcode map).
+  emitContextDecision(o1, o2, i1, i2, ModRMTableNum, *Tables[0], ONEBYTE_STR);
+  emitContextDecision(o1, o2, i1, i2, ModRMTableNum, *Tables[1], TWOBYTE_STR);
+  emitContextDecision(o1, o2, i1, i2, ModRMTableNum, *Tables[2], THREEBYTE38_STR);
+  emitContextDecision(o1, o2, i1, i2, ModRMTableNum, *Tables[3], THREEBYTE3A_STR);
+  emitContextDecision(o1, o2, i1, i2, ModRMTableNum, *Tables[4], XOP8_MAP_STR);
+  emitContextDecision(o1, o2, i1, i2, ModRMTableNum, *Tables[5], XOP9_MAP_STR);
+  emitContextDecision(o1, o2, i1, i2, ModRMTableNum, *Tables[6], XOPA_MAP_STR);
+  emitContextDecision(o1, o2, i1, i2, ModRMTableNum, *Tables[7], THREEDNOW_MAP_STR);
+  emitContextDecision(o1, o2, i1, i2, ModRMTableNum, *Tables[8], MAP5_STR);
+  emitContextDecision(o1, o2, i1, i2, ModRMTableNum, *Tables[9], MAP6_STR);
+}
+
+void DisassemblerTables::emit(raw_ostream &o) const {
+  unsigned i1 = 0;
+  unsigned i2 = 0;
+
+  // The ID tables (o1) and the decision structures that index into them (o2)
+  // are built side by side in memory, then spliced into the output in order.
+  std::string s1;
+  std::string s2;
+
+  raw_string_ostream o1(s1);
+  raw_string_ostream o2(s2);
+
+  emitInstructionInfo(o, i2);
+  o << "\n";
+
+  emitContextTable(o, i2);
+  o << "\n";
+
+  unsigned ModRMTableNum = 0;
+
+  o << "static const InstrUID modRMTable[] = {\n";
+  i1++;
+  // Seed ModRMTable with the shared all-zero entry used by empty decisions.
+  std::vector<unsigned> EmptyTable(1, 0);
+  ModRMTable[EmptyTable] = ModRMTableNum;
+  ModRMTableNum += EmptyTable.size();
+  o1 << "/*EmptyTable*/\n";
+  o1.indent(i1 * 2) << "0x0,\n";
+  i1--;
+  emitContextDecisions(o1, o2, i1, i2, ModRMTableNum);
+
+  o << o1.str();
+  o << "  0x0\n";
+  o << "};\n";
+  o << "\n";
+  o << o2.str();
+  o << "\n";
+  o << "\n";
+}
+
+void DisassemblerTables::setTableFields(ModRMDecision &decision,
+                                        const ModRMFilter &filter,
+                                        InstrUID uid,
+                                        uint8_t opcode) {
+  for (unsigned index = 0; index < 256; ++index) {
+    if (filter.accepts(index)) {
+      if (decision.instructionIDs[index] == uid)
+        continue;
+
+      // Slot already taken: resolve the collision before overwriting.
+      if (decision.instructionIDs[index] != 0) {
+        InstructionSpecifier &newInfo =
+          InstructionSpecifiers[uid];
+        InstructionSpecifier &previousInfo =
+          InstructionSpecifiers[decision.instructionIDs[index]];
+
+        if(previousInfo.name == "NOOP" && (newInfo.name == "XCHG16ar" ||
+                                           newInfo.name == "XCHG32ar" ||
+                                           newInfo.name == "XCHG64ar"))
+          continue; // special case for XCHG*ar and NOOP
+
+        // The existing entry's context outranks ours; keep it in place.
+        if (outranks(previousInfo.insnContext, newInfo.insnContext))
+          continue;
+
+        // Identical contexts mean a genuine primary decode conflict; report
+        // it and record that the tables are inconsistent.
+        if (previousInfo.insnContext == newInfo.insnContext) {
+          errs() << "Error: Primary decode conflict: ";
+          errs() << newInfo.name << " would overwrite " << previousInfo.name;
+          errs() << "\n";
+          errs() << "ModRM " << index << "\n";
+          errs() << "Opcode " << (uint16_t)opcode << "\n";
+          errs() << "Context " << stringForContext(newInfo.insnContext) << "\n";
+          HasConflicts = true;
+        }
+      }
+
+      decision.instructionIDs[index] = uid;
+    }
+  }
+}
+
+void DisassemblerTables::setTableFields(OpcodeType type,
+                                        InstructionContext insnContext,
+                                        uint8_t opcode,
+                                        const ModRMFilter &filter,
+                                        InstrUID uid,
+                                        bool is32bit,
+                                        bool noPrefix,
+                                        bool ignoresVEX_L,
+                                        bool ignoresVEX_W,
+                                        unsigned addressSize) {
+  ContextDecision &decision = *Tables[type];
+
+  for (unsigned index = 0; index < IC_max; ++index) {
+    // 64-bit-only contexts never apply to 32-bit-only instructions or
+    // 16-bit-address forms.
+    if ((is32bit || addressSize == 16) &&
+        inheritsFrom((InstructionContext)index, IC_64BIT))
+      continue;
+
+    // Populate every context that inherits from the instruction's own
+    // context, honoring the ignore flags.
+    bool adSize64 = addressSize == 64;
+    if (inheritsFrom((InstructionContext)index,
+                     InstructionSpecifiers[uid].insnContext, noPrefix,
+                     ignoresVEX_L, ignoresVEX_W, adSize64))
+      setTableFields(decision.opcodeDecisions[index].modRMDecisions[opcode],
+                     filter,
+                     uid,
+                     opcode);
+  }
+}
diff --git a/contrib/libs/llvm16/utils/TableGen/X86DisassemblerTables.h b/contrib/libs/llvm16/utils/TableGen/X86DisassemblerTables.h
new file mode 100644
index 0000000000..966f7406ef
--- /dev/null
+++ b/contrib/libs/llvm16/utils/TableGen/X86DisassemblerTables.h
@@ -0,0 +1,292 @@
+//===- X86DisassemblerTables.h - Disassembler tables ------------*- C++ -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// This file is part of the X86 Disassembler Emitter.
+// It contains the interface of the disassembler tables.
+// Documentation for the disassembler emitter in general can be found in
+// X86DisassemblerEmitter.h.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_UTILS_TABLEGEN_X86DISASSEMBLERTABLES_H
+#define LLVM_UTILS_TABLEGEN_X86DISASSEMBLERTABLES_H
+
+#include "X86DisassemblerShared.h"
+#include "llvm/Support/X86DisassemblerDecoderCommon.h"
+#include <map>
+#include <memory>
+#include <vector>
+
+namespace llvm {
+class raw_ostream;
+
+namespace X86Disassembler {
+
+class ModRMFilter;
+
+/// DisassemblerTables - Encapsulates all the decode tables being generated by
+/// the table emitter. Contains functions to populate the tables as well as
+/// to emit them as hierarchical C structures suitable for consumption by the
+/// runtime.
+class DisassemblerTables {
+private:
+  /// The decoder tables. There is one for each opcode type:
+  /// [0] one-byte opcodes
+  /// [1] two-byte opcodes of the form 0f __
+  /// [2] three-byte opcodes of the form 0f 38 __
+  /// [3] three-byte opcodes of the form 0f 3a __
+  /// [4] XOP8 map opcode
+  /// [5] XOP9 map opcode
+  /// [6] XOPA map opcode
+  /// [7] 3dnow map opcode
+  /// [8] fixed length MAP5 opcode
+  /// [9] fixed length MAP6 opcode
+  std::unique_ptr<ContextDecision> Tables[10];
+
+  // Table of ModRM encodings, interning each distinct ID list to the offset
+  // at which it was first emitted (mutable: populated during const emit()).
+  typedef std::map<std::vector<unsigned>, unsigned> ModRMMapTy;
+  mutable ModRMMapTy ModRMTable;
+
+  /// The instruction information table
+  std::vector<InstructionSpecifier> InstructionSpecifiers;
+
+  /// True if there are primary decode conflicts in the instruction set
+  bool HasConflicts;
+
+  /// emitModRMDecision - Emits a table of entries corresponding to a single
+  /// ModR/M decision. Compacts the ModR/M decision if possible. ModR/M
+  /// decisions are printed as:
+  ///
+  /// { /* struct ModRMDecision */
+  ///   TYPE,
+  ///   modRMTablennnn
+  /// }
+  ///
+  /// where nnnn is a unique ID for the corresponding table of IDs.
+  /// TYPE indicates whether the table has one entry that is the same
+  /// regardless of ModR/M byte, two entries - one for bytes 0x00-0xbf and one
+  /// for bytes 0xc0-0xff -, or 256 entries, one for each possible byte.
+  /// nnnn is the number of a table for looking up these values. The tables
+  /// are written separately so that tables consisting entirely of zeros will
+  /// not be duplicated. (These all have the name modRMEmptyTable.) A table
+  /// is printed as:
+  ///
+  /// InstrUID modRMTablennnn[k] = {
+  ///   nnnn, /* MNEMONIC */
+  ///   ...
+  ///   nnnn /* MNEMONIC */
+  /// };
+  ///
+  /// @param o1 - The output stream to print the ID table to.
+  /// @param o2 - The output stream to print the decision structure to.
+  /// @param i1 - The indentation level to use with stream o1.
+  /// @param i2 - The indentation level to use with stream o2.
+  /// @param ModRMTableNum - next table number for adding to ModRMTable.
+  /// @param decision - The ModR/M decision to emit. This decision has 256
+  ///                   entries - emitModRMDecision decides how to compact it.
+  void emitModRMDecision(raw_ostream &o1, raw_ostream &o2,
+                         unsigned &i1, unsigned &i2, unsigned &ModRMTableNum,
+                         ModRMDecision &decision) const;
+
+  /// emitOpcodeDecision - Emits an OpcodeDecision and all its subsidiary ModR/M
+  ///   decisions. An OpcodeDecision is printed as:
+  ///
+  /// { /* struct OpcodeDecision */
+  ///   /* 0x00 */
+  ///   { /* struct ModRMDecision */
+  ///     ...
+  ///   }
+  ///   ...
+  /// }
+  ///
+  /// where the ModRMDecision structure is printed as described in the
+  /// documentation for emitModRMDecision(). emitOpcodeDecision() passes on a
+  /// stream and indent level for the UID tables generated by
+  /// emitModRMDecision(), but does not use them itself.
+  ///
+  /// @param o1 - The output stream to print the ID tables generated by
+  ///             emitModRMDecision() to.
+  /// @param o2 - The output stream for the decision structure itself.
+  /// @param i1 - The indent level to use with stream o1.
+  /// @param i2 - The indent level to use with stream o2.
+  /// @param ModRMTableNum - next table number for adding to ModRMTable.
+  /// @param decision - The OpcodeDecision to emit along with its subsidiary
+  ///                    structures.
+  void emitOpcodeDecision(raw_ostream &o1, raw_ostream &o2,
+                          unsigned &i1, unsigned &i2, unsigned &ModRMTableNum,
+                          OpcodeDecision &decision) const;
+
+  /// emitContextDecision - Emits a ContextDecision and all its subsidiary
+  ///   Opcode and ModRMDecisions. A ContextDecision is printed as:
+  ///
+  /// struct ContextDecision NAME = {
+  ///   { /* OpcodeDecisions */
+  ///     /* IC */
+  ///     { /* struct OpcodeDecision */
+  ///       ...
+  ///     },
+  ///     ...
+  ///   }
+  /// }
+  ///
+  /// NAME is the name of the ContextDecision (typically one of the four names
+  /// ONEBYTE_SYM, TWOBYTE_SYM, THREEBYTE38_SYM, THREEBYTE3A_SYM from
+  /// X86DisassemblerDecoderCommon.h).
+  /// IC is one of the contexts in InstructionContext. There is an opcode
+  /// decision for each possible context.
+  /// The OpcodeDecision structures are printed as described in the
+  /// documentation for emitOpcodeDecision.
+  ///
+  /// @param o1 - The output stream to print the ID tables generated by
+  ///             emitModRMDecision() to.
+  /// @param o2 - The output stream to print the decision structure to.
+  /// @param i1 - The indent level to use with stream o1.
+  /// @param i2 - The indent level to use with stream o2.
+  /// @param ModRMTableNum - next table number for adding to ModRMTable.
+  /// @param decision - The ContextDecision to emit along with its subsidiary
+  ///                   structures.
+  /// @param name - The name for the ContextDecision.
+  void emitContextDecision(raw_ostream &o1, raw_ostream &o2,
+                           unsigned &i1, unsigned &i2, unsigned &ModRMTableNum,
+                           ContextDecision &decision, const char* name) const;
+
+  /// emitInstructionInfo - Prints the instruction specifier table, which has
+  ///   one entry for each instruction, and contains name and operand
+  ///   information. This table is printed as:
+  ///
+  /// struct InstructionSpecifier CONTEXTS_SYM[k] = {
+  ///   {
+  ///     /* nnnn */
+  ///     "MNEMONIC",
+  ///     0xnn,
+  ///     {
+  ///       {
+  ///         ENCODING,
+  ///         TYPE
+  ///       },
+  ///       ...
+  ///     }
+  ///   },
+  /// };
+  ///
+  /// k is the total number of instructions.
+  /// nnnn is the ID of the current instruction (0-based). This table
+  /// includes entries for non-instructions like PHINODE.
+  /// 0xnn is the lowest possible opcode for the current instruction, used for
+  /// AddRegFrm instructions to compute the operand's value.
+  /// ENCODING and TYPE describe the encoding and type for a single operand.
+  ///
+  /// @param o  - The output stream to which the instruction table should be
+  ///             written.
+  /// @param i  - The indent level for use with the stream.
+  void emitInstructionInfo(raw_ostream &o, unsigned &i) const;
+
+  /// emitContextTable - Prints the table that is used to translate from an
+  ///   instruction attribute mask to an instruction context. This table is
+  ///   printed as:
+  ///
+  /// InstructionContext CONTEXTS_STR[256] = {
+  ///   IC, /* 0x00 */
+  ///   ...
+  /// };
+  ///
+  /// IC is the context corresponding to the mask 0x00, and there are 256
+  /// possible masks.
+  ///
+  /// NOTE(review): declared here with uint32_t& but defined in the .cpp with
+  /// unsigned& — these match where unsigned is 32 bits; confirm on all
+  /// supported targets.
+  ///
+  /// @param o - The output stream to which the context table should be written.
+  /// @param i - The indent level for use with the stream.
+  void emitContextTable(raw_ostream &o, uint32_t &i) const;
+
+  /// emitContextDecisions - Prints all of the ContextDecision structures
+  ///   (one per opcode map in Tables) using emitContextDecision().
+  ///
+  /// @param o1 - The output stream to print the ID tables generated by
+  ///             emitModRMDecision() to.
+  /// @param o2 - The output stream to print the decision structures to.
+  /// @param i1 - The indent level to use with stream o1.
+  /// @param i2 - The indent level to use with stream o2.
+  /// @param ModRMTableNum - next table number for adding to ModRMTable.
+  void emitContextDecisions(raw_ostream &o1, raw_ostream &o2,
+                            unsigned &i1, unsigned &i2,
+                            unsigned &ModRMTableNum) const;
+
+  /// setTableFields - Uses a ModRMFilter to set the appropriate entries in a
+  ///   ModRMDecision to refer to a particular instruction ID.
+  ///
+  /// @param decision - The ModRMDecision to populate.
+  /// @param filter   - The filter to use in deciding which entries to populate.
+  /// @param uid      - The unique ID to set matching entries to.
+  /// @param opcode   - The opcode of the instruction, for error reporting.
+  void setTableFields(ModRMDecision &decision,
+                      const ModRMFilter &filter,
+                      InstrUID uid,
+                      uint8_t opcode);
+public:
+  /// Constructor - Allocates space for the class decisions and clears them.
+  DisassemblerTables();
+
+  ~DisassemblerTables();
+
+  /// emit - Emits the instruction table, context table, and class decisions.
+  ///
+  /// @param o  - The output stream to print the tables to.
+  void emit(raw_ostream &o) const;
+
+  /// setTableFields - Uses the opcode type, instruction context, opcode, and a
+  ///   ModRMFilter as criteria to set a particular set of entries in the
+  ///   decode tables to point to a specific uid.
+  ///
+  /// @param type         - The opcode type (ONEBYTE, TWOBYTE, etc.)
+  /// @param insnContext  - The context to use (IC, IC_64BIT, etc.)
+  /// @param opcode       - The last byte of the opcode (not counting any escape
+  ///                       or extended opcodes).
+  /// @param filter       - The ModRMFilter that decides which ModR/M byte values
+  ///                       correspond to the desired instruction.
+  /// @param uid          - The unique ID of the instruction.
+  /// @param is32bit      - Instruction is only 32-bit
+  /// @param noPrefix     - Instruction record has no prefix.
+  /// @param ignoresVEX_L - Instruction ignores VEX.L
+  /// @param ignoresVEX_W - Instruction ignores VEX.W
+  /// @param AddrSize     - Instruction's address size 16/32/64. 0 is unspecified
+  void setTableFields(OpcodeType type,
+                      InstructionContext insnContext,
+                      uint8_t opcode,
+                      const ModRMFilter &filter,
+                      InstrUID uid,
+                      bool is32bit,
+                      bool noPrefix,
+                      bool ignoresVEX_L,
+                      bool ignoresVEX_W,
+                      unsigned AddrSize);
+
+  /// specForUID - Returns the instruction specifier for a given unique
+  ///   instruction ID.  Used when resolving collisions.
+  ///
+  /// @param uid  - The unique ID of the instruction.
+  /// @return     - A reference to the instruction specifier.
+  InstructionSpecifier& specForUID(InstrUID uid) {
+    // Grow the table on demand so any UID is addressable.
+    if (uid >= InstructionSpecifiers.size())
+      InstructionSpecifiers.resize(uid + 1);
+
+    return InstructionSpecifiers[uid];
+  }
+
+  // hasConflicts - Reports whether there were primary decode conflicts
+  //   from any instructions added to the tables.
+  // @return - true if there were; false otherwise.
+
+  bool hasConflicts() {
+    return HasConflicts;
+  }
+};
+
+} // namespace X86Disassembler
+
+} // namespace llvm
+
+#endif
diff --git a/contrib/libs/llvm16/utils/TableGen/X86EVEX2VEXTablesEmitter.cpp b/contrib/libs/llvm16/utils/TableGen/X86EVEX2VEXTablesEmitter.cpp
new file mode 100644
index 0000000000..1384330ee8
--- /dev/null
+++ b/contrib/libs/llvm16/utils/TableGen/X86EVEX2VEXTablesEmitter.cpp
@@ -0,0 +1,246 @@
+//===- utils/TableGen/X86EVEX2VEXTablesEmitter.cpp - X86 backend-*- C++ -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+///
+/// This tablegen backend is responsible for emitting the X86 backend EVEX2VEX
+/// compression tables.
+///
+//===----------------------------------------------------------------------===//
+
+#include "CodeGenInstruction.h"
+#include "CodeGenTarget.h"
+#include "X86RecognizableInstr.h"
+#include "llvm/TableGen/Error.h"
+#include "llvm/TableGen/TableGenBackend.h"
+
+using namespace llvm;
+using namespace X86Disassembler;
+
+namespace {
+
+// Emits the X86 EVEX->VEX compression tables: for each compressible EVEX
+// instruction it finds the equivalent VEX encoding and records the pair.
+class X86EVEX2VEXTablesEmitter {
+  RecordKeeper &Records;
+  CodeGenTarget Target;
+
+  // Hold all non-masked & non-broadcasted EVEX encoded instructions
+  std::vector<const CodeGenInstruction *> EVEXInsts;
+  // Hold all VEX encoded instructions. Divided into groups with same opcodes
+  // to make the search more efficient
+  std::map<uint64_t, std::vector<const CodeGenInstruction *>> VEXInsts;
+
+  // An (EVEX instruction, matching VEX instruction) pair.
+  typedef std::pair<const CodeGenInstruction *, const CodeGenInstruction *> Entry;
+  // An (instruction name, predicate condition string) pair.
+  typedef std::pair<StringRef, StringRef> Predicate;
+
+  // Represent both compress tables (VEX.L == 0 and VEX.L == 1 forms).
+  std::vector<Entry> EVEX2VEX128;
+  std::vector<Entry> EVEX2VEX256;
+  // Represent predicates of VEX instructions.
+  std::vector<Predicate> EVEX2VEXPredicates;
+
+public:
+  X86EVEX2VEXTablesEmitter(RecordKeeper &R) : Records(R), Target(R) {}
+
+  // run - Output X86 EVEX2VEX tables.
+  void run(raw_ostream &OS);
+
+private:
+  // Prints the given table as a C++ array of type
+  // X86EvexToVexCompressTableEntry
+  void printTable(const std::vector<Entry> &Table, raw_ostream &OS);
+  // Prints function which checks target feature specific predicate.
+  void printCheckPredicate(const std::vector<Predicate> &Predicates,
+                           raw_ostream &OS);
+};
+
+// Prints one compression table as a static C++ array. The "128"/"256"
+// suffix of the emitted identifier is chosen by comparing the argument
+// (by value) against the EVEX2VEX128 member table.
+void X86EVEX2VEXTablesEmitter::printTable(const std::vector<Entry> &Table,
+                                          raw_ostream &OS) {
+  StringRef Size = (Table == EVEX2VEX128) ? "128" : "256";
+
+  OS << "// X86 EVEX encoded instructions that have a VEX " << Size
+     << " encoding\n"
+     << "// (table format: <EVEX opcode, VEX-" << Size << " opcode>).\n"
+     << "static const X86EvexToVexCompressTableEntry X86EvexToVex" << Size
+     << "CompressTable[] = {\n"
+     << "  // EVEX scalar with corresponding VEX.\n";
+
+  // Print all entries added to the table
+  for (const auto &Pair : Table) {
+    OS << "  { X86::" << Pair.first->TheDef->getName()
+       << ", X86::" << Pair.second->TheDef->getName() << " },\n";
+  }
+
+  OS << "};\n\n";
+}
+
+// Emits the CheckVEXInstPredicate helper: a switch over opcodes that
+// evaluates each recorded predicate condition string; opcodes without an
+// entry default to true.
+void X86EVEX2VEXTablesEmitter::printCheckPredicate(
+    const std::vector<Predicate> &Predicates, raw_ostream &OS) {
+  OS << "static bool CheckVEXInstPredicate"
+     << "(MachineInstr &MI, const X86Subtarget *Subtarget) {\n"
+     << "  unsigned Opc = MI.getOpcode();\n"
+     << "  switch (Opc) {\n"
+     << "  default: return true;\n";
+  for (const auto &Pair : Predicates)
+    OS << "  case X86::" << Pair.first << ": return " << Pair.second << ";\n";
+  OS << "  }\n"
+     << "}\n\n";
+}
+
+// Calculates the integer value residing in the BitsInit object; emits a
+// fatal error if any bit is not a concrete 0/1 BitInit.
+static inline uint64_t getValueFromBitsInit(const BitsInit *B) {
+  uint64_t Value = 0;
+  for (unsigned i = 0, e = B->getNumBits(); i != e; ++i) {
+    if (BitInit *Bit = dyn_cast<BitInit>(B->getBit(i)))
+      Value |= uint64_t(Bit->getValue()) << i;
+    else
+      PrintFatalError("Invalid VectSize bit");
+  }
+  return Value;
+}
+
+// Function object - Operator() returns true if the given VEX instruction
+// matches the EVEX instruction of this object.
+class IsMatch {
+  const CodeGenInstruction *EVEXInst;
+
+public:
+  IsMatch(const CodeGenInstruction *EVEXInst) : EVEXInst(EVEXInst) {}
+
+  // Returns true if VEXInst has the same encoding fields, a compatible
+  // VEX.W setting, the same form, and operand-by-operand compatible
+  // sizes/types as the stored EVEX instruction.
+  bool operator()(const CodeGenInstruction *VEXInst) {
+    RecognizableInstrBase VEXRI(*VEXInst);
+    RecognizableInstrBase EVEXRI(*EVEXInst);
+    bool VEX_W = VEXRI.HasVEX_W;
+    bool EVEX_W = EVEXRI.HasVEX_W;
+    bool VEX_WIG  = VEXRI.IgnoresVEX_W;
+    bool EVEX_WIG  = EVEXRI.IgnoresVEX_W;
+    bool EVEX_W1_VEX_W0 = EVEXInst->TheDef->getValueAsBit("EVEX_W1_VEX_W0");
+
+    if (VEXRI.IsCodeGenOnly != EVEXRI.IsCodeGenOnly ||
+        // VEX/EVEX fields
+        VEXRI.OpPrefix != EVEXRI.OpPrefix || VEXRI.OpMap != EVEXRI.OpMap ||
+        VEXRI.HasVEX_4V != EVEXRI.HasVEX_4V ||
+        VEXRI.HasVEX_L != EVEXRI.HasVEX_L ||
+        // Match is allowed if either is VEX_WIG, or they match, or EVEX
+        // is VEX_W1X and VEX is VEX_W0.
+        (!(VEX_WIG || (!EVEX_WIG && EVEX_W == VEX_W) ||
+           (EVEX_W1_VEX_W0 && EVEX_W && !VEX_W))) ||
+        // Instruction's format
+        VEXRI.Form != EVEXRI.Form)
+      return false;
+
+    // This is needed for instructions with intrinsic version (_Int).
+    // Where the only difference is the size of the operands.
+    // For example: VUCOMISDZrm and Int_VUCOMISDrm
+    // Also for instructions that their EVEX version was upgraded to work with
+    // k-registers. For example VPCMPEQBrm (xmm output register) and
+    // VPCMPEQBZ128rm (k register output register).
+    for (unsigned i = 0, e = EVEXInst->Operands.size(); i < e; i++) {
+      Record *OpRec1 = EVEXInst->Operands[i].Rec;
+      Record *OpRec2 = VEXInst->Operands[i].Rec;
+
+      if (OpRec1 == OpRec2)
+        continue;
+
+      if (isRegisterOperand(OpRec1) && isRegisterOperand(OpRec2)) {
+        if (getRegOperandSize(OpRec1) != getRegOperandSize(OpRec2))
+          return false;
+      } else if (isMemoryOperand(OpRec1) && isMemoryOperand(OpRec2)) {
+        // Distinct memory operand records never match.
+        return false;
+      } else if (isImmediateOperand(OpRec1) && isImmediateOperand(OpRec2)) {
+        if (OpRec1->getValueAsDef("Type") != OpRec2->getValueAsDef("Type")) {
+          return false;
+        }
+      } else
+        return false;
+    }
+
+    return true;
+  }
+};
+
+// Main driver: collect candidate VEX/EVEX instructions, pair each EVEX
+// instruction with a matching VEX one, and print the resulting tables.
+void X86EVEX2VEXTablesEmitter::run(raw_ostream &OS) {
+  // Returns the CondString of Inst's single AVX-related predicate;
+  // unreachable if no such predicate exists.
+  auto getPredicates = [&](const CodeGenInstruction *Inst) {
+    std::vector<Record *> PredicatesRecords =
+        Inst->TheDef->getValueAsListOfDefs("Predicates");
+    // Currently we only do AVX related checks and assume each instruction
+    // has one and only one AVX related predicates.
+    for (unsigned i = 0, e = PredicatesRecords.size(); i != e; ++i)
+      if (PredicatesRecords[i]->getName().startswith("HasAVX"))
+        return PredicatesRecords[i]->getValueAsString("CondString");
+    llvm_unreachable(
+        "Instruction with checkPredicate set must have one predicate!");
+  };
+
+  emitSourceFileHeader("X86 EVEX2VEX tables", OS);
+
+  ArrayRef<const CodeGenInstruction *> NumberedInstructions =
+      Target.getInstructionsByEnumValue();
+
+  for (const CodeGenInstruction *Inst : NumberedInstructions) {
+    const Record *Def = Inst->TheDef;
+    // Filter non-X86 instructions.
+    if (!Def->isSubClassOf("X86Inst"))
+      continue;
+    RecognizableInstrBase RI(*Inst);
+
+    // Add VEX encoded instructions to one of VEXInsts vectors according to
+    // its opcode.
+    if (RI.Encoding == X86Local::VEX)
+      VEXInsts[RI.Opcode].push_back(Inst);
+    // Add relevant EVEX encoded instructions to EVEXInsts
+    else if (RI.Encoding == X86Local::EVEX && !RI.HasEVEX_K && !RI.HasEVEX_B &&
+             !RI.HasEVEX_L2 && !Def->getValueAsBit("notEVEX2VEXConvertible"))
+      EVEXInsts.push_back(Inst);
+  }
+
+  for (const CodeGenInstruction *EVEXInst : EVEXInsts) {
+    uint64_t Opcode = getValueFromBitsInit(EVEXInst->TheDef->
+                                           getValueAsBitsInit("Opcode"));
+    // For each EVEX instruction look for a VEX match in the appropriate vector
+    // (instructions with the same opcode) using function object IsMatch.
+    // Allow EVEX2VEXOverride to explicitly specify a match.
+    const CodeGenInstruction *VEXInst = nullptr;
+    if (!EVEXInst->TheDef->isValueUnset("EVEX2VEXOverride")) {
+      StringRef AltInstStr =
+        EVEXInst->TheDef->getValueAsString("EVEX2VEXOverride");
+      Record *AltInstRec = Records.getDef(AltInstStr);
+      assert(AltInstRec && "EVEX2VEXOverride instruction not found!");
+      VEXInst = &Target.getInstruction(AltInstRec);
+    } else {
+      auto Match = llvm::find_if(VEXInsts[Opcode], IsMatch(EVEXInst));
+      if (Match != VEXInsts[Opcode].end())
+        VEXInst = *Match;
+    }
+
+    if (!VEXInst)
+      continue;
+
+    // In case a match is found add new entry to the appropriate table
+    if (EVEXInst->TheDef->getValueAsBit("hasVEX_L"))
+      EVEX2VEX256.push_back(std::make_pair(EVEXInst, VEXInst)); // {0,1}
+    else
+      EVEX2VEX128.push_back(std::make_pair(EVEXInst, VEXInst)); // {0,0}
+
+    // Adding predicate check to EVEX2VEXPredicates table when needed.
+    if (VEXInst->TheDef->getValueAsBit("checkVEXPredicate"))
+      EVEX2VEXPredicates.push_back(
+          std::make_pair(EVEXInst->TheDef->getName(), getPredicates(VEXInst)));
+  }
+
+  // Print both tables
+  printTable(EVEX2VEX128, OS);
+  printTable(EVEX2VEX256, OS);
+  // Print CheckVEXInstPredicate function.
+  printCheckPredicate(EVEX2VEXPredicates, OS);
+}
+}
+
+namespace llvm {
+// TableGen backend entry point for the EVEX->VEX compression tables.
+void EmitX86EVEX2VEXTables(RecordKeeper &RK, raw_ostream &OS) {
+  X86EVEX2VEXTablesEmitter(RK).run(OS);
+}
+}
diff --git a/contrib/libs/llvm16/utils/TableGen/X86FoldTablesEmitter.cpp b/contrib/libs/llvm16/utils/TableGen/X86FoldTablesEmitter.cpp
new file mode 100644
index 0000000000..5b3f11848d
--- /dev/null
+++ b/contrib/libs/llvm16/utils/TableGen/X86FoldTablesEmitter.cpp
@@ -0,0 +1,618 @@
+//===- utils/TableGen/X86FoldTablesEmitter.cpp - X86 backend-*- C++ -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// This tablegen backend is responsible for emitting the memory fold tables of
+// the X86 backend instructions.
+//
+//===----------------------------------------------------------------------===//
+
+#include "CodeGenTarget.h"
+#include "X86RecognizableInstr.h"
+#include "llvm/Support/FormattedStream.h"
+#include "llvm/TableGen/Error.h"
+#include "llvm/TableGen/TableGenBackend.h"
+
+using namespace llvm;
+using namespace X86Disassembler;
+
+namespace {
+
+// 3 possible strategies for the unfolding flag (TB_NO_REVERSE) of the
+// manual added entries.
+enum UnfoldStrategy {
+  UNFOLD,     // Allow unfolding
+  NO_UNFOLD,  // Prevent unfolding
+  NO_STRATEGY // Make decision according to operands' sizes
+};
+
+// Represents an entry in the manual mapped instructions set: a register
+// form name, its memory form name, and the unfolding strategy to apply.
+struct ManualMapEntry {
+  const char *RegInstStr;
+  const char *MemInstStr;
+  UnfoldStrategy Strategy;
+
+  ManualMapEntry(const char *RegInstStr, const char *MemInstStr,
+                 UnfoldStrategy Strategy = NO_STRATEGY)
+      : RegInstStr(RegInstStr), MemInstStr(MemInstStr), Strategy(Strategy) {}
+};
+
+// List of instructions requiring explicitly aligned memory.
+const char *ExplicitAlign[] = {"MOVDQA", "MOVAPS", "MOVAPD", "MOVNTPS",
+ "MOVNTPD", "MOVNTDQ", "MOVNTDQA"};
+
+// List of instructions NOT requiring explicit memory alignment.
+const char *ExplicitUnalign[] = {"MOVDQU", "MOVUPS", "MOVUPD",
+ "PCMPESTRM", "PCMPESTRI",
+ "PCMPISTRM", "PCMPISTRI" };
+
+// For manually mapping instructions that do not match by their encoding.
+const ManualMapEntry ManualMapSet[] = {
+ { "ADD16ri_DB", "ADD16mi", NO_UNFOLD },
+ { "ADD16ri8_DB", "ADD16mi8", NO_UNFOLD },
+ { "ADD16rr_DB", "ADD16mr", NO_UNFOLD },
+ { "ADD32ri_DB", "ADD32mi", NO_UNFOLD },
+ { "ADD32ri8_DB", "ADD32mi8", NO_UNFOLD },
+ { "ADD32rr_DB", "ADD32mr", NO_UNFOLD },
+ { "ADD64ri32_DB", "ADD64mi32", NO_UNFOLD },
+ { "ADD64ri8_DB", "ADD64mi8", NO_UNFOLD },
+ { "ADD64rr_DB", "ADD64mr", NO_UNFOLD },
+ { "ADD8ri_DB", "ADD8mi", NO_UNFOLD },
+ { "ADD8rr_DB", "ADD8mr", NO_UNFOLD },
+ { "ADD16rr_DB", "ADD16rm", NO_UNFOLD },
+ { "ADD32rr_DB", "ADD32rm", NO_UNFOLD },
+ { "ADD64rr_DB", "ADD64rm", NO_UNFOLD },
+ { "ADD8rr_DB", "ADD8rm", NO_UNFOLD },
+ { "MMX_MOVD64from64rr", "MMX_MOVQ64mr", UNFOLD },
+ { "MMX_MOVD64grr", "MMX_MOVD64mr", UNFOLD },
+ { "MOVLHPSrr", "MOVHPSrm", NO_UNFOLD },
+ { "PUSH16r", "PUSH16rmm", UNFOLD },
+ { "PUSH32r", "PUSH32rmm", UNFOLD },
+ { "PUSH64r", "PUSH64rmm", UNFOLD },
+ { "TAILJMPr", "TAILJMPm", UNFOLD },
+ { "TAILJMPr64", "TAILJMPm64", UNFOLD },
+ { "TAILJMPr64_REX", "TAILJMPm64_REX", UNFOLD },
+ { "VMOVLHPSZrr", "VMOVHPSZ128rm", NO_UNFOLD },
+ { "VMOVLHPSrr", "VMOVHPSrm", NO_UNFOLD },
+};
+
+
+// Returns true if Inst's name contains any of the ExplicitAlign mnemonic
+// substrings (instructions requiring explicitly aligned memory).
+static bool isExplicitAlign(const CodeGenInstruction *Inst) {
+  return any_of(ExplicitAlign, [Inst](const char *InstStr) {
+    return Inst->TheDef->getName().contains(InstStr);
+  });
+}
+
+// Returns true if Inst's name contains any of the ExplicitUnalign mnemonic
+// substrings (instructions NOT requiring explicit memory alignment).
+static bool isExplicitUnalign(const CodeGenInstruction *Inst) {
+  return any_of(ExplicitUnalign, [Inst](const char *InstStr) {
+    return Inst->TheDef->getName().contains(InstStr);
+  });
+}
+
+// Emits the X86 memory-folding tables that map register-form instructions
+// to their memory-form equivalents, together with per-entry flags.
+class X86FoldTablesEmitter {
+  RecordKeeper &Records;
+  CodeGenTarget Target;
+
+  // Represents an entry in the folding table
+  class X86FoldTableEntry {
+    const CodeGenInstruction *RegInst;
+    const CodeGenInstruction *MemInst;
+
+  public:
+    bool CannotUnfold = false; // TB_NO_REVERSE
+    bool IsLoad = false;       // TB_FOLDED_LOAD
+    bool IsStore = false;      // TB_FOLDED_STORE
+    bool IsAligned = false;    // TB_ALIGN_<Alignment>
+    unsigned int Alignment = 0;
+
+    X86FoldTableEntry(const CodeGenInstruction *RegInst,
+                      const CodeGenInstruction *MemInst)
+        : RegInst(RegInst), MemInst(MemInst) {}
+
+    // Prints this entry as one row of the generated C++ table.
+    void print(formatted_raw_ostream &OS) const {
+      OS.indent(2);
+      OS << "{ X86::" << RegInst->TheDef->getName() << ",";
+      OS.PadToColumn(40);
+      OS << "X86::" << MemInst->TheDef->getName() << ",";
+      OS.PadToColumn(75);
+
+      std::string Attrs;
+      if (IsLoad)
+        Attrs += "TB_FOLDED_LOAD | ";
+      if (IsStore)
+        Attrs += "TB_FOLDED_STORE | ";
+      if (CannotUnfold)
+        Attrs += "TB_NO_REVERSE | ";
+      if (IsAligned)
+        Attrs += "TB_ALIGN_" + std::to_string(Alignment) + " | ";
+
+      StringRef SimplifiedAttrs = StringRef(Attrs).rtrim("| ");
+      if (SimplifiedAttrs.empty())
+        SimplifiedAttrs = "0";
+
+      OS << SimplifiedAttrs << " },\n";
+    }
+
+    // Order pseudo instructions before real ones, then by name, so the
+    // emitted tables are deterministic and binary-searchable by name.
+    bool operator<(const X86FoldTableEntry &RHS) const {
+      bool LHSpseudo = RegInst->TheDef->getValueAsBit("isPseudo");
+      bool RHSpseudo = RHS.RegInst->TheDef->getValueAsBit("isPseudo");
+      if (LHSpseudo != RHSpseudo)
+        return LHSpseudo;
+
+      return RegInst->TheDef->getName() < RHS.RegInst->TheDef->getName();
+    }
+  };
+
+  typedef std::vector<X86FoldTableEntry> FoldTable;
+  // std::vector for each folding table.
+  // Table2Addr - Holds instructions whose memory form performs a load+store
+  // Table#i - Holds instructions whose memory form performs a load OR
+  //           a store, and whose #i'th operand is folded.
+  FoldTable Table2Addr;
+  FoldTable Table0;
+  FoldTable Table1;
+  FoldTable Table2;
+  FoldTable Table3;
+  FoldTable Table4;
+
+public:
+  X86FoldTablesEmitter(RecordKeeper &R) : Records(R), Target(R) {}
+
+  // run - Generate the 6 X86 memory fold tables.
+  void run(formatted_raw_ostream &OS);
+
+private:
+  // Decides to which table to add the entry with the given instructions.
+  // S sets the strategy of adding the TB_NO_REVERSE flag.
+  void updateTables(const CodeGenInstruction *RegInstr,
+                    const CodeGenInstruction *MemInstr,
+                    const UnfoldStrategy S = NO_STRATEGY);
+
+  // Generates X86FoldTableEntry with the given instructions and fill it with
+  // the appropriate flags - then adds it to Table.
+  void addEntryWithFlags(FoldTable &Table, const CodeGenInstruction *RegInstr,
+                         const CodeGenInstruction *MemInstr,
+                         const UnfoldStrategy S, const unsigned int FoldedInd);
+
+  // Print the given table as a static const C++ array of type
+  // X86MemoryFoldTableEntry.
+  void printTable(const FoldTable &Table, StringRef TableName,
+                  formatted_raw_ostream &OS) {
+    OS << "static const X86MemoryFoldTableEntry MemoryFold" << TableName
+       << "[] = {\n";
+
+    for (const X86FoldTableEntry &E : Table)
+      E.print(OS);
+
+    OS << "};\n\n";
+  }
+};
+
+// Return true if one of the instruction's operands is a RST register class
+static bool hasRSTRegClass(const CodeGenInstruction *Inst) {
+  return any_of(Inst->Operands, [](const CGIOperandList::OperandInfo &OpIn) {
+    return OpIn.Rec->getName() == "RST" || OpIn.Rec->getName() == "RSTi";
+  });
+}
+
+// Return true if one of the instruction's operands is a ptr_rc_tailcall
+static bool hasPtrTailcallRegClass(const CodeGenInstruction *Inst) {
+  return any_of(Inst->Operands, [](const CGIOperandList::OperandInfo &OpIn) {
+    return OpIn.Rec->getName() == "ptr_rc_tailcall";
+  });
+}
+
+// Calculates the integer value representing the BitsInit object; asserts
+// that the value fits in 64 bits and that every bit is a concrete BitInit.
+static inline uint64_t getValueFromBitsInit(const BitsInit *B) {
+  assert(B->getNumBits() <= sizeof(uint64_t) * 8 && "BitInits' too long!");
+
+  uint64_t Value = 0;
+  for (unsigned i = 0, e = B->getNumBits(); i != e; ++i) {
+    BitInit *Bit = cast<BitInit>(B->getBit(i));
+    Value |= uint64_t(Bit->getValue()) << i;
+  }
+  return Value;
+}
+
+// Return true if the instruction is defined as a register flavor, i.e. its
+// FormBits value falls in the register-form range of X86Local.
+static inline bool hasRegisterFormat(const Record *Inst) {
+  const BitsInit *FormBits = Inst->getValueAsBitsInit("FormBits");
+  uint64_t FormBitsNum = getValueFromBitsInit(FormBits);
+
+  // Values from X86Local namespace defined in X86RecognizableInstr.cpp
+  return FormBitsNum >= X86Local::MRMDestReg && FormBitsNum <= X86Local::MRM7r;
+}
+
+// Return true if the instruction is defined as a memory flavor, i.e. its
+// FormBits value falls in the memory-form range of X86Local.
+static inline bool hasMemoryFormat(const Record *Inst) {
+  const BitsInit *FormBits = Inst->getValueAsBitsInit("FormBits");
+  uint64_t FormBitsNum = getValueFromBitsInit(FormBits);
+
+  // Values from X86Local namespace defined in X86RecognizableInstr.cpp
+  return FormBitsNum >= X86Local::MRMDestMem && FormBitsNum <= X86Local::MRM7m;
+}
+
+// Return true if the operand's register class name is a _NOREX variant.
+static inline bool isNOREXRegClass(const Record *Op) {
+  return Op->getName().contains("_NOREX");
+}
+
+// Get the alternative instruction pointed by "FoldGenRegForm" field.
+// Asserts if the named def does not exist in the record keeper.
+static inline const CodeGenInstruction *
+getAltRegInst(const CodeGenInstruction *I, const RecordKeeper &Records,
+              const CodeGenTarget &Target) {
+
+  StringRef AltRegInstStr = I->TheDef->getValueAsString("FoldGenRegForm");
+  Record *AltRegInstRec = Records.getDef(AltRegInstStr);
+  assert(AltRegInstRec &&
+         "Alternative register form instruction def not found");
+  CodeGenInstruction &AltRegInst = Target.getInstruction(AltRegInstRec);
+  return &AltRegInst;
+}
+
+// Function object - Operator() returns true if the given register form
+// instruction matches the memory form instruction of this object.
+class IsMatch {
+  const CodeGenInstruction *MemInst;
+  unsigned Variant;
+
+public:
+  IsMatch(const CodeGenInstruction *Inst, unsigned V)
+      : MemInst(Inst), Variant(V) {}
+
+  // Returns true if RegInst is the register-form counterpart of the stored
+  // memory-form instruction: opposite ModRM forms, same mnemonic, matching
+  // encoding fields, and compatible operand lists with exactly one operand
+  // folded from register to memory.
+  bool operator()(const CodeGenInstruction *RegInst) {
+    X86Disassembler::RecognizableInstrBase RegRI(*RegInst);
+    X86Disassembler::RecognizableInstrBase MemRI(*MemInst);
+    const Record *RegRec = RegInst->TheDef;
+    const Record *MemRec = MemInst->TheDef;
+
+    // EVEX_B means different things for memory and register forms.
+    if (RegRI.HasEVEX_B != 0 || MemRI.HasEVEX_B != 0)
+      return false;
+
+    // Instruction's format - The register form's "Form" field should be
+    // the opposite of the memory form's "Form" field.
+    if (!areOppositeForms(RegRI.Form, MemRI.Form))
+      return false;
+
+    // X86 encoding is crazy, e.g
+    //
+    // f3 0f c7 30  vmxon   (%rax)
+    // f3 0f c7 f0  senduipi        %rax
+    //
+    // These two instructions have similar encoding fields but are unrelated
+    if (X86Disassembler::getMnemonic(MemInst, Variant) !=
+        X86Disassembler::getMnemonic(RegInst, Variant))
+      return false;
+
+    // Return false if one (at least) of the encoding fields of both
+    // instructions do not match.
+    if (RegRI.Encoding != MemRI.Encoding || RegRI.Opcode != MemRI.Opcode ||
+        RegRI.OpPrefix != MemRI.OpPrefix || RegRI.OpMap != MemRI.OpMap ||
+        RegRI.OpSize != MemRI.OpSize || RegRI.AdSize != MemRI.AdSize ||
+        RegRI.HasREX_W != MemRI.HasREX_W ||
+        RegRI.HasVEX_4V != MemRI.HasVEX_4V ||
+        RegRI.HasVEX_L != MemRI.HasVEX_L ||
+        RegRI.HasVEX_W != MemRI.HasVEX_W ||
+        RegRI.IgnoresVEX_L != MemRI.IgnoresVEX_L ||
+        RegRI.IgnoresVEX_W != MemRI.IgnoresVEX_W ||
+        RegRI.HasEVEX_K != MemRI.HasEVEX_K ||
+        RegRI.HasEVEX_KZ != MemRI.HasEVEX_KZ ||
+        RegRI.HasEVEX_L2 != MemRI.HasEVEX_L2 ||
+        RegRec->getValueAsBit("hasEVEX_RC") !=
+            MemRec->getValueAsBit("hasEVEX_RC") ||
+        RegRec->getValueAsBit("hasLockPrefix") !=
+            MemRec->getValueAsBit("hasLockPrefix") ||
+        RegRec->getValueAsBit("hasNoTrackPrefix") !=
+            MemRec->getValueAsBit("hasNoTrackPrefix") ||
+        RegRec->getValueAsBit("EVEX_W1_VEX_W0") !=
+            MemRec->getValueAsBit("EVEX_W1_VEX_W0"))
+      return false;
+
+    // Make sure the sizes of the operands of both instructions suit each other.
+    // This is needed for instructions with intrinsic version (_Int).
+    // Where the only difference is the size of the operands.
+    // For example: VUCOMISDZrm and Int_VUCOMISDrm
+    // Also for instructions that their EVEX version was upgraded to work with
+    // k-registers. For example VPCMPEQBrm (xmm output register) and
+    // VPCMPEQBZ128rm (k register output register).
+    bool ArgFolded = false;
+    unsigned MemOutSize = MemRec->getValueAsDag("OutOperandList")->getNumArgs();
+    unsigned RegOutSize = RegRec->getValueAsDag("OutOperandList")->getNumArgs();
+    unsigned MemInSize = MemRec->getValueAsDag("InOperandList")->getNumArgs();
+    unsigned RegInSize = RegRec->getValueAsDag("InOperandList")->getNumArgs();
+
+    // Instructions with one output in their memory form use the memory folded
+    // operand as source and destination (Read-Modify-Write).
+    unsigned RegStartIdx =
+        (MemOutSize + 1 == RegOutSize) && (MemInSize == RegInSize) ? 1 : 0;
+
+    for (unsigned i = 0, e = MemInst->Operands.size(); i < e; i++) {
+      Record *MemOpRec = MemInst->Operands[i].Rec;
+      Record *RegOpRec = RegInst->Operands[i + RegStartIdx].Rec;
+
+      if (MemOpRec == RegOpRec)
+        continue;
+
+      if (isRegisterOperand(MemOpRec) && isRegisterOperand(RegOpRec)) {
+        if (getRegOperandSize(MemOpRec) != getRegOperandSize(RegOpRec) ||
+            isNOREXRegClass(MemOpRec) != isNOREXRegClass(RegOpRec))
+          return false;
+      } else if (isMemoryOperand(MemOpRec) && isMemoryOperand(RegOpRec)) {
+        if (getMemOperandSize(MemOpRec) != getMemOperandSize(RegOpRec))
+          return false;
+      } else if (isImmediateOperand(MemOpRec) && isImmediateOperand(RegOpRec)) {
+        if (MemOpRec->getValueAsDef("Type") != RegOpRec->getValueAsDef("Type"))
+          return false;
+      } else {
+        // Only one operand can be folded.
+        if (ArgFolded)
+          return false;
+
+        assert(isRegisterOperand(RegOpRec) && isMemoryOperand(MemOpRec));
+        ArgFolded = true;
+      }
+    }
+
+    return true;
+  }
+
+private:
+  // Return true of the 2 given forms are the opposite of each other.
+  bool areOppositeForms(unsigned RegForm, unsigned MemForm) {
+    if ((MemForm == X86Local::MRM0m && RegForm == X86Local::MRM0r) ||
+        (MemForm == X86Local::MRM1m && RegForm == X86Local::MRM1r) ||
+        (MemForm == X86Local::MRM2m && RegForm == X86Local::MRM2r) ||
+        (MemForm == X86Local::MRM3m && RegForm == X86Local::MRM3r) ||
+        (MemForm == X86Local::MRM4m && RegForm == X86Local::MRM4r) ||
+        (MemForm == X86Local::MRM5m && RegForm == X86Local::MRM5r) ||
+        (MemForm == X86Local::MRM6m && RegForm == X86Local::MRM6r) ||
+        (MemForm == X86Local::MRM7m && RegForm == X86Local::MRM7r) ||
+        (MemForm == X86Local::MRMXm && RegForm == X86Local::MRMXr) ||
+        (MemForm == X86Local::MRMXmCC && RegForm == X86Local::MRMXrCC) ||
+        (MemForm == X86Local::MRMDestMem && RegForm == X86Local::MRMDestReg) ||
+        (MemForm == X86Local::MRMSrcMem && RegForm == X86Local::MRMSrcReg) ||
+        (MemForm == X86Local::MRMSrcMem4VOp3 &&
+         RegForm == X86Local::MRMSrcReg4VOp3) ||
+        (MemForm == X86Local::MRMSrcMemOp4 &&
+         RegForm == X86Local::MRMSrcRegOp4) ||
+        (MemForm == X86Local::MRMSrcMemCC && RegForm == X86Local::MRMSrcRegCC))
+      return true;
+
+    return false;
+  }
+};
+
+} // end anonymous namespace
+
+// Builds an X86FoldTableEntry for the (register form, memory form) pair,
+// computes its load/store, unfold, and alignment flags, and appends it to
+// Table. FoldedInd is the index of the folded operand.
+void X86FoldTablesEmitter::addEntryWithFlags(FoldTable &Table,
+                                             const CodeGenInstruction *RegInstr,
+                                             const CodeGenInstruction *MemInstr,
+                                             const UnfoldStrategy S,
+                                             const unsigned int FoldedInd) {
+
+  X86FoldTableEntry Result = X86FoldTableEntry(RegInstr, MemInstr);
+  Record *RegRec = RegInstr->TheDef;
+  Record *MemRec = MemInstr->TheDef;
+
+  // Only table0 entries should explicitly specify a load or store flag.
+  if (&Table == &Table0) {
+    unsigned MemInOpsNum = MemRec->getValueAsDag("InOperandList")->getNumArgs();
+    unsigned RegInOpsNum = RegRec->getValueAsDag("InOperandList")->getNumArgs();
+    // If the instruction writes to the folded operand, it will appear as an
+    // output in the register form instruction and as an input in the memory
+    // form instruction.
+    // If the instruction reads from the folded operand, it will appear as in
+    // input in both forms.
+    if (MemInOpsNum == RegInOpsNum)
+      Result.IsLoad = true;
+    else
+      Result.IsStore = true;
+  }
+
+  Record *RegOpRec = RegInstr->Operands[FoldedInd].Rec;
+  Record *MemOpRec = MemInstr->Operands[FoldedInd].Rec;
+
+  // Unfolding code generates a load/store instruction according to the size of
+  // the register in the register form instruction.
+  // If the register's size is greater than the memory's operand size, do not
+  // allow unfolding.
+  if (S == UNFOLD)
+    Result.CannotUnfold = false;
+  else if (S == NO_UNFOLD)
+    Result.CannotUnfold = true;
+  else if (getRegOperandSize(RegOpRec) > getMemOperandSize(MemOpRec))
+    Result.CannotUnfold = true; // S == NO_STRATEGY
+
+  uint64_t Enc = getValueFromBitsInit(RegRec->getValueAsBitsInit("OpEncBits"));
+  if (isExplicitAlign(RegInstr)) {
+    // The instruction require explicitly aligned memory.
+    BitsInit *VectSize = RegRec->getValueAsBitsInit("VectSize");
+    uint64_t Value = getValueFromBitsInit(VectSize);
+    Result.IsAligned = true;
+    Result.Alignment = Value;
+  } else if (Enc != X86Local::XOP && Enc != X86Local::VEX &&
+             Enc != X86Local::EVEX) {
+    // Instructions with VEX encoding do not require alignment.
+    if (!isExplicitUnalign(RegInstr) && getMemOperandSize(MemOpRec) > 64) {
+      // SSE packed vector instructions require a 16 byte alignment.
+      Result.IsAligned = true;
+      Result.Alignment = 16;
+    }
+  }
+
+  Table.push_back(Result);
+}
+
+// Classifies the (register form, memory form) pair into Table2Addr (RMW),
+// Table0..Table4 (load folding of operand #i), or Table0 (store folding),
+// based on the in/out operand counts and the folded operand's kind.
+void X86FoldTablesEmitter::updateTables(const CodeGenInstruction *RegInstr,
+                                        const CodeGenInstruction *MemInstr,
+                                        const UnfoldStrategy S) {
+
+  Record *RegRec = RegInstr->TheDef;
+  Record *MemRec = MemInstr->TheDef;
+  unsigned MemOutSize = MemRec->getValueAsDag("OutOperandList")->getNumArgs();
+  unsigned RegOutSize = RegRec->getValueAsDag("OutOperandList")->getNumArgs();
+  unsigned MemInSize = MemRec->getValueAsDag("InOperandList")->getNumArgs();
+  unsigned RegInSize = RegRec->getValueAsDag("InOperandList")->getNumArgs();
+
+  // Instructions which Read-Modify-Write should be added to Table2Addr.
+  if (MemOutSize != RegOutSize && MemInSize == RegInSize) {
+    addEntryWithFlags(Table2Addr, RegInstr, MemInstr, S, 0);
+    return;
+  }
+
+  if (MemInSize == RegInSize && MemOutSize == RegOutSize) {
+    // Load-Folding cases.
+    // If the i'th register form operand is a register and the i'th memory form
+    // operand is a memory operand, add instructions to Table#i.
+    for (unsigned i = RegOutSize, e = RegInstr->Operands.size(); i < e; i++) {
+      Record *RegOpRec = RegInstr->Operands[i].Rec;
+      Record *MemOpRec = MemInstr->Operands[i].Rec;
+      // PointerLikeRegClass: For instructions like TAILJMPr, TAILJMPr64, TAILJMPr64_REX
+      if ((isRegisterOperand(RegOpRec) ||
+           RegOpRec->isSubClassOf("PointerLikeRegClass")) &&
+          isMemoryOperand(MemOpRec)) {
+        switch (i) {
+        case 0:
+          addEntryWithFlags(Table0, RegInstr, MemInstr, S, 0);
+          return;
+        case 1:
+          addEntryWithFlags(Table1, RegInstr, MemInstr, S, 1);
+          return;
+        case 2:
+          addEntryWithFlags(Table2, RegInstr, MemInstr, S, 2);
+          return;
+        case 3:
+          addEntryWithFlags(Table3, RegInstr, MemInstr, S, 3);
+          return;
+        case 4:
+          addEntryWithFlags(Table4, RegInstr, MemInstr, S, 4);
+          return;
+        }
+      }
+    }
+  } else if (MemInSize == RegInSize + 1 && MemOutSize + 1 == RegOutSize) {
+    // Store-Folding cases.
+    // If the memory form instruction performs a store, the *output*
+    // register of the register form instructions disappear and instead a
+    // memory *input* operand appears in the memory form instruction.
+    // For example:
+    //   MOVAPSrr => (outs VR128:$dst), (ins VR128:$src)
+    //   MOVAPSmr => (outs), (ins f128mem:$dst, VR128:$src)
+    Record *RegOpRec = RegInstr->Operands[RegOutSize - 1].Rec;
+    Record *MemOpRec = MemInstr->Operands[RegOutSize - 1].Rec;
+    if (isRegisterOperand(RegOpRec) && isMemoryOperand(MemOpRec) &&
+        getRegOperandSize(RegOpRec) == getMemOperandSize(MemOpRec))
+      addEntryWithFlags(Table0, RegInstr, MemInstr, S, 0);
+  }
+}
+
+// Main driver: bucket candidate instructions into memory/register forms,
+// match each memory form with its register form, merge in the manual map,
+// then sort and print all six fold tables.
+void X86FoldTablesEmitter::run(formatted_raw_ostream &OS) {
+  emitSourceFileHeader("X86 fold tables", OS);
+
+  // Holds all memory instructions
+  std::vector<const CodeGenInstruction *> MemInsts;
+  // Holds all register instructions - divided according to opcode.
+  std::map<uint8_t, std::vector<const CodeGenInstruction *>> RegInsts;
+
+  ArrayRef<const CodeGenInstruction *> NumberedInstructions =
+      Target.getInstructionsByEnumValue();
+
+  for (const CodeGenInstruction *Inst : NumberedInstructions) {
+    const Record *Rec = Inst->TheDef;
+    if (!Rec->isSubClassOf("X86Inst") || Rec->getValueAsBit("isAsmParserOnly"))
+      continue;
+
+    // - Do not proceed if the instruction is marked as notMemoryFoldable.
+    // - Instructions including RST register class operands are not relevant
+    //   for memory folding (for further details check the explanation in
+    //   lib/Target/X86/X86InstrFPStack.td file).
+    // - Some instructions (listed in the manual map above) use the register
+    //   class ptr_rc_tailcall, which can be of a size 32 or 64, to ensure
+    //   safe mapping of these instruction we manually map them and exclude
+    //   them from the automation.
+    if (Rec->getValueAsBit("isMemoryFoldable") == false ||
+        hasRSTRegClass(Inst) || hasPtrTailcallRegClass(Inst))
+      continue;
+
+    // Add all the memory form instructions to MemInsts, and all the register
+    // form instructions to RegInsts[Opc], where Opc is the opcode of each
+    // instruction. This helps reduce the runtime of the backend.
+    if (hasMemoryFormat(Rec))
+      MemInsts.push_back(Inst);
+    else if (hasRegisterFormat(Rec)) {
+      uint8_t Opc = getValueFromBitsInit(Rec->getValueAsBitsInit("Opcode"));
+      RegInsts[Opc].push_back(Inst);
+    }
+  }
+
+  Record *AsmWriter = Target.getAsmWriter();
+  unsigned Variant = AsmWriter->getValueAsInt("Variant");
+  // For each memory form instruction, try to find its register form
+  // instruction.
+  for (const CodeGenInstruction *MemInst : MemInsts) {
+    uint8_t Opc =
+        getValueFromBitsInit(MemInst->TheDef->getValueAsBitsInit("Opcode"));
+
+    auto RegInstsIt = RegInsts.find(Opc);
+    if (RegInstsIt == RegInsts.end())
+      continue;
+
+    // Two forms (memory & register) of the same instruction must have the same
+    // opcode. Try matching only with register form instructions with the same
+    // opcode.
+    std::vector<const CodeGenInstruction *> &OpcRegInsts = RegInstsIt->second;
+
+    auto Match = find_if(OpcRegInsts, IsMatch(MemInst, Variant));
+    if (Match != OpcRegInsts.end()) {
+      const CodeGenInstruction *RegInst = *Match;
+      // If the matched instruction has its "FoldGenRegForm" set, map the
+      // memory form instruction to the register form instruction pointed by
+      // this field
+      if (RegInst->TheDef->isValueUnset("FoldGenRegForm")) {
+        updateTables(RegInst, MemInst);
+      } else {
+        const CodeGenInstruction *AltRegInst =
+            getAltRegInst(RegInst, Records, Target);
+        updateTables(AltRegInst, MemInst);
+      }
+      // Each register form matches at most one memory form.
+      OpcRegInsts.erase(Match);
+    }
+  }
+
+  // Add the manually mapped instructions listed above.
+  for (const ManualMapEntry &Entry : ManualMapSet) {
+    Record *RegInstIter = Records.getDef(Entry.RegInstStr);
+    Record *MemInstIter = Records.getDef(Entry.MemInstStr);
+
+    updateTables(&(Target.getInstruction(RegInstIter)),
+                 &(Target.getInstruction(MemInstIter)), Entry.Strategy);
+  }
+
+  // Sort the tables before printing.
+  llvm::sort(Table2Addr);
+  llvm::sort(Table0);
+  llvm::sort(Table1);
+  llvm::sort(Table2);
+  llvm::sort(Table3);
+  llvm::sort(Table4);
+
+  // Print all tables.
+  printTable(Table2Addr, "Table2Addr", OS);
+  printTable(Table0, "Table0", OS);
+  printTable(Table1, "Table1", OS);
+  printTable(Table2, "Table2", OS);
+  printTable(Table3, "Table3", OS);
+  printTable(Table4, "Table4", OS);
+}
+
+namespace llvm {
+
+// TableGen backend entry point for the X86 memory fold tables.
+void EmitX86FoldTables(RecordKeeper &RK, raw_ostream &o) {
+  formatted_raw_ostream OS(o);
+  X86FoldTablesEmitter(RK).run(OS);
+}
+} // namespace llvm
diff --git a/contrib/libs/llvm16/utils/TableGen/X86MnemonicTables.cpp b/contrib/libs/llvm16/utils/TableGen/X86MnemonicTables.cpp
new file mode 100644
index 0000000000..f405e051e3
--- /dev/null
+++ b/contrib/libs/llvm16/utils/TableGen/X86MnemonicTables.cpp
@@ -0,0 +1,94 @@
+//==- X86MnemonicTables.cpp - Generate mnemonic extraction tables. -*- C++ -*-//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// This tablegen backend is responsible for emitting tables that group
+// instructions by their mnemonic name wrt AsmWriter Variant (e.g. isADD, etc).
+//
+//===----------------------------------------------------------------------===//
+
+#include "CodeGenInstruction.h"
+#include "CodeGenTarget.h"
+#include "X86RecognizableInstr.h"
+#include "llvm/TableGen/Error.h"
+#include "llvm/TableGen/TableGenBackend.h"
+
+using namespace llvm;
+
+namespace {
+
+// Emits "bool is<MNEMONIC>(unsigned Opcode)" predicate declarations and
+// definitions that group X86 opcodes by their assembly mnemonic.
+class X86MnemonicTablesEmitter {
+ // Parsed target description; provides the instruction list and AsmWriter.
+ CodeGenTarget Target;
+
+public:
+ X86MnemonicTablesEmitter(RecordKeeper &R) : Target(R) {}
+
+ // Output X86 mnemonic tables.
+ void run(raw_ostream &OS);
+};
+
+// Groups all disassemblable X86 instructions by mnemonic (for the AsmWriter
+// variant configured in the target) and emits one isFOO(Opcode) predicate
+// per mnemonic, guarded by GET_X86_MNEMONIC_TABLES_H / _CPP.
+void X86MnemonicTablesEmitter::run(raw_ostream &OS) {
+ emitSourceFileHeader("X86 Mnemonic tables", OS);
+ OS << "namespace llvm {\nnamespace X86 {\n\n";
+ Record *AsmWriter = Target.getAsmWriter();
+ unsigned Variant = AsmWriter->getValueAsInt("Variant");
+
+ // Hold all instructions grouped by mnemonic
+ StringMap<SmallVector<const CodeGenInstruction *, 0>> MnemonicToCGInstrMap;
+
+ ArrayRef<const CodeGenInstruction *> NumberedInstructions =
+ Target.getInstructionsByEnumValue();
+ for (const CodeGenInstruction *I : NumberedInstructions) {
+ const Record *Def = I->TheDef;
+ // Filter non-X86 instructions.
+ if (!Def->isSubClassOf("X86Inst"))
+ continue;
+ // Skip instructions the disassembler would not emit either (pseudos,
+ // asm-parser-only, codegen-only without ForceDisassemble).
+ X86Disassembler::RecognizableInstrBase RI(*I);
+ if (!RI.shouldBeEmitted())
+ continue;
+ if ( // Non-parsable instruction defs contain prefix as part of AsmString
+ Def->getValueAsString("AsmVariantName") == "NonParsable" ||
+ // Skip prefix byte
+ RI.Form == X86Local::PrefixByte)
+ continue;
+ std::string Mnemonic = X86Disassembler::getMnemonic(I, Variant);
+ MnemonicToCGInstrMap[Mnemonic].push_back(I);
+ }
+
+ // Declarations: one "bool isFOO(unsigned Opcode);" per mnemonic.
+ OS << "#ifdef GET_X86_MNEMONIC_TABLES_H\n";
+ OS << "#undef GET_X86_MNEMONIC_TABLES_H\n\n";
+ for (StringRef Mnemonic : MnemonicToCGInstrMap.keys())
+ OS << "bool is" << Mnemonic << "(unsigned Opcode);\n";
+ OS << "#endif // GET_X86_MNEMONIC_TABLES_H\n\n";
+
+ // Definitions: a single comparison when the mnemonic maps to one opcode,
+ // otherwise a switch over all opcodes sharing the mnemonic.
+ OS << "#ifdef GET_X86_MNEMONIC_TABLES_CPP\n";
+ OS << "#undef GET_X86_MNEMONIC_TABLES_CPP\n\n";
+ for (StringRef Mnemonic : MnemonicToCGInstrMap.keys()) {
+ OS << "bool is" << Mnemonic << "(unsigned Opcode) {\n";
+ auto Mnemonics = MnemonicToCGInstrMap[Mnemonic];
+ if (Mnemonics.size() == 1) {
+ const CodeGenInstruction *CGI = *Mnemonics.begin();
+ OS << "\treturn Opcode == " << CGI->TheDef->getName() << ";\n}\n\n";
+ } else {
+ OS << "\tswitch (Opcode) {\n";
+ for (const CodeGenInstruction *CGI : Mnemonics) {
+ OS << "\tcase " << CGI->TheDef->getName() << ":\n";
+ }
+ OS << "\t\treturn true;\n\t}\n\treturn false;\n}\n\n";
+ }
+ }
+ OS << "#endif // GET_X86_MNEMONIC_TABLES_CPP\n\n";
+ OS << "} // end namespace X86\n} // end namespace llvm";
+}
+
+} // namespace
+
+namespace llvm {
+// Backend entry point for the X86 mnemonic-tables emitter.
+void EmitX86MnemonicTables(RecordKeeper &RK, raw_ostream &OS) {
+ X86MnemonicTablesEmitter(RK).run(OS);
+}
+} // namespace llvm
diff --git a/contrib/libs/llvm16/utils/TableGen/X86ModRMFilters.cpp b/contrib/libs/llvm16/utils/TableGen/X86ModRMFilters.cpp
new file mode 100644
index 0000000000..cf7507094f
--- /dev/null
+++ b/contrib/libs/llvm16/utils/TableGen/X86ModRMFilters.cpp
@@ -0,0 +1,23 @@
+//===- X86ModRMFilters.cpp - Disassembler ModR/M filters --------*- C++ -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+#include "X86ModRMFilters.h"
+
+using namespace llvm::X86Disassembler;
+
+// Out-of-line definitions of each filter's anchor() pin the vtables to this
+// translation unit; the anchors intentionally have no behavior.
+void ModRMFilter::anchor() { }
+
+void DumbFilter::anchor() { }
+
+void ModFilter::anchor() { }
+
+void ExtendedFilter::anchor() { }
+
+void ExtendedRMFilter::anchor() { }
+
+void ExactFilter::anchor() { }
diff --git a/contrib/libs/llvm16/utils/TableGen/X86ModRMFilters.h b/contrib/libs/llvm16/utils/TableGen/X86ModRMFilters.h
new file mode 100644
index 0000000000..e2d0907b4f
--- /dev/null
+++ b/contrib/libs/llvm16/utils/TableGen/X86ModRMFilters.h
@@ -0,0 +1,143 @@
+//===- X86ModRMFilters.h - Disassembler ModR/M filters ----------*- C++ -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// This file is part of the X86 Disassembler Emitter.
+// It contains ModR/M filters that determine which values of the ModR/M byte
+/// are valid for a particular instruction.
+// Documentation for the disassembler emitter in general can be found in
+// X86DisassemblerEmitter.h.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_UTILS_TABLEGEN_X86MODRMFILTERS_H
+#define LLVM_UTILS_TABLEGEN_X86MODRMFILTERS_H
+
+#include "llvm/Support/DataTypes.h"
+
+namespace llvm {
+
+namespace X86Disassembler {
+
+/// ModRMFilter - Abstract base class for classes that recognize patterns in
+/// ModR/M bytes.
+class ModRMFilter {
+ virtual void anchor();
+public:
+ /// Destructor - Override as necessary.
+ virtual ~ModRMFilter() { }
+
+ /// isDumb - Indicates whether this filter returns the same value for
+ /// any value of the ModR/M byte.
+ ///
+ /// @result - True if the filter returns the same value for any ModR/M
+ /// byte; false if not.
+ virtual bool isDumb() const { return false; }
+
+ /// accepts - Indicates whether the filter accepts a particular ModR/M
+ /// byte value.
+ ///
+ /// @result - True if the filter accepts the ModR/M byte; false if not.
+ virtual bool accepts(uint8_t modRM) const = 0;
+};
+
+/// DumbFilter - Accepts any ModR/M byte. Used for instructions that do not
+/// require a ModR/M byte or instructions where the entire ModR/M byte is used
+/// for operands.
+class DumbFilter : public ModRMFilter {
+ void anchor() override;
+public:
+ /// Always true: the result does not depend on the ModR/M byte at all.
+ bool isDumb() const override {
+ return true;
+ }
+
+ /// Accepts every possible ModR/M value.
+ bool accepts(uint8_t modRM) const override {
+ return true;
+ }
+};
+
+/// ModFilter - Filters based on the mod bits [bits 7-6] of the ModR/M byte.
+/// Some instructions are classified based on whether they are 11 or anything
+/// else. This filter performs that classification.
+class ModFilter : public ModRMFilter {
+ void anchor() override;
+ bool R;
+public:
+ /// Constructor
+ ///
+ /// \param r True if the mod bits of the ModR/M byte must be 11; false
+ /// otherwise. The name r derives from the fact that the mod
+ /// bits indicate whether the R/M bits [bits 2-0] signify a
+ /// register or a memory operand.
+ ModFilter(bool r) : R(r) {}
+
+ /// True iff "mod bits == 11" matches the required R flag.
+ bool accepts(uint8_t modRM) const override {
+ return (R == ((modRM & 0xc0) == 0xc0));
+ }
+};
+
+/// ExtendedFilter - Extended opcodes are classified based on the value of the
+/// mod field [bits 7-6] and the value of the nnn field [bits 5-3].
+class ExtendedFilter : public ModRMFilter {
+ void anchor() override;
+ bool R;
+ uint8_t NNN;
+public:
+ /// Constructor
+ ///
+ /// \param r True if the mod field must be set to 11; false otherwise.
+ /// The name is explained at ModFilter.
+ /// \param nnn The required value of the nnn field.
+ ExtendedFilter(bool r, uint8_t nnn) : R(r), NNN(nnn) {}
+
+ /// Accepts when the mod bits match the required R flag AND the
+ /// reg/opcode field [bits 5-3] equals NNN.
+ bool accepts(uint8_t modRM) const override {
+ return (((R && ((modRM & 0xc0) == 0xc0)) ||
+ (!R && ((modRM & 0xc0) != 0xc0))) &&
+ (((modRM & 0x38) >> 3) == NNN));
+ }
+};
+
+/// ExtendedRMFilter - Extended opcodes are classified based on the value of
+/// the mod field [bits 7-6] and the value of the R/M field [bits 2-0].
+class ExtendedRMFilter : public ModRMFilter {
+ void anchor() override;
+ bool R;
+ uint8_t NNN;
+public:
+ /// Constructor
+ ///
+ /// \param r True if the mod field must be set to 11; false otherwise.
+ /// The name is explained at ModFilter.
+ /// \param nnn The required value of the nnn field.
+ ExtendedRMFilter(bool r, uint8_t nnn) : R(r), NNN(nnn) {}
+
+ /// Accepts when mod == 11 (required by \p R) and the R/M field
+ /// [bits 2-0] equals NNN. Note: with R == false this never accepts.
+ bool accepts(uint8_t modRM) const override {
+ return ((R && ((modRM & 0xc0) == 0xc0)) &&
+ ((modRM & 0x7) == NNN));
+ }
+};
+/// ExactFilter - The occasional extended opcode (such as VMCALL or MONITOR)
+/// requires the ModR/M byte to have a specific value.
+class ExactFilter : public ModRMFilter {
+ void anchor() override;
+ uint8_t ModRM;
+public:
+ /// Constructor
+ ///
+ /// \param modRM The required value of the full ModR/M byte.
+ ExactFilter(uint8_t modRM) : ModRM(modRM) {}
+
+ /// Accepts only the single exact ModR/M byte value.
+ bool accepts(uint8_t modRM) const override {
+ return (ModRM == modRM);
+ }
+};
+
+} // namespace X86Disassembler
+
+} // namespace llvm
+
+#endif
diff --git a/contrib/libs/llvm16/utils/TableGen/X86RecognizableInstr.cpp b/contrib/libs/llvm16/utils/TableGen/X86RecognizableInstr.cpp
new file mode 100644
index 0000000000..e5c1e53936
--- /dev/null
+++ b/contrib/libs/llvm16/utils/TableGen/X86RecognizableInstr.cpp
@@ -0,0 +1,1307 @@
+//===- X86RecognizableInstr.cpp - Disassembler instruction spec --*- C++ -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// This file is part of the X86 Disassembler Emitter.
+// It contains the implementation of a single recognizable instruction.
+// Documentation for the disassembler emitter in general can be found in
+// X86DisassemblerEmitter.h.
+//
+//===----------------------------------------------------------------------===//
+
+#include "X86RecognizableInstr.h"
+#include "X86DisassemblerShared.h"
+#include "X86DisassemblerTables.h"
+#include "X86ModRMFilters.h"
+#include "llvm/Support/ErrorHandling.h"
+#include "llvm/TableGen/Record.h"
+#include <string>
+
+using namespace llvm;
+using namespace X86Disassembler;
+
+// Returns the instruction's mnemonic (upper-cased) for the given AsmWriter
+// variant: the flattened AsmString up to the first '\t'.
+std::string X86Disassembler::getMnemonic(const CodeGenInstruction *I, unsigned Variant) {
+ std::string AsmString = I->FlattenAsmStringVariants(I->AsmString, Variant);
+ StringRef Mnemonic(AsmString);
+ // Extract a mnemonic assuming it's separated by \t
+ Mnemonic = Mnemonic.take_until([](char C) { return C == '\t'; });
+
+ // Special case: CMOVCC, JCC, SETCC have "${cond}" in mnemonic.
+ // Replace it with "CC" in-place.
+ // replace() with npos drops everything from "${cond}" onward, mutates
+ // AsmString, and Mnemonic re-binds to the modified string.
+ size_t CondPos = Mnemonic.find("${cond}");
+ if (CondPos != StringRef::npos)
+ Mnemonic = AsmString.replace(CondPos, StringRef::npos, "CC");
+ return Mnemonic.upper();
+}
+
+// True if \p Rec describes a register operand (a RegisterClass or a
+// RegisterOperand record).
+bool X86Disassembler::isRegisterOperand(const Record *Rec) {
+ return Rec->isSubClassOf("RegisterClass") ||
+ Rec->isSubClassOf("RegisterOperand");
+}
+
+// True if \p Rec is an Operand whose OperandType is OPERAND_MEMORY.
+bool X86Disassembler::isMemoryOperand(const Record *Rec) {
+ return Rec->isSubClassOf("Operand") &&
+ Rec->getValueAsString("OperandType") == "OPERAND_MEMORY";
+}
+
+// True if \p Rec is an Operand whose OperandType is OPERAND_IMMEDIATE.
+bool X86Disassembler::isImmediateOperand(const Record *Rec) {
+ return Rec->isSubClassOf("Operand") &&
+ Rec->getValueAsString("OperandType") == "OPERAND_IMMEDIATE";
+}
+
+// Returns the size of a register operand.
+// NOTE: the size is read from the register class's "Alignment" field
+// (directly, or via RegisterOperand's RegClass).
+unsigned X86Disassembler::getRegOperandSize(const Record *RegRec) {
+ if (RegRec->isSubClassOf("RegisterClass"))
+ return RegRec->getValueAsInt("Alignment");
+ if (RegRec->isSubClassOf("RegisterOperand"))
+ return RegRec->getValueAsDef("RegClass")->getValueAsInt("Alignment");
+
+ llvm_unreachable("Register operand's size not known!");
+}
+
+// Returns the size of a memory operand, taken from the X86MemOperand
+// record's "Size" field.
+unsigned X86Disassembler::getMemOperandSize(const Record *MemRec) {
+ if (MemRec->isSubClassOf("X86MemOperand"))
+ return MemRec->getValueAsInt("Size");
+
+ llvm_unreachable("Memory operand's size not known!");
+}
+
+/// byteFromBitsInit - Extracts a value at most 8 bits in width from a BitsInit.
+/// Useful for switch statements and the like.
+///
+/// @param init - A reference to the BitsInit to be decoded.
+/// @return - The field, with the first bit in the BitsInit as the lowest
+/// order bit.
+static uint8_t byteFromBitsInit(BitsInit &init) {
+ int width = init.getNumBits();
+
+ assert(width <= 8 && "Field is too large for uint8_t!");
+
+ int index;
+ uint8_t mask = 0x01;
+
+ uint8_t ret = 0;
+
+ // Bit 0 of the BitsInit becomes the LSB of the result; the mask walks up
+ // one bit position per iteration.
+ for (index = 0; index < width; index++) {
+ if (cast<BitInit>(init.getBit(index))->getValue())
+ ret |= mask;
+
+ mask <<= 1;
+ }
+
+ return ret;
+}
+
+/// byteFromRec - Extract a value at most 8 bits in width from a Record given
+/// the name of the field.
+///
+/// @param rec - The record from which to extract the value.
+/// @param name - The name of the field in the record.
+/// @return - The field, as translated by byteFromBitsInit().
+static uint8_t byteFromRec(const Record* rec, StringRef name) {
+ BitsInit* bits = rec->getValueAsBitsInit(name);
+ return byteFromBitsInit(*bits);
+}
+
+// Caches all encoding-relevant bits/flags of an X86Inst record into plain
+// members so later classification never has to re-query the Record.
+RecognizableInstrBase::RecognizableInstrBase(const CodeGenInstruction &insn) {
+ const Record *Rec = insn.TheDef;
+ assert(Rec->isSubClassOf("X86Inst") && "Not a X86 Instruction");
+ OpPrefix = byteFromRec(Rec, "OpPrefixBits");
+ OpMap = byteFromRec(Rec, "OpMapBits");
+ Opcode = byteFromRec(Rec, "Opcode");
+ Form = byteFromRec(Rec, "FormBits");
+ Encoding = byteFromRec(Rec, "OpEncBits");
+ OpSize = byteFromRec(Rec, "OpSizeBits");
+ AdSize = byteFromRec(Rec, "AdSizeBits");
+ HasREX_W = Rec->getValueAsBit("hasREX_W");
+ HasVEX_4V = Rec->getValueAsBit("hasVEX_4V");
+ HasVEX_W = Rec->getValueAsBit("HasVEX_W");
+ IgnoresVEX_W = Rec->getValueAsBit("IgnoresVEX_W");
+ IgnoresVEX_L = Rec->getValueAsBit("ignoresVEX_L");
+ HasEVEX_L2 = Rec->getValueAsBit("hasEVEX_L2");
+ HasEVEX_K = Rec->getValueAsBit("hasEVEX_K");
+ HasEVEX_KZ = Rec->getValueAsBit("hasEVEX_Z");
+ HasEVEX_B = Rec->getValueAsBit("hasEVEX_B");
+ IsCodeGenOnly = Rec->getValueAsBit("isCodeGenOnly");
+ IsAsmParserOnly = Rec->getValueAsBit("isAsmParserOnly");
+ ForceDisassemble = Rec->getValueAsBit("ForceDisassemble");
+ CD8_Scale = byteFromRec(Rec, "CD8_Scale");
+ HasVEX_L = Rec->getValueAsBit("hasVEX_L");
+
+ // EncodeRC is only meaningful for register-register forms with EVEX_B set.
+ EncodeRC = HasEVEX_B &&
+ (Form == X86Local::MRMDestReg || Form == X86Local::MRMSrcReg);
+}
+
+// An instruction gets disassembler table entries only if it is not a pseudo,
+// not asm-parser-only, and either not codegen-only or explicitly marked
+// ForceDisassemble.
+bool RecognizableInstrBase::shouldBeEmitted() const {
+ return Form != X86Local::Pseudo && (!IsCodeGenOnly || ForceDisassemble) &&
+ !IsAsmParserOnly;
+}
+
+// Builds the full per-instruction state used by the disassembler emitter and
+// classifies the instruction as 32-bit-only / 64-bit-only by scanning its
+// predicate names (first match wins).
+RecognizableInstr::RecognizableInstr(DisassemblerTables &tables,
+ const CodeGenInstruction &insn,
+ InstrUID uid)
+ : RecognizableInstrBase(insn), Rec(insn.TheDef), Name(Rec->getName().str()),
+ Is32Bit(false), Is64Bit(false), Operands(&insn.Operands.OperandList),
+ UID(uid), Spec(&tables.specForUID(uid)) {
+ // Check for 64-bit inst which does not require REX
+ // FIXME: Is there some better way to check for In64BitMode?
+ std::vector<Record *> Predicates = Rec->getValueAsListOfDefs("Predicates");
+ for (unsigned i = 0, e = Predicates.size(); i != e; ++i) {
+ // "Not64Bit"/"In32Bit" in a predicate name marks the instruction as
+ // 32-bit only; "In64Bit" marks it 64-bit only.
+ if (Predicates[i]->getName().contains("Not64Bit") ||
+ Predicates[i]->getName().contains("In32Bit")) {
+ Is32Bit = true;
+ break;
+ }
+ if (Predicates[i]->getName().contains("In64Bit")) {
+ Is64Bit = true;
+ break;
+ }
+ }
+}
+
+// Top-level hook: builds a RecognizableInstr for \p insn and, if it should be
+// represented in the disassembler, records its instruction specifier and its
+// decode paths into \p tables. Non-X86 instructions are ignored.
+void RecognizableInstr::processInstr(DisassemblerTables &tables,
+ const CodeGenInstruction &insn,
+ InstrUID uid) {
+ if (!insn.TheDef->isSubClassOf("X86Inst"))
+ return;
+ RecognizableInstr recogInstr(tables, insn, uid);
+
+ if (!recogInstr.shouldBeEmitted())
+ return;
+ recogInstr.emitInstructionSpecifier();
+ recogInstr.emitDecodePath(tables);
+}
+
+// EVEX_KB(n) picks the _KZ_B/_K_B/_KZ/_K/_B-suffixed variant of context n
+// according to the instruction's masking (K), zeroing (KZ) and EVEX.b (B)
+// bits; plain n when none of those are set.
+#define EVEX_KB(n) (HasEVEX_KZ && HasEVEX_B ? n##_KZ_B : \
+ (HasEVEX_K && HasEVEX_B ? n##_K_B : \
+ (HasEVEX_KZ ? n##_KZ : \
+ (HasEVEX_K? n##_K : (HasEVEX_B ? n##_B : n)))))
+
+// Computes the InstructionContext for this instruction by cascading over the
+// encoding family (EVEX, then VEX/XOP, then 64-bit legacy, then legacy) and,
+// within each family, over the L/L2/W bits and the mandatory prefix
+// (PD=0x66, XS=0xF3, XD=0xF2, PS=none). Order of the checks is significant.
+InstructionContext RecognizableInstr::insnContext() const {
+ InstructionContext insnContext;
+
+ if (Encoding == X86Local::EVEX) {
+ if (HasVEX_L && HasEVEX_L2) {
+ errs() << "Don't support VEX.L if EVEX_L2 is enabled: " << Name << "\n";
+ llvm_unreachable("Don't support VEX.L if EVEX_L2 is enabled");
+ }
+ // VEX_L & VEX_W
+ if (!EncodeRC && HasVEX_L && HasVEX_W) {
+ if (OpPrefix == X86Local::PD)
+ insnContext = EVEX_KB(IC_EVEX_L_W_OPSIZE);
+ else if (OpPrefix == X86Local::XS)
+ insnContext = EVEX_KB(IC_EVEX_L_W_XS);
+ else if (OpPrefix == X86Local::XD)
+ insnContext = EVEX_KB(IC_EVEX_L_W_XD);
+ else if (OpPrefix == X86Local::PS)
+ insnContext = EVEX_KB(IC_EVEX_L_W);
+ else {
+ errs() << "Instruction does not use a prefix: " << Name << "\n";
+ llvm_unreachable("Invalid prefix");
+ }
+ } else if (!EncodeRC && HasVEX_L) {
+ // VEX_L
+ if (OpPrefix == X86Local::PD)
+ insnContext = EVEX_KB(IC_EVEX_L_OPSIZE);
+ else if (OpPrefix == X86Local::XS)
+ insnContext = EVEX_KB(IC_EVEX_L_XS);
+ else if (OpPrefix == X86Local::XD)
+ insnContext = EVEX_KB(IC_EVEX_L_XD);
+ else if (OpPrefix == X86Local::PS)
+ insnContext = EVEX_KB(IC_EVEX_L);
+ else {
+ errs() << "Instruction does not use a prefix: " << Name << "\n";
+ llvm_unreachable("Invalid prefix");
+ }
+ } else if (!EncodeRC && HasEVEX_L2 && HasVEX_W) {
+ // EVEX_L2 & VEX_W
+ if (OpPrefix == X86Local::PD)
+ insnContext = EVEX_KB(IC_EVEX_L2_W_OPSIZE);
+ else if (OpPrefix == X86Local::XS)
+ insnContext = EVEX_KB(IC_EVEX_L2_W_XS);
+ else if (OpPrefix == X86Local::XD)
+ insnContext = EVEX_KB(IC_EVEX_L2_W_XD);
+ else if (OpPrefix == X86Local::PS)
+ insnContext = EVEX_KB(IC_EVEX_L2_W);
+ else {
+ errs() << "Instruction does not use a prefix: " << Name << "\n";
+ llvm_unreachable("Invalid prefix");
+ }
+ } else if (!EncodeRC && HasEVEX_L2) {
+ // EVEX_L2
+ if (OpPrefix == X86Local::PD)
+ insnContext = EVEX_KB(IC_EVEX_L2_OPSIZE);
+ else if (OpPrefix == X86Local::XD)
+ insnContext = EVEX_KB(IC_EVEX_L2_XD);
+ else if (OpPrefix == X86Local::XS)
+ insnContext = EVEX_KB(IC_EVEX_L2_XS);
+ else if (OpPrefix == X86Local::PS)
+ insnContext = EVEX_KB(IC_EVEX_L2);
+ else {
+ errs() << "Instruction does not use a prefix: " << Name << "\n";
+ llvm_unreachable("Invalid prefix");
+ }
+ }
+ else if (HasVEX_W) {
+ // VEX_W
+ if (OpPrefix == X86Local::PD)
+ insnContext = EVEX_KB(IC_EVEX_W_OPSIZE);
+ else if (OpPrefix == X86Local::XS)
+ insnContext = EVEX_KB(IC_EVEX_W_XS);
+ else if (OpPrefix == X86Local::XD)
+ insnContext = EVEX_KB(IC_EVEX_W_XD);
+ else if (OpPrefix == X86Local::PS)
+ insnContext = EVEX_KB(IC_EVEX_W);
+ else {
+ errs() << "Instruction does not use a prefix: " << Name << "\n";
+ llvm_unreachable("Invalid prefix");
+ }
+ }
+ // No L, no W
+ else if (OpPrefix == X86Local::PD)
+ insnContext = EVEX_KB(IC_EVEX_OPSIZE);
+ else if (OpPrefix == X86Local::XD)
+ insnContext = EVEX_KB(IC_EVEX_XD);
+ else if (OpPrefix == X86Local::XS)
+ insnContext = EVEX_KB(IC_EVEX_XS);
+ else if (OpPrefix == X86Local::PS)
+ insnContext = EVEX_KB(IC_EVEX);
+ else {
+ errs() << "Instruction does not use a prefix: " << Name << "\n";
+ llvm_unreachable("Invalid prefix");
+ }
+ /// eof EVEX
+ } else if (Encoding == X86Local::VEX || Encoding == X86Local::XOP) {
+ if (HasVEX_L && HasVEX_W) {
+ if (OpPrefix == X86Local::PD)
+ insnContext = IC_VEX_L_W_OPSIZE;
+ else if (OpPrefix == X86Local::XS)
+ insnContext = IC_VEX_L_W_XS;
+ else if (OpPrefix == X86Local::XD)
+ insnContext = IC_VEX_L_W_XD;
+ else if (OpPrefix == X86Local::PS)
+ insnContext = IC_VEX_L_W;
+ else {
+ errs() << "Instruction does not use a prefix: " << Name << "\n";
+ llvm_unreachable("Invalid prefix");
+ }
+ } else if (OpPrefix == X86Local::PD && HasVEX_L)
+ insnContext = IC_VEX_L_OPSIZE;
+ else if (OpPrefix == X86Local::PD && HasVEX_W)
+ insnContext = IC_VEX_W_OPSIZE;
+ else if (OpPrefix == X86Local::PD)
+ insnContext = IC_VEX_OPSIZE;
+ else if (HasVEX_L && OpPrefix == X86Local::XS)
+ insnContext = IC_VEX_L_XS;
+ else if (HasVEX_L && OpPrefix == X86Local::XD)
+ insnContext = IC_VEX_L_XD;
+ else if (HasVEX_W && OpPrefix == X86Local::XS)
+ insnContext = IC_VEX_W_XS;
+ else if (HasVEX_W && OpPrefix == X86Local::XD)
+ insnContext = IC_VEX_W_XD;
+ else if (HasVEX_W && OpPrefix == X86Local::PS)
+ insnContext = IC_VEX_W;
+ else if (HasVEX_L && OpPrefix == X86Local::PS)
+ insnContext = IC_VEX_L;
+ else if (OpPrefix == X86Local::XD)
+ insnContext = IC_VEX_XD;
+ else if (OpPrefix == X86Local::XS)
+ insnContext = IC_VEX_XS;
+ else if (OpPrefix == X86Local::PS)
+ insnContext = IC_VEX;
+ else {
+ errs() << "Instruction does not use a prefix: " << Name << "\n";
+ llvm_unreachable("Invalid prefix");
+ }
+ } else if (Is64Bit || HasREX_W || AdSize == X86Local::AdSize64) {
+ if (HasREX_W && (OpSize == X86Local::OpSize16 || OpPrefix == X86Local::PD))
+ insnContext = IC_64BIT_REXW_OPSIZE;
+ else if (HasREX_W && AdSize == X86Local::AdSize32)
+ insnContext = IC_64BIT_REXW_ADSIZE;
+ else if (OpSize == X86Local::OpSize16 && OpPrefix == X86Local::XD)
+ insnContext = IC_64BIT_XD_OPSIZE;
+ else if (OpSize == X86Local::OpSize16 && OpPrefix == X86Local::XS)
+ insnContext = IC_64BIT_XS_OPSIZE;
+ else if (AdSize == X86Local::AdSize32 && OpPrefix == X86Local::PD)
+ insnContext = IC_64BIT_OPSIZE_ADSIZE;
+ else if (OpSize == X86Local::OpSize16 && AdSize == X86Local::AdSize32)
+ insnContext = IC_64BIT_OPSIZE_ADSIZE;
+ else if (OpSize == X86Local::OpSize16 || OpPrefix == X86Local::PD)
+ insnContext = IC_64BIT_OPSIZE;
+ else if (AdSize == X86Local::AdSize32)
+ insnContext = IC_64BIT_ADSIZE;
+ else if (HasREX_W && OpPrefix == X86Local::XS)
+ insnContext = IC_64BIT_REXW_XS;
+ else if (HasREX_W && OpPrefix == X86Local::XD)
+ insnContext = IC_64BIT_REXW_XD;
+ else if (OpPrefix == X86Local::XD)
+ insnContext = IC_64BIT_XD;
+ else if (OpPrefix == X86Local::XS)
+ insnContext = IC_64BIT_XS;
+ else if (HasREX_W)
+ insnContext = IC_64BIT_REXW;
+ else
+ insnContext = IC_64BIT;
+ } else {
+ if (OpSize == X86Local::OpSize16 && OpPrefix == X86Local::XD)
+ insnContext = IC_XD_OPSIZE;
+ else if (OpSize == X86Local::OpSize16 && OpPrefix == X86Local::XS)
+ insnContext = IC_XS_OPSIZE;
+ else if (AdSize == X86Local::AdSize16 && OpPrefix == X86Local::XD)
+ insnContext = IC_XD_ADSIZE;
+ else if (AdSize == X86Local::AdSize16 && OpPrefix == X86Local::XS)
+ insnContext = IC_XS_ADSIZE;
+ else if (AdSize == X86Local::AdSize16 && OpPrefix == X86Local::PD)
+ insnContext = IC_OPSIZE_ADSIZE;
+ else if (OpSize == X86Local::OpSize16 && AdSize == X86Local::AdSize16)
+ insnContext = IC_OPSIZE_ADSIZE;
+ else if (OpSize == X86Local::OpSize16 || OpPrefix == X86Local::PD)
+ insnContext = IC_OPSIZE;
+ else if (AdSize == X86Local::AdSize16)
+ insnContext = IC_ADSIZE;
+ else if (OpPrefix == X86Local::XD)
+ insnContext = IC_XD;
+ else if (OpPrefix == X86Local::XS)
+ insnContext = IC_XS;
+ else
+ insnContext = IC;
+ }
+
+ return insnContext;
+}
+
+void RecognizableInstr::adjustOperandEncoding(OperandEncoding &encoding) {
+ // The scaling factor for AVX512 compressed displacement encoding is an
+ // instruction attribute. Adjust the ModRM encoding type to include the
+ // scale for compressed displacement.
+ // Only RM/VSIB/SIB encodings with a nonzero CD8 scale are adjusted.
+ if ((encoding != ENCODING_RM &&
+ encoding != ENCODING_VSIB &&
+ encoding != ENCODING_SIB) ||CD8_Scale == 0)
+ return;
+ // The encoding enums are laid out so that adding log2(CD8_Scale) selects
+ // the matching _CDnn variant.
+ encoding = (OperandEncoding)(encoding + Log2_32(CD8_Scale));
+ assert(((encoding >= ENCODING_RM && encoding <= ENCODING_RM_CD64) ||
+ (encoding == ENCODING_SIB) ||
+ (encoding >= ENCODING_VSIB && encoding <= ENCODING_VSIB_CD64)) &&
+ "Invalid CDisp scaling");
+}
+
+// Fills in encoding and type for the next operand of the instruction spec.
+//
+// \p optional              If true, silently return once every physical
+//                          operand has been consumed; otherwise that is an
+//                          assertion failure.
+// \p operandIndex          Index into the full operand list; advanced here.
+// \p physicalOperandIndex  Index counting only non-tied operands; advanced.
+// \p operandMapping        Maps tied (duplicate) operands back to their
+//                          originals; entries where mapping[i] != i are
+//                          emitted as ENCODING_DUP / TYPE_DUPn and skipped.
+// \p encodingFromString    Translates the operand record's name (plus the
+//                          instruction's OpSize) into an OperandEncoding.
+void RecognizableInstr::handleOperand(bool optional, unsigned &operandIndex,
+ unsigned &physicalOperandIndex,
+ unsigned numPhysicalOperands,
+ const unsigned *operandMapping,
+ OperandEncoding (*encodingFromString)
+ (const std::string&,
+ uint8_t OpSize)) {
+ if (optional) {
+ if (physicalOperandIndex >= numPhysicalOperands)
+ return;
+ } else {
+ assert(physicalOperandIndex < numPhysicalOperands);
+ }
+
+ // Emit DUP entries for tied operands preceding the next physical one.
+ while (operandMapping[operandIndex] != operandIndex) {
+ Spec->operands[operandIndex].encoding = ENCODING_DUP;
+ Spec->operands[operandIndex].type =
+ (OperandType)(TYPE_DUP0 + operandMapping[operandIndex]);
+ ++operandIndex;
+ }
+
+ StringRef typeName = (*Operands)[operandIndex].Rec->getName();
+
+ OperandEncoding encoding = encodingFromString(std::string(typeName), OpSize);
+ // Adjust the encoding type for an operand based on the instruction.
+ adjustOperandEncoding(encoding);
+ Spec->operands[operandIndex].encoding = encoding;
+ Spec->operands[operandIndex].type =
+ typeFromString(std::string(typeName), HasREX_W, OpSize);
+
+ ++operandIndex;
+ ++physicalOperandIndex;
+}
+
+void RecognizableInstr::emitInstructionSpecifier() {
+ Spec->name = Name;
+
+ Spec->insnContext = insnContext();
+
+ const std::vector<CGIOperandList::OperandInfo> &OperandList = *Operands;
+
+ unsigned numOperands = OperandList.size();
+ unsigned numPhysicalOperands = 0;
+
+ // operandMapping maps from operands in OperandList to their originals.
+ // If operandMapping[i] != i, then the entry is a duplicate.
+ unsigned operandMapping[X86_MAX_OPERANDS];
+ assert(numOperands <= X86_MAX_OPERANDS && "X86_MAX_OPERANDS is not large enough");
+
+ for (unsigned operandIndex = 0; operandIndex < numOperands; ++operandIndex) {
+ if (!OperandList[operandIndex].Constraints.empty()) {
+ const CGIOperandList::ConstraintInfo &Constraint =
+ OperandList[operandIndex].Constraints[0];
+ if (Constraint.isTied()) {
+ operandMapping[operandIndex] = operandIndex;
+ operandMapping[Constraint.getTiedOperand()] = operandIndex;
+ } else {
+ ++numPhysicalOperands;
+ operandMapping[operandIndex] = operandIndex;
+ }
+ } else {
+ ++numPhysicalOperands;
+ operandMapping[operandIndex] = operandIndex;
+ }
+ }
+
+#define HANDLE_OPERAND(class) \
+ handleOperand(false, \
+ operandIndex, \
+ physicalOperandIndex, \
+ numPhysicalOperands, \
+ operandMapping, \
+ class##EncodingFromString);
+
+#define HANDLE_OPTIONAL(class) \
+ handleOperand(true, \
+ operandIndex, \
+ physicalOperandIndex, \
+ numPhysicalOperands, \
+ operandMapping, \
+ class##EncodingFromString);
+
+ // operandIndex should always be < numOperands
+ unsigned operandIndex = 0;
+ // physicalOperandIndex should always be < numPhysicalOperands
+ unsigned physicalOperandIndex = 0;
+
+#ifndef NDEBUG
+ // Given the set of prefix bits, how many additional operands does the
+ // instruction have?
+ unsigned additionalOperands = 0;
+ if (HasVEX_4V)
+ ++additionalOperands;
+ if (HasEVEX_K)
+ ++additionalOperands;
+#endif
+
+ switch (Form) {
+ default: llvm_unreachable("Unhandled form");
+ case X86Local::PrefixByte:
+ return;
+ case X86Local::RawFrmSrc:
+ HANDLE_OPERAND(relocation);
+ return;
+ case X86Local::RawFrmDst:
+ HANDLE_OPERAND(relocation);
+ return;
+ case X86Local::RawFrmDstSrc:
+ HANDLE_OPERAND(relocation);
+ HANDLE_OPERAND(relocation);
+ return;
+ case X86Local::RawFrm:
+ // Operand 1 (optional) is an address or immediate.
+ assert(numPhysicalOperands <= 1 &&
+ "Unexpected number of operands for RawFrm");
+ HANDLE_OPTIONAL(relocation)
+ break;
+ case X86Local::RawFrmMemOffs:
+ // Operand 1 is an address.
+ HANDLE_OPERAND(relocation);
+ break;
+ case X86Local::AddRegFrm:
+ // Operand 1 is added to the opcode.
+ // Operand 2 (optional) is an address.
+ assert(numPhysicalOperands >= 1 && numPhysicalOperands <= 2 &&
+ "Unexpected number of operands for AddRegFrm");
+ HANDLE_OPERAND(opcodeModifier)
+ HANDLE_OPTIONAL(relocation)
+ break;
+ case X86Local::AddCCFrm:
+ // Operand 1 (optional) is an address or immediate.
+ assert(numPhysicalOperands == 2 &&
+ "Unexpected number of operands for AddCCFrm");
+ HANDLE_OPERAND(relocation)
+ HANDLE_OPERAND(opcodeModifier)
+ break;
+ case X86Local::MRMDestReg:
+ // Operand 1 is a register operand in the R/M field.
+ // - In AVX512 there may be a mask operand here -
+ // Operand 2 is a register operand in the Reg/Opcode field.
+ // - In AVX, there is a register operand in the VEX.vvvv field here -
+ // Operand 3 (optional) is an immediate.
+ assert(numPhysicalOperands >= 2 + additionalOperands &&
+ numPhysicalOperands <= 3 + additionalOperands &&
+ "Unexpected number of operands for MRMDestRegFrm");
+
+ HANDLE_OPERAND(rmRegister)
+ if (HasEVEX_K)
+ HANDLE_OPERAND(writemaskRegister)
+
+ if (HasVEX_4V)
+ // FIXME: In AVX, the register below becomes the one encoded
+ // in ModRMVEX and the one above the one in the VEX.VVVV field
+ HANDLE_OPERAND(vvvvRegister)
+
+ HANDLE_OPERAND(roRegister)
+ HANDLE_OPTIONAL(immediate)
+ break;
+ case X86Local::MRMDestMem4VOp3CC:
+ // Operand 1 is a register operand in the Reg/Opcode field.
+ // Operand 2 is a register operand in the R/M field.
+ // Operand 3 is VEX.vvvv
+ // Operand 4 is condition code.
+ assert(numPhysicalOperands == 4 &&
+ "Unexpected number of operands for MRMDestMem4VOp3CC");
+ HANDLE_OPERAND(roRegister)
+ HANDLE_OPERAND(memory)
+ HANDLE_OPERAND(vvvvRegister)
+ HANDLE_OPERAND(opcodeModifier)
+ break;
+ case X86Local::MRMDestMem:
+ case X86Local::MRMDestMemFSIB:
+ // Operand 1 is a memory operand (possibly SIB-extended)
+ // Operand 2 is a register operand in the Reg/Opcode field.
+ // - In AVX, there is a register operand in the VEX.vvvv field here -
+ // Operand 3 (optional) is an immediate.
+ assert(numPhysicalOperands >= 2 + additionalOperands &&
+ numPhysicalOperands <= 3 + additionalOperands &&
+ "Unexpected number of operands for MRMDestMemFrm with VEX_4V");
+
+ HANDLE_OPERAND(memory)
+
+ if (HasEVEX_K)
+ HANDLE_OPERAND(writemaskRegister)
+
+ if (HasVEX_4V)
+ // FIXME: In AVX, the register below becomes the one encoded
+ // in ModRMVEX and the one above the one in the VEX.VVVV field
+ HANDLE_OPERAND(vvvvRegister)
+
+ HANDLE_OPERAND(roRegister)
+ HANDLE_OPTIONAL(immediate)
+ break;
+ case X86Local::MRMSrcReg:
+ // Operand 1 is a register operand in the Reg/Opcode field.
+ // Operand 2 is a register operand in the R/M field.
+ // - In AVX, there is a register operand in the VEX.vvvv field here -
+ // Operand 3 (optional) is an immediate.
+ // Operand 4 (optional) is an immediate.
+
+ assert(numPhysicalOperands >= 2 + additionalOperands &&
+ numPhysicalOperands <= 4 + additionalOperands &&
+ "Unexpected number of operands for MRMSrcRegFrm");
+
+ HANDLE_OPERAND(roRegister)
+
+ if (HasEVEX_K)
+ HANDLE_OPERAND(writemaskRegister)
+
+ if (HasVEX_4V)
+ // FIXME: In AVX, the register below becomes the one encoded
+ // in ModRMVEX and the one above the one in the VEX.VVVV field
+ HANDLE_OPERAND(vvvvRegister)
+
+ HANDLE_OPERAND(rmRegister)
+ HANDLE_OPTIONAL(immediate)
+ HANDLE_OPTIONAL(immediate) // above might be a register in 7:4
+ break;
+ case X86Local::MRMSrcReg4VOp3:
+ assert(numPhysicalOperands == 3 &&
+ "Unexpected number of operands for MRMSrcReg4VOp3Frm");
+ HANDLE_OPERAND(roRegister)
+ HANDLE_OPERAND(rmRegister)
+ HANDLE_OPERAND(vvvvRegister)
+ break;
+ case X86Local::MRMSrcRegOp4:
+ assert(numPhysicalOperands >= 4 && numPhysicalOperands <= 5 &&
+ "Unexpected number of operands for MRMSrcRegOp4Frm");
+ HANDLE_OPERAND(roRegister)
+ HANDLE_OPERAND(vvvvRegister)
+ HANDLE_OPERAND(immediate) // Register in imm[7:4]
+ HANDLE_OPERAND(rmRegister)
+ HANDLE_OPTIONAL(immediate)
+ break;
+ case X86Local::MRMSrcRegCC:
+ assert(numPhysicalOperands == 3 &&
+ "Unexpected number of operands for MRMSrcRegCC");
+ HANDLE_OPERAND(roRegister)
+ HANDLE_OPERAND(rmRegister)
+ HANDLE_OPERAND(opcodeModifier)
+ break;
+ case X86Local::MRMSrcMem:
+ case X86Local::MRMSrcMemFSIB:
+ // Operand 1 is a register operand in the Reg/Opcode field.
+ // Operand 2 is a memory operand (possibly SIB-extended)
+ // - In AVX, there is a register operand in the VEX.vvvv field here -
+ // Operand 3 (optional) is an immediate.
+
+ assert(numPhysicalOperands >= 2 + additionalOperands &&
+ numPhysicalOperands <= 4 + additionalOperands &&
+ "Unexpected number of operands for MRMSrcMemFrm");
+
+ HANDLE_OPERAND(roRegister)
+
+ if (HasEVEX_K)
+ HANDLE_OPERAND(writemaskRegister)
+
+ if (HasVEX_4V)
+ // FIXME: In AVX, the register below becomes the one encoded
+ // in ModRMVEX and the one above the one in the VEX.VVVV field
+ HANDLE_OPERAND(vvvvRegister)
+
+ HANDLE_OPERAND(memory)
+ HANDLE_OPTIONAL(immediate)
+ HANDLE_OPTIONAL(immediate) // above might be a register in 7:4
+ break;
+ case X86Local::MRMSrcMem4VOp3:
+ assert(numPhysicalOperands == 3 &&
+ "Unexpected number of operands for MRMSrcMem4VOp3Frm");
+ HANDLE_OPERAND(roRegister)
+ HANDLE_OPERAND(memory)
+ HANDLE_OPERAND(vvvvRegister)
+ break;
+ case X86Local::MRMSrcMemOp4:
+ assert(numPhysicalOperands >= 4 && numPhysicalOperands <= 5 &&
+ "Unexpected number of operands for MRMSrcMemOp4Frm");
+ HANDLE_OPERAND(roRegister)
+ HANDLE_OPERAND(vvvvRegister)
+ HANDLE_OPERAND(immediate) // Register in imm[7:4]
+ HANDLE_OPERAND(memory)
+ HANDLE_OPTIONAL(immediate)
+ break;
+ case X86Local::MRMSrcMemCC:
+ assert(numPhysicalOperands == 3 &&
+ "Unexpected number of operands for MRMSrcMemCC");
+ HANDLE_OPERAND(roRegister)
+ HANDLE_OPERAND(memory)
+ HANDLE_OPERAND(opcodeModifier)
+ break;
+ case X86Local::MRMXrCC:
+ assert(numPhysicalOperands == 2 &&
+ "Unexpected number of operands for MRMXrCC");
+ HANDLE_OPERAND(rmRegister)
+ HANDLE_OPERAND(opcodeModifier)
+ break;
+ case X86Local::MRMr0:
+ // Operand 1 is a register operand in the R/M field.
+ HANDLE_OPERAND(roRegister)
+ break;
+ case X86Local::MRMXr:
+ case X86Local::MRM0r:
+ case X86Local::MRM1r:
+ case X86Local::MRM2r:
+ case X86Local::MRM3r:
+ case X86Local::MRM4r:
+ case X86Local::MRM5r:
+ case X86Local::MRM6r:
+ case X86Local::MRM7r:
+ // Operand 1 is a register operand in the R/M field.
+ // Operand 2 (optional) is an immediate or relocation.
+ // Operand 3 (optional) is an immediate.
+ assert(numPhysicalOperands >= 0 + additionalOperands &&
+ numPhysicalOperands <= 3 + additionalOperands &&
+ "Unexpected number of operands for MRMnr");
+
+ if (HasVEX_4V)
+ HANDLE_OPERAND(vvvvRegister)
+
+ if (HasEVEX_K)
+ HANDLE_OPERAND(writemaskRegister)
+ HANDLE_OPTIONAL(rmRegister)
+ HANDLE_OPTIONAL(relocation)
+ HANDLE_OPTIONAL(immediate)
+ break;
+ case X86Local::MRMXmCC:
+ assert(numPhysicalOperands == 2 &&
+ "Unexpected number of operands for MRMXm");
+ HANDLE_OPERAND(memory)
+ HANDLE_OPERAND(opcodeModifier)
+ break;
+ case X86Local::MRMXm:
+ case X86Local::MRM0m:
+ case X86Local::MRM1m:
+ case X86Local::MRM2m:
+ case X86Local::MRM3m:
+ case X86Local::MRM4m:
+ case X86Local::MRM5m:
+ case X86Local::MRM6m:
+ case X86Local::MRM7m:
+ // Operand 1 is a memory operand (possibly SIB-extended)
+ // Operand 2 (optional) is an immediate or relocation.
+ assert(numPhysicalOperands >= 1 + additionalOperands &&
+ numPhysicalOperands <= 2 + additionalOperands &&
+ "Unexpected number of operands for MRMnm");
+
+ if (HasVEX_4V)
+ HANDLE_OPERAND(vvvvRegister)
+ if (HasEVEX_K)
+ HANDLE_OPERAND(writemaskRegister)
+ HANDLE_OPERAND(memory)
+ HANDLE_OPTIONAL(relocation)
+ break;
+ case X86Local::RawFrmImm8:
+ // operand 1 is a 16-bit immediate
+ // operand 2 is an 8-bit immediate
+ assert(numPhysicalOperands == 2 &&
+ "Unexpected number of operands for X86Local::RawFrmImm8");
+ HANDLE_OPERAND(immediate)
+ HANDLE_OPERAND(immediate)
+ break;
+ case X86Local::RawFrmImm16:
+ // operand 1 is a 16-bit immediate
+ // operand 2 is a 16-bit immediate
+ HANDLE_OPERAND(immediate)
+ HANDLE_OPERAND(immediate)
+ break;
+ case X86Local::MRM0X:
+ case X86Local::MRM1X:
+ case X86Local::MRM2X:
+ case X86Local::MRM3X:
+ case X86Local::MRM4X:
+ case X86Local::MRM5X:
+ case X86Local::MRM6X:
+ case X86Local::MRM7X:
+#define MAP(from, to) case X86Local::MRM_##from:
+ X86_INSTR_MRM_MAPPING
+#undef MAP
+ HANDLE_OPTIONAL(relocation)
+ break;
+ }
+
+#undef HANDLE_OPERAND
+#undef HANDLE_OPTIONAL
+}
+
+void RecognizableInstr::emitDecodePath(DisassemblerTables &tables) const {
+  // Special cases where the LLVM tables are not complete
+
+#define MAP(from, to) \
+  case X86Local::MRM_##from:
+
+  // Translate the instruction's opcode map (OpMap field) into the
+  // disassembler's opcode table selector.
+  std::optional<OpcodeType> opcodeType;
+  switch (OpMap) {
+  default: llvm_unreachable("Invalid map!");
+  case X86Local::OB: opcodeType = ONEBYTE; break;
+  case X86Local::TB: opcodeType = TWOBYTE; break;
+  case X86Local::T8: opcodeType = THREEBYTE_38; break;
+  case X86Local::TA: opcodeType = THREEBYTE_3A; break;
+  case X86Local::XOP8: opcodeType = XOP8_MAP; break;
+  case X86Local::XOP9: opcodeType = XOP9_MAP; break;
+  case X86Local::XOPA: opcodeType = XOPA_MAP; break;
+  case X86Local::ThreeDNow: opcodeType = THREEDNOW_MAP; break;
+  case X86Local::T_MAP5: opcodeType = MAP5; break;
+  case X86Local::T_MAP6: opcodeType = MAP6; break;
+  }
+
+  // Select a ModRMFilter describing which ModR/M byte values this form claims:
+  // any byte (DumbFilter), register vs. memory mod bits (ModFilter), a /n
+  // opcode extension (ExtendedFilter / ExtendedRMFilter), or one exact byte
+  // (ExactFilter, for the fixed MRM_C0..MRM_FF forms).
+  std::unique_ptr<ModRMFilter> filter;
+  switch (Form) {
+  default: llvm_unreachable("Invalid form!");
+  case X86Local::Pseudo: llvm_unreachable("Pseudo should not be emitted!");
+  case X86Local::RawFrm:
+  case X86Local::AddRegFrm:
+  case X86Local::RawFrmMemOffs:
+  case X86Local::RawFrmSrc:
+  case X86Local::RawFrmDst:
+  case X86Local::RawFrmDstSrc:
+  case X86Local::RawFrmImm8:
+  case X86Local::RawFrmImm16:
+  case X86Local::AddCCFrm:
+  case X86Local::PrefixByte:
+    filter = std::make_unique<DumbFilter>();
+    break;
+  case X86Local::MRMDestReg:
+  case X86Local::MRMSrcReg:
+  case X86Local::MRMSrcReg4VOp3:
+  case X86Local::MRMSrcRegOp4:
+  case X86Local::MRMSrcRegCC:
+  case X86Local::MRMXrCC:
+  case X86Local::MRMXr:
+    filter = std::make_unique<ModFilter>(true);
+    break;
+  case X86Local::MRMDestMem:
+  case X86Local::MRMDestMem4VOp3CC:
+  case X86Local::MRMDestMemFSIB:
+  case X86Local::MRMSrcMem:
+  case X86Local::MRMSrcMemFSIB:
+  case X86Local::MRMSrcMem4VOp3:
+  case X86Local::MRMSrcMemOp4:
+  case X86Local::MRMSrcMemCC:
+  case X86Local::MRMXmCC:
+  case X86Local::MRMXm:
+    filter = std::make_unique<ModFilter>(false);
+    break;
+  case X86Local::MRM0r: case X86Local::MRM1r:
+  case X86Local::MRM2r: case X86Local::MRM3r:
+  case X86Local::MRM4r: case X86Local::MRM5r:
+  case X86Local::MRM6r: case X86Local::MRM7r:
+    filter = std::make_unique<ExtendedFilter>(true, Form - X86Local::MRM0r);
+    break;
+  case X86Local::MRM0X: case X86Local::MRM1X:
+  case X86Local::MRM2X: case X86Local::MRM3X:
+  case X86Local::MRM4X: case X86Local::MRM5X:
+  case X86Local::MRM6X: case X86Local::MRM7X:
+    filter = std::make_unique<ExtendedFilter>(true, Form - X86Local::MRM0X);
+    break;
+  case X86Local::MRMr0:
+    filter = std::make_unique<ExtendedRMFilter>(true, Form - X86Local::MRMr0);
+    break;
+  case X86Local::MRM0m: case X86Local::MRM1m:
+  case X86Local::MRM2m: case X86Local::MRM3m:
+  case X86Local::MRM4m: case X86Local::MRM5m:
+  case X86Local::MRM6m: case X86Local::MRM7m:
+    filter = std::make_unique<ExtendedFilter>(false, Form - X86Local::MRM0m);
+    break;
+  X86_INSTR_MRM_MAPPING
+    filter = std::make_unique<ExactFilter>(0xC0 + Form - X86Local::MRM_C0);
+    break;
+  } // switch (Form)
+
+  uint8_t opcodeToSet = Opcode;
+
+  // AddressSize stays 0 when the instruction carries no address-size override.
+  unsigned AddressSize = 0;
+  switch (AdSize) {
+  case X86Local::AdSize16: AddressSize = 16; break;
+  case X86Local::AdSize32: AddressSize = 32; break;
+  case X86Local::AdSize64: AddressSize = 64; break;
+  }
+
+  assert(opcodeType && "Opcode type not set");
+  assert(filter && "Filter not set");
+
+  // Forms that embed a register (AddRegFrm: 8 opcodes) or a condition code
+  // (the *CC forms: 16 opcodes) in the opcode's low bits occupy a contiguous,
+  // aligned range of opcode bytes; register the decode path for every one.
+  if (Form == X86Local::AddRegFrm || Form == X86Local::MRMSrcRegCC ||
+      Form == X86Local::MRMSrcMemCC || Form == X86Local::MRMXrCC ||
+      Form == X86Local::MRMXmCC || Form == X86Local::AddCCFrm ||
+      Form == X86Local::MRMDestMem4VOp3CC) {
+    uint8_t Count = Form == X86Local::AddRegFrm ? 8 : 16;
+    assert(((opcodeToSet % Count) == 0) && "ADDREG_FRM opcode not aligned");
+
+    uint8_t currentOpcode;
+
+    for (currentOpcode = opcodeToSet;
+         currentOpcode < (uint8_t)(opcodeToSet + Count); ++currentOpcode)
+      tables.setTableFields(*opcodeType, insnContext(), currentOpcode, *filter,
+                            UID, Is32Bit, OpPrefix == 0,
+                            IgnoresVEX_L || EncodeRC,
+                            IgnoresVEX_W, AddressSize);
+  } else {
+    tables.setTableFields(*opcodeType, insnContext(), opcodeToSet, *filter, UID,
+                          Is32Bit, OpPrefix == 0, IgnoresVEX_L || EncodeRC,
+                          IgnoresVEX_W, AddressSize);
+  }
+
+#undef MAP
+}
+
+// TYPE(str, type) expands to an early return when the operand record's name
+// matches str; earlier entries take precedence, which is why the REX.W and
+// OpSize special cases below must come before the generic table.
+#define TYPE(str, type) if (s == str) return type;
+OperandType RecognizableInstr::typeFromString(const std::string &s,
+                                              bool hasREX_W,
+                                              uint8_t OpSize) {
+  if(hasREX_W) {
+    // For instructions with a REX_W prefix, a declared 32-bit register encoding
+    // is special.
+    TYPE("GR32", TYPE_R32)
+  }
+  if(OpSize == X86Local::OpSize16) {
+    // For OpSize16 instructions, a declared 16-bit register or
+    // immediate encoding is special.
+    TYPE("GR16", TYPE_Rv)
+  } else if(OpSize == X86Local::OpSize32) {
+    // For OpSize32 instructions, a declared 32-bit register or
+    // immediate encoding is special.
+    TYPE("GR32", TYPE_Rv)
+  }
+  TYPE("i16mem", TYPE_M)
+  TYPE("i16imm", TYPE_IMM)
+  TYPE("i16i8imm", TYPE_IMM)
+  TYPE("GR16", TYPE_R16)
+  TYPE("GR16orGR32orGR64", TYPE_R16)
+  TYPE("i32mem", TYPE_M)
+  TYPE("i32imm", TYPE_IMM)
+  TYPE("i32i8imm", TYPE_IMM)
+  TYPE("GR32", TYPE_R32)
+  TYPE("GR32orGR64", TYPE_R32)
+  TYPE("i64mem", TYPE_M)
+  TYPE("i64i32imm", TYPE_IMM)
+  TYPE("i64i8imm", TYPE_IMM)
+  TYPE("GR64", TYPE_R64)
+  TYPE("i8mem", TYPE_M)
+  TYPE("i8imm", TYPE_IMM)
+  TYPE("u4imm", TYPE_UIMM8)
+  TYPE("u8imm", TYPE_UIMM8)
+  TYPE("i16u8imm", TYPE_UIMM8)
+  TYPE("i32u8imm", TYPE_UIMM8)
+  TYPE("i64u8imm", TYPE_UIMM8)
+  TYPE("GR8", TYPE_R8)
+  TYPE("VR128", TYPE_XMM)
+  TYPE("VR128X", TYPE_XMM)
+  TYPE("f128mem", TYPE_M)
+  TYPE("f256mem", TYPE_M)
+  TYPE("f512mem", TYPE_M)
+  TYPE("FR128", TYPE_XMM)
+  TYPE("FR64", TYPE_XMM)
+  TYPE("FR64X", TYPE_XMM)
+  TYPE("f64mem", TYPE_M)
+  TYPE("sdmem", TYPE_M)
+  TYPE("FR16X", TYPE_XMM)
+  TYPE("FR32", TYPE_XMM)
+  TYPE("FR32X", TYPE_XMM)
+  TYPE("f32mem", TYPE_M)
+  TYPE("f16mem", TYPE_M)
+  TYPE("ssmem", TYPE_M)
+  TYPE("shmem", TYPE_M)
+  TYPE("RST", TYPE_ST)
+  TYPE("RSTi", TYPE_ST)
+  TYPE("i128mem", TYPE_M)
+  TYPE("i256mem", TYPE_M)
+  TYPE("i512mem", TYPE_M)
+  TYPE("i64i32imm_brtarget", TYPE_REL)
+  TYPE("i16imm_brtarget", TYPE_REL)
+  TYPE("i32imm_brtarget", TYPE_REL)
+  TYPE("ccode", TYPE_IMM)
+  TYPE("AVX512RC", TYPE_IMM)
+  TYPE("brtarget32", TYPE_REL)
+  TYPE("brtarget16", TYPE_REL)
+  TYPE("brtarget8", TYPE_REL)
+  TYPE("f80mem", TYPE_M)
+  TYPE("lea64_32mem", TYPE_M)
+  TYPE("lea64mem", TYPE_M)
+  TYPE("VR64", TYPE_MM64)
+  TYPE("i64imm", TYPE_IMM)
+  TYPE("anymem", TYPE_M)
+  TYPE("opaquemem", TYPE_M)
+  TYPE("sibmem", TYPE_MSIB)
+  TYPE("SEGMENT_REG", TYPE_SEGMENTREG)
+  TYPE("DEBUG_REG", TYPE_DEBUGREG)
+  TYPE("CONTROL_REG", TYPE_CONTROLREG)
+  TYPE("srcidx8", TYPE_SRCIDX)
+  TYPE("srcidx16", TYPE_SRCIDX)
+  TYPE("srcidx32", TYPE_SRCIDX)
+  TYPE("srcidx64", TYPE_SRCIDX)
+  TYPE("dstidx8", TYPE_DSTIDX)
+  TYPE("dstidx16", TYPE_DSTIDX)
+  TYPE("dstidx32", TYPE_DSTIDX)
+  TYPE("dstidx64", TYPE_DSTIDX)
+  TYPE("offset16_8", TYPE_MOFFS)
+  TYPE("offset16_16", TYPE_MOFFS)
+  TYPE("offset16_32", TYPE_MOFFS)
+  TYPE("offset32_8", TYPE_MOFFS)
+  TYPE("offset32_16", TYPE_MOFFS)
+  TYPE("offset32_32", TYPE_MOFFS)
+  TYPE("offset32_64", TYPE_MOFFS)
+  TYPE("offset64_8", TYPE_MOFFS)
+  TYPE("offset64_16", TYPE_MOFFS)
+  TYPE("offset64_32", TYPE_MOFFS)
+  TYPE("offset64_64", TYPE_MOFFS)
+  TYPE("VR256", TYPE_YMM)
+  TYPE("VR256X", TYPE_YMM)
+  TYPE("VR512", TYPE_ZMM)
+  TYPE("VK1", TYPE_VK)
+  TYPE("VK1WM", TYPE_VK)
+  TYPE("VK2", TYPE_VK)
+  TYPE("VK2WM", TYPE_VK)
+  TYPE("VK4", TYPE_VK)
+  TYPE("VK4WM", TYPE_VK)
+  TYPE("VK8", TYPE_VK)
+  TYPE("VK8WM", TYPE_VK)
+  TYPE("VK16", TYPE_VK)
+  TYPE("VK16WM", TYPE_VK)
+  TYPE("VK32", TYPE_VK)
+  TYPE("VK32WM", TYPE_VK)
+  TYPE("VK64", TYPE_VK)
+  TYPE("VK64WM", TYPE_VK)
+  TYPE("VK1Pair", TYPE_VK_PAIR)
+  TYPE("VK2Pair", TYPE_VK_PAIR)
+  TYPE("VK4Pair", TYPE_VK_PAIR)
+  TYPE("VK8Pair", TYPE_VK_PAIR)
+  TYPE("VK16Pair", TYPE_VK_PAIR)
+  TYPE("vx64mem", TYPE_MVSIBX)
+  TYPE("vx128mem", TYPE_MVSIBX)
+  TYPE("vx256mem", TYPE_MVSIBX)
+  TYPE("vy128mem", TYPE_MVSIBY)
+  TYPE("vy256mem", TYPE_MVSIBY)
+  TYPE("vx64xmem", TYPE_MVSIBX)
+  TYPE("vx128xmem", TYPE_MVSIBX)
+  TYPE("vx256xmem", TYPE_MVSIBX)
+  TYPE("vy128xmem", TYPE_MVSIBY)
+  TYPE("vy256xmem", TYPE_MVSIBY)
+  TYPE("vy512xmem", TYPE_MVSIBY)
+  TYPE("vz256mem", TYPE_MVSIBZ)
+  TYPE("vz512mem", TYPE_MVSIBZ)
+  TYPE("BNDR", TYPE_BNDR)
+  TYPE("TILE", TYPE_TMM)
+  errs() << "Unhandled type string " << s << "\n";
+  llvm_unreachable("Unhandled type string");
+}
+#undef TYPE
+
+// ENCODING(str, encoding) expands to an early return when the operand record's
+// name matches str; earlier entries take precedence over later ones, which is
+// why the OpSize16 special case below must come before the generic table.
+#define ENCODING(str, encoding) if (s == str) return encoding;
+OperandEncoding
+RecognizableInstr::immediateEncodingFromString(const std::string &s,
+                                               uint8_t OpSize) {
+  if(OpSize != X86Local::OpSize16) {
+    // For instructions without an OpSize prefix, a declared 16-bit register or
+    // immediate encoding is special.
+    ENCODING("i16imm", ENCODING_IW)
+  }
+  ENCODING("i32i8imm", ENCODING_IB)
+  ENCODING("AVX512RC", ENCODING_IRC)
+  ENCODING("i16imm", ENCODING_Iv)
+  ENCODING("i16i8imm", ENCODING_IB)
+  ENCODING("i32imm", ENCODING_Iv)
+  ENCODING("i64i32imm", ENCODING_ID)
+  ENCODING("i64i8imm", ENCODING_IB)
+  ENCODING("i8imm", ENCODING_IB)
+  ENCODING("u4imm", ENCODING_IB)
+  ENCODING("u8imm", ENCODING_IB)
+  ENCODING("i16u8imm", ENCODING_IB)
+  ENCODING("i32u8imm", ENCODING_IB)
+  ENCODING("i64u8imm", ENCODING_IB)
+  // This is not a typo. Instructions like BLENDVPD put
+  // register IDs in 8-bit immediates nowadays.
+  ENCODING("FR32", ENCODING_IB)
+  ENCODING("FR64", ENCODING_IB)
+  ENCODING("FR128", ENCODING_IB)
+  ENCODING("VR128", ENCODING_IB)
+  ENCODING("VR256", ENCODING_IB)
+  ENCODING("FR16X", ENCODING_IB)
+  ENCODING("FR32X", ENCODING_IB)
+  ENCODING("FR64X", ENCODING_IB)
+  ENCODING("VR128X", ENCODING_IB)
+  ENCODING("VR256X", ENCODING_IB)
+  ENCODING("VR512", ENCODING_IB)
+  ENCODING("TILE", ENCODING_IB)
+  errs() << "Unhandled immediate encoding " << s << "\n";
+  llvm_unreachable("Unhandled immediate encoding");
+}
+
+// Like immediateEncodingFromString, but for register operands encoded in the
+// R/M field of the ModR/M byte. OpSize is unused here; it is kept so all
+// *FromString helpers match the function-pointer signature handleOperand takes.
+OperandEncoding
+RecognizableInstr::rmRegisterEncodingFromString(const std::string &s,
+                                                uint8_t OpSize) {
+  ENCODING("RST", ENCODING_FP)
+  ENCODING("RSTi", ENCODING_FP)
+  ENCODING("GR16", ENCODING_RM)
+  ENCODING("GR16orGR32orGR64",ENCODING_RM)
+  ENCODING("GR32", ENCODING_RM)
+  ENCODING("GR32orGR64", ENCODING_RM)
+  ENCODING("GR64", ENCODING_RM)
+  ENCODING("GR8", ENCODING_RM)
+  ENCODING("VR128", ENCODING_RM)
+  ENCODING("VR128X", ENCODING_RM)
+  ENCODING("FR128", ENCODING_RM)
+  ENCODING("FR64", ENCODING_RM)
+  ENCODING("FR32", ENCODING_RM)
+  ENCODING("FR64X", ENCODING_RM)
+  ENCODING("FR32X", ENCODING_RM)
+  ENCODING("FR16X", ENCODING_RM)
+  ENCODING("VR64", ENCODING_RM)
+  ENCODING("VR256", ENCODING_RM)
+  ENCODING("VR256X", ENCODING_RM)
+  ENCODING("VR512", ENCODING_RM)
+  ENCODING("VK1", ENCODING_RM)
+  ENCODING("VK2", ENCODING_RM)
+  ENCODING("VK4", ENCODING_RM)
+  ENCODING("VK8", ENCODING_RM)
+  ENCODING("VK16", ENCODING_RM)
+  ENCODING("VK32", ENCODING_RM)
+  ENCODING("VK64", ENCODING_RM)
+  ENCODING("BNDR", ENCODING_RM)
+  ENCODING("TILE", ENCODING_RM)
+  errs() << "Unhandled R/M register encoding " << s << "\n";
+  llvm_unreachable("Unhandled R/M register encoding");
+}
+
+// Like immediateEncodingFromString, but for register operands encoded in the
+// Reg/Opcode field of the ModR/M byte.
+OperandEncoding
+RecognizableInstr::roRegisterEncodingFromString(const std::string &s,
+                                                uint8_t OpSize) {
+  ENCODING("GR16", ENCODING_REG)
+  ENCODING("GR16orGR32orGR64",ENCODING_REG)
+  ENCODING("GR32", ENCODING_REG)
+  ENCODING("GR32orGR64", ENCODING_REG)
+  ENCODING("GR64", ENCODING_REG)
+  ENCODING("GR8", ENCODING_REG)
+  ENCODING("VR128", ENCODING_REG)
+  ENCODING("FR128", ENCODING_REG)
+  ENCODING("FR64", ENCODING_REG)
+  ENCODING("FR32", ENCODING_REG)
+  ENCODING("VR64", ENCODING_REG)
+  ENCODING("SEGMENT_REG", ENCODING_REG)
+  ENCODING("DEBUG_REG", ENCODING_REG)
+  ENCODING("CONTROL_REG", ENCODING_REG)
+  ENCODING("VR256", ENCODING_REG)
+  ENCODING("VR256X", ENCODING_REG)
+  ENCODING("VR128X", ENCODING_REG)
+  ENCODING("FR64X", ENCODING_REG)
+  ENCODING("FR32X", ENCODING_REG)
+  ENCODING("FR16X", ENCODING_REG)
+  ENCODING("VR512", ENCODING_REG)
+  ENCODING("VK1", ENCODING_REG)
+  ENCODING("VK2", ENCODING_REG)
+  ENCODING("VK4", ENCODING_REG)
+  ENCODING("VK8", ENCODING_REG)
+  ENCODING("VK16", ENCODING_REG)
+  ENCODING("VK32", ENCODING_REG)
+  ENCODING("VK64", ENCODING_REG)
+  ENCODING("VK1Pair", ENCODING_REG)
+  ENCODING("VK2Pair", ENCODING_REG)
+  ENCODING("VK4Pair", ENCODING_REG)
+  ENCODING("VK8Pair", ENCODING_REG)
+  ENCODING("VK16Pair", ENCODING_REG)
+  ENCODING("VK1WM", ENCODING_REG)
+  ENCODING("VK2WM", ENCODING_REG)
+  ENCODING("VK4WM", ENCODING_REG)
+  ENCODING("VK8WM", ENCODING_REG)
+  ENCODING("VK16WM", ENCODING_REG)
+  ENCODING("VK32WM", ENCODING_REG)
+  ENCODING("VK64WM", ENCODING_REG)
+  ENCODING("BNDR", ENCODING_REG)
+  ENCODING("TILE", ENCODING_REG)
+  errs() << "Unhandled reg/opcode register encoding " << s << "\n";
+  llvm_unreachable("Unhandled reg/opcode register encoding");
+}
+
+// Like immediateEncodingFromString, but for register operands carried in the
+// VEX/EVEX.vvvv field.
+OperandEncoding
+RecognizableInstr::vvvvRegisterEncodingFromString(const std::string &s,
+                                                  uint8_t OpSize) {
+  ENCODING("GR32", ENCODING_VVVV)
+  ENCODING("GR64", ENCODING_VVVV)
+  ENCODING("FR32", ENCODING_VVVV)
+  ENCODING("FR128", ENCODING_VVVV)
+  ENCODING("FR64", ENCODING_VVVV)
+  ENCODING("VR128", ENCODING_VVVV)
+  ENCODING("VR256", ENCODING_VVVV)
+  ENCODING("FR16X", ENCODING_VVVV)
+  ENCODING("FR32X", ENCODING_VVVV)
+  ENCODING("FR64X", ENCODING_VVVV)
+  ENCODING("VR128X", ENCODING_VVVV)
+  ENCODING("VR256X", ENCODING_VVVV)
+  ENCODING("VR512", ENCODING_VVVV)
+  ENCODING("VK1", ENCODING_VVVV)
+  ENCODING("VK2", ENCODING_VVVV)
+  ENCODING("VK4", ENCODING_VVVV)
+  ENCODING("VK8", ENCODING_VVVV)
+  ENCODING("VK16", ENCODING_VVVV)
+  ENCODING("VK32", ENCODING_VVVV)
+  ENCODING("VK64", ENCODING_VVVV)
+  ENCODING("TILE", ENCODING_VVVV)
+  errs() << "Unhandled VEX.vvvv register encoding " << s << "\n";
+  llvm_unreachable("Unhandled VEX.vvvv register encoding");
+}
+
+// Like immediateEncodingFromString, but for EVEX opmask (writemask) register
+// operands — only the VK*WM register classes are valid here.
+OperandEncoding
+RecognizableInstr::writemaskRegisterEncodingFromString(const std::string &s,
+                                                       uint8_t OpSize) {
+  ENCODING("VK1WM", ENCODING_WRITEMASK)
+  ENCODING("VK2WM", ENCODING_WRITEMASK)
+  ENCODING("VK4WM", ENCODING_WRITEMASK)
+  ENCODING("VK8WM", ENCODING_WRITEMASK)
+  ENCODING("VK16WM", ENCODING_WRITEMASK)
+  ENCODING("VK32WM", ENCODING_WRITEMASK)
+  ENCODING("VK64WM", ENCODING_WRITEMASK)
+  errs() << "Unhandled mask register encoding " << s << "\n";
+  llvm_unreachable("Unhandled mask register encoding");
+}
+
+// Like immediateEncodingFromString, but for memory operands: plain ModR/M
+// (ENCODING_RM), SIB-required (ENCODING_SIB), and gather/scatter VSIB forms
+// (ENCODING_VSIB).
+OperandEncoding
+RecognizableInstr::memoryEncodingFromString(const std::string &s,
+                                            uint8_t OpSize) {
+  ENCODING("i16mem", ENCODING_RM)
+  ENCODING("i32mem", ENCODING_RM)
+  ENCODING("i64mem", ENCODING_RM)
+  ENCODING("i8mem", ENCODING_RM)
+  ENCODING("shmem", ENCODING_RM)
+  ENCODING("ssmem", ENCODING_RM)
+  ENCODING("sdmem", ENCODING_RM)
+  ENCODING("f128mem", ENCODING_RM)
+  ENCODING("f256mem", ENCODING_RM)
+  ENCODING("f512mem", ENCODING_RM)
+  ENCODING("f64mem", ENCODING_RM)
+  ENCODING("f32mem", ENCODING_RM)
+  ENCODING("f16mem", ENCODING_RM)
+  ENCODING("i128mem", ENCODING_RM)
+  ENCODING("i256mem", ENCODING_RM)
+  ENCODING("i512mem", ENCODING_RM)
+  ENCODING("f80mem", ENCODING_RM)
+  ENCODING("lea64_32mem", ENCODING_RM)
+  ENCODING("lea64mem", ENCODING_RM)
+  ENCODING("anymem", ENCODING_RM)
+  ENCODING("opaquemem", ENCODING_RM)
+  ENCODING("sibmem", ENCODING_SIB)
+  ENCODING("vx64mem", ENCODING_VSIB)
+  ENCODING("vx128mem", ENCODING_VSIB)
+  ENCODING("vx256mem", ENCODING_VSIB)
+  ENCODING("vy128mem", ENCODING_VSIB)
+  ENCODING("vy256mem", ENCODING_VSIB)
+  ENCODING("vx64xmem", ENCODING_VSIB)
+  ENCODING("vx128xmem", ENCODING_VSIB)
+  ENCODING("vx256xmem", ENCODING_VSIB)
+  ENCODING("vy128xmem", ENCODING_VSIB)
+  ENCODING("vy256xmem", ENCODING_VSIB)
+  ENCODING("vy512xmem", ENCODING_VSIB)
+  ENCODING("vz256mem", ENCODING_VSIB)
+  ENCODING("vz512mem", ENCODING_VSIB)
+  errs() << "Unhandled memory encoding " << s << "\n";
+  llvm_unreachable("Unhandled memory encoding");
+}
+
+// Like immediateEncodingFromString, but for immediates that may carry a
+// relocation: plain immediates, branch targets, absolute moffs offsets
+// (ENCODING_Ia), and string-op source/destination index registers
+// (ENCODING_SI / ENCODING_DI).
+OperandEncoding
+RecognizableInstr::relocationEncodingFromString(const std::string &s,
+                                                uint8_t OpSize) {
+  if(OpSize != X86Local::OpSize16) {
+    // For instructions without an OpSize prefix, a declared 16-bit register or
+    // immediate encoding is special.
+    ENCODING("i16imm", ENCODING_IW)
+  }
+  ENCODING("i16imm", ENCODING_Iv)
+  ENCODING("i16i8imm", ENCODING_IB)
+  ENCODING("i32imm", ENCODING_Iv)
+  ENCODING("i32i8imm", ENCODING_IB)
+  ENCODING("i64i32imm", ENCODING_ID)
+  ENCODING("i64i8imm", ENCODING_IB)
+  ENCODING("i8imm", ENCODING_IB)
+  ENCODING("u8imm", ENCODING_IB)
+  ENCODING("i16u8imm", ENCODING_IB)
+  ENCODING("i32u8imm", ENCODING_IB)
+  ENCODING("i64u8imm", ENCODING_IB)
+  ENCODING("i64i32imm_brtarget", ENCODING_ID)
+  ENCODING("i16imm_brtarget", ENCODING_IW)
+  ENCODING("i32imm_brtarget", ENCODING_ID)
+  ENCODING("brtarget32", ENCODING_ID)
+  ENCODING("brtarget16", ENCODING_IW)
+  ENCODING("brtarget8", ENCODING_IB)
+  ENCODING("i64imm", ENCODING_IO)
+  ENCODING("offset16_8", ENCODING_Ia)
+  ENCODING("offset16_16", ENCODING_Ia)
+  ENCODING("offset16_32", ENCODING_Ia)
+  ENCODING("offset32_8", ENCODING_Ia)
+  ENCODING("offset32_16", ENCODING_Ia)
+  ENCODING("offset32_32", ENCODING_Ia)
+  ENCODING("offset32_64", ENCODING_Ia)
+  ENCODING("offset64_8", ENCODING_Ia)
+  ENCODING("offset64_16", ENCODING_Ia)
+  ENCODING("offset64_32", ENCODING_Ia)
+  ENCODING("offset64_64", ENCODING_Ia)
+  ENCODING("srcidx8", ENCODING_SI)
+  ENCODING("srcidx16", ENCODING_SI)
+  ENCODING("srcidx32", ENCODING_SI)
+  ENCODING("srcidx64", ENCODING_SI)
+  ENCODING("dstidx8", ENCODING_DI)
+  ENCODING("dstidx16", ENCODING_DI)
+  ENCODING("dstidx32", ENCODING_DI)
+  ENCODING("dstidx64", ENCODING_DI)
+  errs() << "Unhandled relocation encoding " << s << "\n";
+  llvm_unreachable("Unhandled relocation encoding");
+}
+
+// Like immediateEncodingFromString, but for operands folded into the opcode
+// byte itself: a register in the opcode's low bits (AddRegFrm) or a condition
+// code (the *CC forms).
+OperandEncoding
+RecognizableInstr::opcodeModifierEncodingFromString(const std::string &s,
+                                                    uint8_t OpSize) {
+  ENCODING("GR32", ENCODING_Rv)
+  ENCODING("GR64", ENCODING_RO)
+  ENCODING("GR16", ENCODING_Rv)
+  ENCODING("GR8", ENCODING_RB)
+  ENCODING("ccode", ENCODING_CC)
+  errs() << "Unhandled opcode modifier encoding " << s << "\n";
+  llvm_unreachable("Unhandled opcode modifier encoding");
+}
+#undef ENCODING
diff --git a/contrib/libs/llvm16/utils/TableGen/X86RecognizableInstr.h b/contrib/libs/llvm16/utils/TableGen/X86RecognizableInstr.h
new file mode 100644
index 0000000000..ea56a9d7d9
--- /dev/null
+++ b/contrib/libs/llvm16/utils/TableGen/X86RecognizableInstr.h
@@ -0,0 +1,367 @@
+//===- X86RecognizableInstr.h - Disassembler instruction spec ----*- C++ -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// This file is part of the X86 Disassembler Emitter.
+// It contains the interface of a single recognizable instruction.
+// Documentation for the disassembler emitter in general can be found in
+// X86DisassemblerEmitter.h.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_UTILS_TABLEGEN_X86RECOGNIZABLEINSTR_H
+#define LLVM_UTILS_TABLEGEN_X86RECOGNIZABLEINSTR_H
+
+#include "CodeGenInstruction.h"
+#include "llvm/Support/DataTypes.h"
+#include "llvm/Support/X86DisassemblerDecoderCommon.h"
+
+struct InstructionSpecifier;
+
+namespace llvm {
+
+class Record;
+
+// Table of the fixed-ModR/M-byte forms MRM_C0..MRM_FF and their Form enum
+// values (64..127). Expanded with a locally-defined MAP(from, to) macro both
+// to declare the enumerators in X86Local and in the .cpp switch statements.
+#define X86_INSTR_MRM_MAPPING \
+  MAP(C0, 64) \
+  MAP(C1, 65) \
+  MAP(C2, 66) \
+  MAP(C3, 67) \
+  MAP(C4, 68) \
+  MAP(C5, 69) \
+  MAP(C6, 70) \
+  MAP(C7, 71) \
+  MAP(C8, 72) \
+  MAP(C9, 73) \
+  MAP(CA, 74) \
+  MAP(CB, 75) \
+  MAP(CC, 76) \
+  MAP(CD, 77) \
+  MAP(CE, 78) \
+  MAP(CF, 79) \
+  MAP(D0, 80) \
+  MAP(D1, 81) \
+  MAP(D2, 82) \
+  MAP(D3, 83) \
+  MAP(D4, 84) \
+  MAP(D5, 85) \
+  MAP(D6, 86) \
+  MAP(D7, 87) \
+  MAP(D8, 88) \
+  MAP(D9, 89) \
+  MAP(DA, 90) \
+  MAP(DB, 91) \
+  MAP(DC, 92) \
+  MAP(DD, 93) \
+  MAP(DE, 94) \
+  MAP(DF, 95) \
+  MAP(E0, 96) \
+  MAP(E1, 97) \
+  MAP(E2, 98) \
+  MAP(E3, 99) \
+  MAP(E4, 100) \
+  MAP(E5, 101) \
+  MAP(E6, 102) \
+  MAP(E7, 103) \
+  MAP(E8, 104) \
+  MAP(E9, 105) \
+  MAP(EA, 106) \
+  MAP(EB, 107) \
+  MAP(EC, 108) \
+  MAP(ED, 109) \
+  MAP(EE, 110) \
+  MAP(EF, 111) \
+  MAP(F0, 112) \
+  MAP(F1, 113) \
+  MAP(F2, 114) \
+  MAP(F3, 115) \
+  MAP(F4, 116) \
+  MAP(F5, 117) \
+  MAP(F6, 118) \
+  MAP(F7, 119) \
+  MAP(F8, 120) \
+  MAP(F9, 121) \
+  MAP(FA, 122) \
+  MAP(FB, 123) \
+  MAP(FC, 124) \
+  MAP(FD, 125) \
+  MAP(FE, 126) \
+  MAP(FF, 127)
+
+// A clone of X86 since we can't depend on something that is generated.
+namespace X86Local {
+  // Instruction format: values of the Form field from the record.
+  enum {
+    Pseudo = 0,
+    RawFrm = 1,
+    AddRegFrm = 2,
+    RawFrmMemOffs = 3,
+    RawFrmSrc = 4,
+    RawFrmDst = 5,
+    RawFrmDstSrc = 6,
+    RawFrmImm8 = 7,
+    RawFrmImm16 = 8,
+    AddCCFrm = 9,
+    PrefixByte = 10,
+    MRMDestMem4VOp3CC = 20,
+    MRMr0 = 21,
+    MRMSrcMemFSIB = 22,
+    MRMDestMemFSIB = 23,
+    MRMDestMem = 24,
+    MRMSrcMem = 25,
+    MRMSrcMem4VOp3 = 26,
+    MRMSrcMemOp4 = 27,
+    MRMSrcMemCC = 28,
+    MRMXmCC = 30, MRMXm = 31,
+    MRM0m = 32, MRM1m = 33, MRM2m = 34, MRM3m = 35,
+    MRM4m = 36, MRM5m = 37, MRM6m = 38, MRM7m = 39,
+    MRMDestReg = 40,
+    MRMSrcReg = 41,
+    MRMSrcReg4VOp3 = 42,
+    MRMSrcRegOp4 = 43,
+    MRMSrcRegCC = 44,
+    MRMXrCC = 46, MRMXr = 47,
+    MRM0r = 48, MRM1r = 49, MRM2r = 50, MRM3r = 51,
+    MRM4r = 52, MRM5r = 53, MRM6r = 54, MRM7r = 55,
+    MRM0X = 56, MRM1X = 57, MRM2X = 58, MRM3X = 59,
+    MRM4X = 60, MRM5X = 61, MRM6X = 62, MRM7X = 63,
+#define MAP(from, to) MRM_##from = to,
+  X86_INSTR_MRM_MAPPING
+#undef MAP
+  };
+
+  // Opcode map: values of the OpMap field from the record.
+  enum {
+    OB = 0, TB = 1, T8 = 2, TA = 3, XOP8 = 4, XOP9 = 5, XOPA = 6, ThreeDNow = 7,
+    T_MAP5 = 8, T_MAP6 = 9
+  };
+
+  // NOTE(review): presumably the mandatory-prefix (OpPrefix) values; confirm
+  // against X86InstrFormats.td.
+  enum {
+    PD = 1, XS = 2, XD = 3, PS = 4
+  };
+
+  // NOTE(review): presumably the Encoding field values (VEX/XOP/EVEX prefix
+  // kinds); confirm against X86InstrFormats.td.
+  enum {
+    VEX = 1, XOP = 2, EVEX = 3
+  };
+
+  // Operand-size override: values of the OpSize field from the record.
+  enum {
+    OpSize16 = 1, OpSize32 = 2
+  };
+
+  // Address-size override: values of the AdSize field from the record.
+  enum {
+    AdSize16 = 1, AdSize32 = 2, AdSize64 = 3
+  };
+}
+
+namespace X86Disassembler {
+
+class DisassemblerTables;
+
+/// Extract common fields of a single X86 instruction from a CodeGenInstruction
+struct RecognizableInstrBase {
+  /// The OpPrefix field from the record
+  uint8_t OpPrefix;
+  /// The OpMap field from the record
+  uint8_t OpMap;
+  /// The opcode field from the record; this is the opcode used in the Intel
+  /// encoding and therefore distinct from the UID
+  uint8_t Opcode;
+  /// The form field from the record
+  uint8_t Form;
+  /// The Encoding field from the record
+  uint8_t Encoding;
+  /// The OpSize field from the record
+  uint8_t OpSize;
+  /// The AdSize field from the record
+  uint8_t AdSize;
+  /// The hasREX_W field from the record
+  bool HasREX_W;
+  /// The hasVEX_4V field from the record
+  bool HasVEX_4V;
+  /// The HasVEX_WPrefix field from the record
+  bool HasVEX_W;
+  /// The IgnoresVEX_W field from the record
+  bool IgnoresVEX_W;
+  /// The hasVEX_L field from the record
+  bool HasVEX_L;
+  /// The ignoreVEX_L field from the record
+  bool IgnoresVEX_L;
+  /// The hasEVEX_L2Prefix field from the record
+  bool HasEVEX_L2;
+  /// The hasEVEX_K field from the record
+  bool HasEVEX_K;
+  /// The hasEVEX_KZ field from the record
+  bool HasEVEX_KZ;
+  /// The hasEVEX_B field from the record
+  bool HasEVEX_B;
+  /// Indicates that the instruction uses the L and L' fields for RC.
+  bool EncodeRC;
+  /// The isCodeGenOnly field from the record
+  bool IsCodeGenOnly;
+  /// The isAsmParserOnly field from the record
+  bool IsAsmParserOnly;
+  /// The ForceDisassemble field from the record
+  bool ForceDisassemble;
+  /// The CD8_Scale field from the record
+  uint8_t CD8_Scale;
+  /// \param insn The CodeGenInstruction to extract information from.
+  RecognizableInstrBase(const CodeGenInstruction &insn);
+  /// \returns true if this instruction should be emitted
+  bool shouldBeEmitted() const;
+};
+
+/// RecognizableInstr - Encapsulates all information required to decode a single
+/// instruction, as extracted from the LLVM instruction tables. Has methods
+/// to interpret the information available in the LLVM tables, and to emit the
+/// instruction into DisassemblerTables.
+class RecognizableInstr : public RecognizableInstrBase {
+private:
+  /// The record from the .td files corresponding to this instruction
+  const Record* Rec;
+  /// The instruction name as listed in the tables
+  std::string Name;
+  /// Whether the instruction has the predicate "In32BitMode"
+  bool Is32Bit;
+  /// Whether the instruction has the predicate "In64BitMode"
+  bool Is64Bit;
+  /// The operands of the instruction, as listed in the CodeGenInstruction.
+  /// They are not one-to-one with operands listed in the MCInst; for example,
+  /// memory operands expand to 5 operands in the MCInst
+  const std::vector<CGIOperandList::OperandInfo>* Operands;
+
+  /// The opcode of the instruction, as used in an MCInst
+  InstrUID UID;
+  /// The description of the instruction that is emitted into the instruction
+  /// info table
+  InstructionSpecifier* Spec;
+
+  /// insnContext - Returns the primary context in which the instruction is
+  /// valid.
+  ///
+  /// @return - The context in which the instruction is valid.
+  InstructionContext insnContext() const;
+
+  /// typeFromString - Translates an operand type from the string provided in
+  /// the LLVM tables to an OperandType for use in the operand specifier.
+  ///
+  /// @param s        - The string, as extracted by calling Rec->getName()
+  ///                   on a CodeGenInstruction::OperandInfo.
+  /// @param hasREX_W - Indicates whether the instruction has a REX.W
+  ///                   prefix. If it does, 32-bit register operands stay
+  ///                   32-bit regardless of the operand size.
+  /// @param OpSize   - Indicates the operand size of the instruction.
+  ///                   If register size does not match OpSize, then
+  ///                   register sizes keep their size.
+  /// @return - The operand's type.
+  static OperandType typeFromString(const std::string& s,
+                                    bool hasREX_W, uint8_t OpSize);
+
+  /// immediateEncodingFromString - Translates an immediate encoding from the
+  /// string provided in the LLVM tables to an OperandEncoding for use in
+  /// the operand specifier.
+  ///
+  /// @param s      - See typeFromString().
+  /// @param OpSize - Indicates whether this is an OpSize16 instruction.
+  ///                 If it is not, then 16-bit immediate operands stay 16-bit.
+  /// @return - The operand's encoding.
+  static OperandEncoding immediateEncodingFromString(const std::string &s,
+                                                     uint8_t OpSize);
+
+  /// rmRegisterEncodingFromString - Like immediateEncodingFromString, but
+  /// handles operands that are in the R/M field of the ModR/M byte.
+  static OperandEncoding rmRegisterEncodingFromString(const std::string &s,
+                                                      uint8_t OpSize);
+
+  /// roRegisterEncodingFromString - Like immediateEncodingFromString, but
+  /// handles operands that are in the REG field of the ModR/M byte.
+  static OperandEncoding roRegisterEncodingFromString(const std::string &s,
+                                                      uint8_t OpSize);
+  /// memoryEncodingFromString - Like immediateEncodingFromString, but handles
+  /// memory operands (ModR/M, SIB and VSIB addressing forms).
+  static OperandEncoding memoryEncodingFromString(const std::string &s,
+                                                  uint8_t OpSize);
+  /// relocationEncodingFromString - Like immediateEncodingFromString, but
+  /// handles immediates that may require relocation (branch targets, memory
+  /// offsets, string-op index registers).
+  static OperandEncoding relocationEncodingFromString(const std::string &s,
+                                                      uint8_t OpSize);
+  /// opcodeModifierEncodingFromString - Like immediateEncodingFromString, but
+  /// handles operands folded into the opcode byte itself.
+  static OperandEncoding opcodeModifierEncodingFromString(const std::string &s,
+                                                          uint8_t OpSize);
+  /// vvvvRegisterEncodingFromString - Like immediateEncodingFromString, but
+  /// handles operands encoded in the VEX/EVEX.vvvv field.
+  static OperandEncoding vvvvRegisterEncodingFromString(const std::string &s,
+                                                        uint8_t OpSize);
+  /// writemaskRegisterEncodingFromString - Like immediateEncodingFromString,
+  /// but handles EVEX opmask (writemask) register operands.
+  static OperandEncoding writemaskRegisterEncodingFromString(const std::string &s,
+                                                             uint8_t OpSize);
+
+  /// Adjust the encoding type for an operand based on the instruction.
+  void adjustOperandEncoding(OperandEncoding &encoding);
+
+  /// handleOperand - Converts a single operand from the LLVM table format to
+  /// the emitted table format, handling any duplicate operands it encounters
+  /// and then one non-duplicate.
+  ///
+  /// @param optional             - Determines whether to assert that the
+  ///                               operand exists.
+  /// @param operandIndex         - The index into the generated operand table.
+  ///                               Incremented by this function one or more
+  ///                               times to reflect possible duplicate
+  ///                               operands).
+  /// @param physicalOperandIndex - The index of the current operand into the
+  ///                               set of non-duplicate ('physical') operands.
+  ///                               Incremented by this function once.
+  /// @param numPhysicalOperands  - The number of non-duplicate operands in the
+  ///                               instructions.
+  /// @param operandMapping       - The operand mapping, which has an entry for
+  ///                               each operand that indicates whether it is a
+  ///                               duplicate, and of what.
+  void handleOperand(bool optional,
+                     unsigned &operandIndex,
+                     unsigned &physicalOperandIndex,
+                     unsigned numPhysicalOperands,
+                     const unsigned *operandMapping,
+                     OperandEncoding (*encodingFromString)
+                       (const std::string&,
+                        uint8_t OpSize));
+
+  /// emitInstructionSpecifier - Loads the instruction specifier for the current
+  /// instruction into a DisassemblerTables.
+  ///
+  void emitInstructionSpecifier();
+
+  /// emitDecodePath - Populates the proper fields in the decode tables
+  /// corresponding to the decode paths for this instruction.
+  ///
+  /// \param tables The DisassemblerTables to populate with the decode
+  ///               information for the current instruction.
+  void emitDecodePath(DisassemblerTables &tables) const;
+
+public:
+  /// Constructor - Initializes a RecognizableInstr with the appropriate fields
+  /// from a CodeGenInstruction.
+  ///
+  /// \param tables The DisassemblerTables that the specifier will be added to.
+  /// \param insn The CodeGenInstruction to extract information from.
+  /// \param uid The unique ID of the current instruction.
+  RecognizableInstr(DisassemblerTables &tables,
+                    const CodeGenInstruction &insn,
+                    InstrUID uid);
+  /// processInstr - Accepts a CodeGenInstruction and loads decode information
+  /// for it into a DisassemblerTables if appropriate.
+  ///
+  /// \param tables The DisassemblerTables to be populated with decode
+  ///               information.
+  /// \param insn The CodeGenInstruction to be used as a source for this
+  ///             information.
+  /// \param uid The unique ID of the instruction.
+  static void processInstr(DisassemblerTables &tables,
+                           const CodeGenInstruction &insn,
+                           InstrUID uid);
+};
+
+std::string getMnemonic(const CodeGenInstruction *I, unsigned Variant);
+bool isRegisterOperand(const Record *Rec);
+bool isMemoryOperand(const Record *Rec);
+bool isImmediateOperand(const Record *Rec);
+unsigned getRegOperandSize(const Record *RegRec);
+unsigned getMemOperandSize(const Record *MemRec);
+} // namespace X86Disassembler
+
+} // namespace llvm
+
+#endif
diff --git a/contrib/libs/llvm16/utils/TableGen/ya.make b/contrib/libs/llvm16/utils/TableGen/ya.make
new file mode 100644
index 0000000000..38508394f6
--- /dev/null
+++ b/contrib/libs/llvm16/utils/TableGen/ya.make
@@ -0,0 +1,84 @@
+# Generated by devtools/yamaker.
+
+PROGRAM(llvm-tblgen)
+
+LICENSE(Apache-2.0 WITH LLVM-exception)
+
+LICENSE_TEXTS(.yandex_meta/licenses.list.txt)
+
+PEERDIR(
+ contrib/libs/llvm16
+ contrib/libs/llvm16/lib/Demangle
+ contrib/libs/llvm16/lib/Support
+ contrib/libs/llvm16/lib/TableGen
+ contrib/libs/llvm16/utils/TableGen/GlobalISel
+)
+
+ADDINCL(
+ contrib/libs/llvm16/utils/TableGen
+)
+
+NO_COMPILER_WARNINGS()
+
+NO_UTIL()
+
+SRCS(
+ AsmMatcherEmitter.cpp
+ AsmWriterEmitter.cpp
+ AsmWriterInst.cpp
+ Attributes.cpp
+ CTagsEmitter.cpp
+ CallingConvEmitter.cpp
+ CodeEmitterGen.cpp
+ CodeGenDAGPatterns.cpp
+ CodeGenHwModes.cpp
+ CodeGenInstruction.cpp
+ CodeGenMapTable.cpp
+ CodeGenRegisters.cpp
+ CodeGenSchedule.cpp
+ CodeGenTarget.cpp
+ CompressInstEmitter.cpp
+ DAGISelEmitter.cpp
+ DAGISelMatcher.cpp
+ DAGISelMatcherEmitter.cpp
+ DAGISelMatcherGen.cpp
+ DAGISelMatcherOpt.cpp
+ DFAEmitter.cpp
+ DFAPacketizerEmitter.cpp
+ DXILEmitter.cpp
+ DecoderEmitter.cpp
+ DirectiveEmitter.cpp
+ DisassemblerEmitter.cpp
+ ExegesisEmitter.cpp
+ FastISelEmitter.cpp
+ GICombinerEmitter.cpp
+ GlobalISelEmitter.cpp
+ InfoByHwMode.cpp
+ InstrDocsEmitter.cpp
+ InstrInfoEmitter.cpp
+ IntrinsicEmitter.cpp
+ OptEmitter.cpp
+ OptParserEmitter.cpp
+ OptRSTEmitter.cpp
+ PredicateExpander.cpp
+ PseudoLoweringEmitter.cpp
+ RISCVTargetDefEmitter.cpp
+ RegisterBankEmitter.cpp
+ RegisterInfoEmitter.cpp
+ SDNodeProperties.cpp
+ SearchableTableEmitter.cpp
+ SubtargetEmitter.cpp
+ SubtargetFeatureInfo.cpp
+ TableGen.cpp
+ Types.cpp
+ VarLenCodeEmitterGen.cpp
+ WebAssemblyDisassemblerEmitter.cpp
+ X86DisassemblerTables.cpp
+ X86EVEX2VEXTablesEmitter.cpp
+ X86FoldTablesEmitter.cpp
+ X86MnemonicTables.cpp
+ X86ModRMFilters.cpp
+ X86RecognizableInstr.cpp
+)
+
+END()