aboutsummaryrefslogtreecommitdiffstats
path: root/contrib/libs/llvm14/include/llvm/Target/GlobalISel
diff options
context:
space:
mode:
authorvitalyisaev <vitalyisaev@yandex-team.com>2023-06-29 10:00:50 +0300
committervitalyisaev <vitalyisaev@yandex-team.com>2023-06-29 10:00:50 +0300
commit6ffe9e53658409f212834330e13564e4952558f6 (patch)
tree85b1e00183517648b228aafa7c8fb07f5276f419 /contrib/libs/llvm14/include/llvm/Target/GlobalISel
parent726057070f9c5a91fc10fde0d5024913d10f1ab9 (diff)
downloadydb-6ffe9e53658409f212834330e13564e4952558f6.tar.gz
YQ Connector: support managed ClickHouse
Со стороны dqrun можно обратиться к инстансу коннектора, который работает на streaming стенде, и извлечь данные из облачного CH.
Diffstat (limited to 'contrib/libs/llvm14/include/llvm/Target/GlobalISel')
-rw-r--r--contrib/libs/llvm14/include/llvm/Target/GlobalISel/Combine.td910
-rw-r--r--contrib/libs/llvm14/include/llvm/Target/GlobalISel/RegisterBank.td15
-rw-r--r--contrib/libs/llvm14/include/llvm/Target/GlobalISel/SelectionDAGCompat.td221
-rw-r--r--contrib/libs/llvm14/include/llvm/Target/GlobalISel/Target.td65
4 files changed, 1211 insertions, 0 deletions
diff --git a/contrib/libs/llvm14/include/llvm/Target/GlobalISel/Combine.td b/contrib/libs/llvm14/include/llvm/Target/GlobalISel/Combine.td
new file mode 100644
index 0000000000..4859cf6b57
--- /dev/null
+++ b/contrib/libs/llvm14/include/llvm/Target/GlobalISel/Combine.td
@@ -0,0 +1,910 @@
+//===- Combine.td - Combine rule definitions ---------------*- tablegen -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// Declare GlobalISel combine rules and provide mechanisms to opt-out.
+//
+//===----------------------------------------------------------------------===//
+
+// Common base class for GICombineRule and GICombineGroup.
+class GICombine {
+  // See GICombineGroup. We only declare it here to make the tablegen pass
+  // simpler. '?' leaves the field uninitialized for plain rules; groups
+  // overwrite it with their member list.
+  list<GICombine> Rules = ?;
+}
+
+// A group of combine rules that can be added to a GICombiner or another group.
+// Since Rules is a list<GICombine>, groups may nest arbitrarily.
+class GICombineGroup<list<GICombine> rules> : GICombine {
+  // The rules contained in this group. The rules in a group are flattened into
+  // a single list and sorted into whatever order is most efficient. However,
+  // they will never be re-ordered such that behaviour differs from the
+  // specified order. It is therefore possible to use the order of rules in this
+  // list to describe priorities.
+  let Rules = rules;
+}
+
+// An extra C++ argument (type and parameter name) forwarded to the generated
+// combiner entry points; see GICombinerHelper::AdditionalArguments.
+class GICombinerHelperArg<string type, string name> {
+  // C++ type of the argument as it appears in the generated signature.
+  string Type = type;
+  // Parameter name of the argument in the generated signature.
+  string Name = name;
+}
+
+// Declares a combiner helper class. The rules it (transitively) contains are
+// emitted by the tablegen backend as a class named Classname.
+class GICombinerHelper<string classname, list<GICombine> rules>
+    : GICombineGroup<rules> {
+  // The class name to use in the generated output.
+  string Classname = classname;
+  // The name of a run-time compiler option that will be generated to disable
+  // specific rules within this combiner. '?' means no such option is emitted.
+  string DisableRuleOption = ?;
+  // The state class to inherit from (if any). The generated helper will inherit
+  // from this class and will forward arguments to its constructors.
+  string StateClass = "";
+  // Any additional arguments that should be appended to the tryCombine*().
+  // By default, every rule receives a reference to the CombinerHelper, which
+  // the match/apply code fragments below use as 'Helper'.
+  list<GICombinerHelperArg> AdditionalArguments =
+      [GICombinerHelperArg<"CombinerHelper &", "Helper">];
+}
+// A single combine rule: external definitions, a match pattern, and an apply
+// action. See the 'defs', 'match', and 'apply' operators declared below.
+class GICombineRule<dag defs, dag match, dag apply> : GICombine {
+  /// Defines the external interface of the match rule. This includes:
+  /// * The names of the root nodes (requires at least one)
+  /// See GIDefKind for details.
+  dag Defs = defs;
+
+  /// Defines the things which must be true for the pattern to match
+  /// See GIMatchKind for details.
+  dag Match = match;
+
+  /// Defines the things which happen after the decision is made to apply a
+  /// combine rule.
+  /// See GIApplyKind for details.
+  dag Apply = apply;
+}
+
+/// The operator at the root of a GICombineRule.Defs dag.
+def defs;
+
+/// All arguments of the defs operator must be subclasses of GIDefKind or
+/// sub-dags whose operator is GIDefKindWithArgs.
+class GIDefKind;
+class GIDefKindWithArgs;
+/// Declare a root node. There must be at least one of these in every combine
+/// rule.
+/// TODO: The plan is to elide `root` definitions and determine it from the DAG
+///       itself with an override for situations where the usual determination
+///       is incorrect.
+def root : GIDefKind;
+
+/// Declares data that is passed from the match stage to the apply stage.
+class GIDefMatchData<string type> : GIDefKind {
+  /// A C++ type name indicating the storage type.
+  string Type = type;
+}
+
+// Match data shared by the rules below (see extending_loads,
+// combine_indexed_load_store, and hoist_logic_op_with_same_opcode_hands).
+def extending_load_matchdata : GIDefMatchData<"PreferredTuple">;
+def indexed_load_store_matchdata : GIDefMatchData<"IndexedLoadStoreMatchInfo">;
+def instruction_steps_matchdata: GIDefMatchData<"InstructionStepsMatchInfo">;
+
+/// The operator at the root of a GICombineRule.Match dag.
+def match;
+/// All arguments of the match operator must be either:
+/// * A subclass of GIMatchKind
+/// * A subclass of GIMatchKindWithArgs
+/// * A subclass of Instruction
+/// * A MIR code block (deprecated)
+/// The GIMatchKind and GIMatchKindWithArgs cases are described in more detail
+/// in their definitions below.
+/// For the Instruction case, these are collected into a DAG where operand names
+/// that occur multiple times introduce edges.
+class GIMatchKind;
+class GIMatchKindWithArgs;
+
+/// In lieu of having proper macro support. Trivial one-off opcode checks can be
+/// performed with this.
+def wip_match_opcode : GIMatchKindWithArgs;
+
+/// The operator at the root of a GICombineRule.Apply dag.
+def apply;
+/// All arguments of the apply operator must be subclasses of GIApplyKind, or
+/// sub-dags whose operator is GIApplyKindWithArgs, or an MIR block
+/// (deprecated).
+class GIApplyKind;
+class GIApplyKindWithArgs;
+
+// Frequently reused match-data storage types shared by many rules below.
+def register_matchinfo: GIDefMatchData<"Register">;
+def int64_matchinfo: GIDefMatchData<"int64_t">;
+def apint_matchinfo : GIDefMatchData<"APInt">;
+// A deferred mutation: the match stage records a callback which the apply
+// stage runs with a MachineIRBuilder.
+def build_fn_matchinfo :
+GIDefMatchData<"std::function<void(MachineIRBuilder &)>">;
+
+// Combine away COPY instructions (CombinerHelper::matchCombineCopy).
+def copy_prop : GICombineRule<
+  (defs root:$d),
+  (match (COPY $d, $s):$mi,
+ [{ return Helper.matchCombineCopy(*${mi}); }]),
+  (apply [{ Helper.applyCombineCopy(*${mi}); }])>;
+
+// Fold a load and a use of its result into an extending load
+// (CombinerHelper::matchCombineExtendingLoads).
+def extending_loads : GICombineRule<
+  (defs root:$root, extending_load_matchdata:$matchinfo),
+  (match (wip_match_opcode G_LOAD, G_SEXTLOAD, G_ZEXTLOAD):$root,
+ [{ return Helper.matchCombineExtendingLoads(*${root}, ${matchinfo}); }]),
+  (apply [{ Helper.applyCombineExtendingLoads(*${root}, ${matchinfo}); }])>;
+
+// Combine a G_AND whose operand is a load with the load itself
+// (CombinerHelper::matchCombineLoadWithAndMask).
+def load_and_mask : GICombineRule<
+  (defs root:$root, build_fn_matchinfo:$matchinfo),
+  (match (wip_match_opcode G_AND):$root,
+ [{ return Helper.matchCombineLoadWithAndMask(*${root}, ${matchinfo}); }]),
+  (apply [{ Helper.applyBuildFn(*${root}, ${matchinfo}); }])>;
+def combines_for_extload: GICombineGroup<[extending_loads, load_and_mask]>;
+
+// Fold sext_inreg fed by a truncated sign-extending load
+// (CombinerHelper::matchSextTruncSextLoad).
+def sext_trunc_sextload : GICombineRule<
+  (defs root:$d),
+  (match (wip_match_opcode G_SEXT_INREG):$d,
+ [{ return Helper.matchSextTruncSextLoad(*${d}); }]),
+  (apply [{ Helper.applySextTruncSextLoad(*${d}); }])>;
+
+def sext_inreg_of_load_matchdata : GIDefMatchData<"std::tuple<Register, unsigned>">;
+// Fold a G_SEXT_INREG whose source is a load
+// (CombinerHelper::matchSextInRegOfLoad).
+def sext_inreg_of_load : GICombineRule<
+  (defs root:$root, sext_inreg_of_load_matchdata:$matchinfo),
+  (match (wip_match_opcode G_SEXT_INREG):$root,
+ [{ return Helper.matchSextInRegOfLoad(*${root}, ${matchinfo}); }]),
+  (apply [{ Helper.applySextInRegOfLoad(*${root}, ${matchinfo}); }])>;
+
+// Form pre-/post-indexed loads and stores
+// (CombinerHelper::matchCombineIndexedLoadStore).
+def combine_indexed_load_store : GICombineRule<
+  (defs root:$root, indexed_load_store_matchdata:$matchinfo),
+  (match (wip_match_opcode G_LOAD, G_SEXTLOAD, G_ZEXTLOAD, G_STORE):$root,
+ [{ return Helper.matchCombineIndexedLoadStore(*${root}, ${matchinfo}); }]),
+  (apply [{ Helper.applyCombineIndexedLoadStore(*${root}, ${matchinfo}); }])>;
+
+def opt_brcond_by_inverting_cond_matchdata : GIDefMatchData<"MachineInstr *">;
+// Optimize a conditional branch by inverting its condition
+// (CombinerHelper::matchOptBrCondByInvertingCond).
+def opt_brcond_by_inverting_cond : GICombineRule<
+  (defs root:$root, opt_brcond_by_inverting_cond_matchdata:$matchinfo),
+  (match (wip_match_opcode G_BR):$root,
+ [{ return Helper.matchOptBrCondByInvertingCond(*${root}, ${matchinfo}); }]),
+  (apply [{ Helper.applyOptBrCondByInvertingCond(*${root}, ${matchinfo}); }])>;
+
+def ptr_add_immed_matchdata : GIDefMatchData<"PtrAddChain">;
+// Collapse a chain of G_PTR_ADD with immediate offsets
+// (CombinerHelper::matchPtrAddImmedChain).
+def ptr_add_immed_chain : GICombineRule<
+  (defs root:$d, ptr_add_immed_matchdata:$matchinfo),
+  (match (wip_match_opcode G_PTR_ADD):$d,
+ [{ return Helper.matchPtrAddImmedChain(*${d}, ${matchinfo}); }]),
+  (apply [{ Helper.applyPtrAddImmedChain(*${d}, ${matchinfo}); }])>;
+
+// Fold shift (shift base x), y -> shift base, (x+y), if shifts are same
+def shift_immed_matchdata : GIDefMatchData<"RegisterImmPair">;
+def shift_immed_chain : GICombineRule<
+  (defs root:$d, shift_immed_matchdata:$matchinfo),
+  (match (wip_match_opcode G_SHL, G_ASHR, G_LSHR, G_SSHLSAT, G_USHLSAT):$d,
+ [{ return Helper.matchShiftImmedChain(*${d}, ${matchinfo}); }]),
+  (apply [{ Helper.applyShiftImmedChain(*${d}, ${matchinfo}); }])>;
+
+// Transform shift (logic (shift X, C0), Y), C1
+//        -> logic (shift X, (C0+C1)), (shift Y, C1), if shifts are same
+def shift_of_shifted_logic_matchdata : GIDefMatchData<"ShiftOfShiftedLogic">;
+def shift_of_shifted_logic_chain : GICombineRule<
+  (defs root:$d, shift_of_shifted_logic_matchdata:$matchinfo),
+  (match (wip_match_opcode G_SHL, G_ASHR, G_LSHR, G_USHLSAT, G_SSHLSAT):$d,
+ [{ return Helper.matchShiftOfShiftedLogic(*${d}, ${matchinfo}); }]),
+  (apply [{ Helper.applyShiftOfShiftedLogic(*${d}, ${matchinfo}); }])>;
+
+def mul_to_shl_matchdata : GIDefMatchData<"unsigned">;
+// Turn a G_MUL into a shift-left (CombinerHelper::matchCombineMulToShl).
+def mul_to_shl : GICombineRule<
+  (defs root:$d, mul_to_shl_matchdata:$matchinfo),
+  (match (G_MUL $d, $op1, $op2):$mi,
+ [{ return Helper.matchCombineMulToShl(*${mi}, ${matchinfo}); }]),
+  (apply [{ Helper.applyCombineMulToShl(*${mi}, ${matchinfo}); }])>;
+
+// shl ([asz]ext x), y => zext (shl x, y), if shift does not overflow int
+def reduce_shl_of_extend_matchdata : GIDefMatchData<"RegisterImmPair">;
+def reduce_shl_of_extend : GICombineRule<
+  (defs root:$dst, reduce_shl_of_extend_matchdata:$matchinfo),
+  (match (G_SHL $dst, $src0, $src1):$mi,
+ [{ return Helper.matchCombineShlOfExtend(*${mi}, ${matchinfo}); }]),
+  (apply [{ Helper.applyCombineShlOfExtend(*${mi}, ${matchinfo}); }])>;
+
+// Narrow a binary operation that feeds a G_AND
+// (CombinerHelper::matchNarrowBinopFeedingAnd).
+def narrow_binop_feeding_and : GICombineRule<
+  (defs root:$root, build_fn_matchinfo:$matchinfo),
+  (match (wip_match_opcode G_AND):$root,
+ [{ return Helper.matchNarrowBinopFeedingAnd(*${root}, ${matchinfo}); }]),
+  (apply [{ Helper.applyBuildFnNoErase(*${root}, ${matchinfo}); }])>;
+
+// [us]itofp(undef) = 0, because the result value is bounded.
+def undef_to_fp_zero : GICombineRule<
+  (defs root:$root),
+  (match (wip_match_opcode G_UITOFP, G_SITOFP):$root,
+ [{ return Helper.matchAnyExplicitUseIsUndef(*${root}); }]),
+  (apply [{ Helper.replaceInstWithFConstant(*${root}, 0.0); }])>;
+
+// and/mul with an undef operand can be replaced with constant 0.
+def undef_to_int_zero: GICombineRule<
+  (defs root:$root),
+  (match (wip_match_opcode G_AND, G_MUL):$root,
+ [{ return Helper.matchAnyExplicitUseIsUndef(*${root}); }]),
+  (apply [{ Helper.replaceInstWithConstant(*${root}, 0); }])>;
+
+// or with an undef operand can be replaced with constant -1 (all ones).
+def undef_to_negative_one: GICombineRule<
+  (defs root:$root),
+  (match (wip_match_opcode G_OR):$root,
+ [{ return Helper.matchAnyExplicitUseIsUndef(*${root}); }]),
+  (apply [{ Helper.replaceInstWithConstant(*${root}, -1); }])>;
+
+// shl with an undef shiftee (operand 1) produces 0.
+def binop_left_undef_to_zero: GICombineRule<
+  (defs root:$root),
+  (match (wip_match_opcode G_SHL):$root,
+ [{ return Helper.matchOperandIsUndef(*${root}, 1); }]),
+  (apply [{ Helper.replaceInstWithConstant(*${root}, 0); }])>;
+
+// Instructions where if any source operand is undef, the instruction can be
+// replaced with undef.
+def propagate_undef_any_op: GICombineRule<
+  (defs root:$root),
+  (match (wip_match_opcode G_ADD, G_FPTOSI, G_FPTOUI, G_SUB, G_XOR, G_TRUNC):$root,
+ [{ return Helper.matchAnyExplicitUseIsUndef(*${root}); }]),
+  (apply [{ Helper.replaceInstWithUndef(*${root}); }])>;
+
+// Instructions where if all source operands are undef, the instruction can be
+// replaced with undef.
+def propagate_undef_all_ops: GICombineRule<
+  (defs root:$root),
+  (match (wip_match_opcode G_SHUFFLE_VECTOR):$root,
+ [{ return Helper.matchAllExplicitUsesAreUndef(*${root}); }]),
+  (apply [{ Helper.replaceInstWithUndef(*${root}); }])>;
+
+// Replace a G_SHUFFLE_VECTOR with an undef mask with a G_IMPLICIT_DEF.
+def propagate_undef_shuffle_mask: GICombineRule<
+  (defs root:$root),
+  (match (wip_match_opcode G_SHUFFLE_VECTOR):$root,
+ [{ return Helper.matchUndefShuffleVectorMask(*${root}); }]),
+  (apply [{ Helper.replaceInstWithUndef(*${root}); }])>;
+
+// Fold (cond ? x : x) -> x
+def select_same_val: GICombineRule<
+  (defs root:$root),
+  (match (wip_match_opcode G_SELECT):$root,
+ [{ return Helper.matchSelectSameVal(*${root}); }]),
+  (apply [{ return Helper.replaceSingleDefInstWithOperand(*${root}, 2); }])
+>;
+
+// Fold (undef ? x : y) -> y
+def select_undef_cmp: GICombineRule<
+  (defs root:$root),
+  (match (wip_match_opcode G_SELECT):$root,
+ [{ return Helper.matchUndefSelectCmp(*${root}); }]),
+  (apply [{ return Helper.replaceSingleDefInstWithOperand(*${root}, 2); }])
+>;
+
+// Fold (true ? x : y) -> x
+// Fold (false ? x : y) -> y
+// The matched operand index to keep is recorded in the matchinfo.
+def select_constant_cmp_matchdata : GIDefMatchData<"unsigned">;
+def select_constant_cmp: GICombineRule<
+  (defs root:$root, select_constant_cmp_matchdata:$matchinfo),
+  (match (wip_match_opcode G_SELECT):$root,
+ [{ return Helper.matchConstantSelectCmp(*${root}, ${matchinfo}); }]),
+  (apply [{ return Helper.replaceSingleDefInstWithOperand(*${root}, ${matchinfo}); }])
+>;
+
+// Fold x op 0 -> x
+def right_identity_zero: GICombineRule<
+  (defs root:$root),
+  (match (wip_match_opcode G_SUB, G_ADD, G_OR, G_XOR, G_SHL, G_ASHR, G_LSHR,
+                           G_PTR_ADD, G_ROTL, G_ROTR):$root,
+ [{ return Helper.matchConstantOp(${root}->getOperand(2), 0); }]),
+  (apply [{ return Helper.replaceSingleDefInstWithOperand(*${root}, 1); }])
+>;
+
+// Fold x op 1 -> x
+def right_identity_one: GICombineRule<
+  (defs root:$root),
+  (match (wip_match_opcode G_MUL):$root,
+ [{ return Helper.matchConstantOp(${root}->getOperand(2), 1); }]),
+  (apply [{ return Helper.replaceSingleDefInstWithOperand(*${root}, 1); }])
+>;
+
+// Fold (x op x) -> x
+def binop_same_val: GICombineRule<
+  (defs root:$root),
+  (match (wip_match_opcode G_AND, G_OR):$root,
+ [{ return Helper.matchBinOpSameVal(*${root}); }]),
+  (apply [{ return Helper.replaceSingleDefInstWithOperand(*${root}, 1); }])
+>;
+
+// Fold (0 op x) -> 0
+def binop_left_to_zero: GICombineRule<
+  (defs root:$root),
+  (match (wip_match_opcode G_SDIV, G_UDIV, G_SREM, G_UREM):$root,
+ [{ return Helper.matchOperandIsZero(*${root}, 1); }]),
+  (apply [{ return Helper.replaceSingleDefInstWithOperand(*${root}, 1); }])
+>;
+
+// Fold urem by a power of two into a mask
+// (CombinerHelper::applySimplifyURemByPow2).
+def urem_pow2_to_mask : GICombineRule<
+  (defs root:$root),
+  (match (wip_match_opcode G_UREM):$root,
+ [{ return Helper.matchOperandIsKnownToBeAPowerOfTwo(*${root}, 2); }]),
+  (apply [{ Helper.applySimplifyURemByPow2(*${root}); }])
+>;
+
+// Transform d = [su]div(x, y) and r = [su]rem(x, y) -> d, r = [su]divrem(x, y)
+def div_rem_to_divrem_matchdata : GIDefMatchData<"MachineInstr *">;
+def div_rem_to_divrem : GICombineRule<
+  (defs root:$root, div_rem_to_divrem_matchdata:$matchinfo),
+  (match (wip_match_opcode G_SDIV, G_UDIV, G_SREM, G_UREM):$root,
+ [{ return Helper.matchCombineDivRem(*${root}, ${matchinfo}); }]),
+  (apply [{ Helper.applyCombineDivRem(*${root}, ${matchinfo}); }])
+>;
+
+// Fold (x op 0) -> 0
+def binop_right_to_zero: GICombineRule<
+  (defs root:$root),
+  (match (wip_match_opcode G_MUL):$root,
+ [{ return Helper.matchOperandIsZero(*${root}, 2); }]),
+  (apply [{ return Helper.replaceSingleDefInstWithOperand(*${root}, 2); }])
+>;
+
+// Erase stores of undef values.
+def erase_undef_store : GICombineRule<
+  (defs root:$root),
+  (match (wip_match_opcode G_STORE):$root,
+ [{ return Helper.matchUndefStore(*${root}); }]),
+  (apply [{ return Helper.eraseInst(*${root}); }])
+>;
+
+// Rewrite an add as a subtract (CombinerHelper::matchSimplifyAddToSub).
+def simplify_add_to_sub_matchinfo: GIDefMatchData<"std::tuple<Register, Register>">;
+def simplify_add_to_sub: GICombineRule <
+  (defs root:$root, simplify_add_to_sub_matchinfo:$info),
+  (match (wip_match_opcode G_ADD):$root,
+ [{ return Helper.matchSimplifyAddToSub(*${root}, ${info}); }]),
+  (apply [{ Helper.applySimplifyAddToSub(*${root}, ${info});}])
+>;
+
+// Fold fp_op(cst) to the constant result of the floating point operation.
+def constant_fp_op_matchinfo: GIDefMatchData<"Optional<APFloat>">;
+def constant_fp_op: GICombineRule <
+  (defs root:$root, constant_fp_op_matchinfo:$info),
+  (match (wip_match_opcode G_FNEG, G_FABS, G_FPTRUNC, G_FSQRT, G_FLOG2):$root,
+ [{ return Helper.matchCombineConstantFoldFpUnary(*${root}, ${info}); }]),
+  (apply [{ Helper.applyCombineConstantFoldFpUnary(*${root}, ${info}); }])
+>;
+
+// Fold int2ptr(ptr2int(x)) -> x
+def p2i_to_i2p: GICombineRule<
+  (defs root:$root, register_matchinfo:$info),
+  (match (wip_match_opcode G_INTTOPTR):$root,
+ [{ return Helper.matchCombineI2PToP2I(*${root}, ${info}); }]),
+  (apply [{ Helper.applyCombineI2PToP2I(*${root}, ${info}); }])
+>;
+
+// Fold ptr2int(int2ptr(x)) -> x
+def i2p_to_p2i: GICombineRule<
+  (defs root:$root, register_matchinfo:$info),
+  (match (wip_match_opcode G_PTRTOINT):$root,
+ [{ return Helper.matchCombineP2IToI2P(*${root}, ${info}); }]),
+  (apply [{ Helper.applyCombineP2IToI2P(*${root}, ${info}); }])
+>;
+
+// Fold add ptrtoint(x), y -> ptrtoint (ptr_add x), y
+def add_p2i_to_ptradd_matchinfo : GIDefMatchData<"std::pair<Register, bool>">;
+def add_p2i_to_ptradd : GICombineRule<
+  (defs root:$root, add_p2i_to_ptradd_matchinfo:$info),
+  (match (wip_match_opcode G_ADD):$root,
+ [{ return Helper.matchCombineAddP2IToPtrAdd(*${root}, ${info}); }]),
+  (apply [{ Helper.applyCombineAddP2IToPtrAdd(*${root}, ${info}); }])
+>;
+
+// Fold (ptr_add (int2ptr C1), C2) -> C1 + C2
+def const_ptradd_to_i2p_matchinfo : GIDefMatchData<"APInt">;
+def const_ptradd_to_i2p: GICombineRule<
+  (defs root:$root, const_ptradd_to_i2p_matchinfo:$info),
+  (match (wip_match_opcode G_PTR_ADD):$root,
+ [{ return Helper.matchCombineConstPtrAddToI2P(*${root}, ${info}); }]),
+  (apply [{ Helper.applyCombineConstPtrAddToI2P(*${root}, ${info}); }])
+>;
+
+// Simplify: (logic_op (op x...), (op y...)) -> (op (logic_op x, y))
+def hoist_logic_op_with_same_opcode_hands: GICombineRule <
+  (defs root:$root, instruction_steps_matchdata:$info),
+  (match (wip_match_opcode G_AND, G_OR, G_XOR):$root,
+ [{ return Helper.matchHoistLogicOpWithSameOpcodeHands(*${root}, ${info}); }]),
+  (apply [{ Helper.applyBuildInstructionSteps(*${root}, ${info});}])
+>;
+
+// Fold ashr (shl x, C), C -> sext_inreg (C)
+def shl_ashr_to_sext_inreg_matchinfo : GIDefMatchData<"std::tuple<Register, int64_t>">;
+def shl_ashr_to_sext_inreg : GICombineRule<
+  (defs root:$root, shl_ashr_to_sext_inreg_matchinfo:$info),
+  (match (wip_match_opcode G_ASHR): $root,
+ [{ return Helper.matchAshrShlToSextInreg(*${root}, ${info}); }]),
+  (apply [{ Helper.applyAshShlToSextInreg(*${root}, ${info});}])
+>;
+
+// Fold and(and(x, C1), C2) -> C1&C2 ? and(x, C1&C2) : 0
+def overlapping_and: GICombineRule <
+  (defs root:$root, build_fn_matchinfo:$info),
+  (match (wip_match_opcode G_AND):$root,
+ [{ return Helper.matchOverlappingAnd(*${root}, ${info}); }]),
+  (apply [{ Helper.applyBuildFn(*${root}, ${info}); }])
+>;
+
+// Fold (x & y) -> x or (x & y) -> y when (x & y) is known to equal x or equal y.
+def redundant_and: GICombineRule <
+  (defs root:$root, register_matchinfo:$matchinfo),
+  (match (wip_match_opcode G_AND):$root,
+ [{ return Helper.matchRedundantAnd(*${root}, ${matchinfo}); }]),
+  (apply [{ return Helper.replaceSingleDefInstWithReg(*${root}, ${matchinfo}); }])
+>;
+
+// Fold (x | y) -> x or (x | y) -> y when (x | y) is known to equal x or equal y.
+def redundant_or: GICombineRule <
+  (defs root:$root, register_matchinfo:$matchinfo),
+  (match (wip_match_opcode G_OR):$root,
+ [{ return Helper.matchRedundantOr(*${root}, ${matchinfo}); }]),
+  (apply [{ return Helper.replaceSingleDefInstWithReg(*${root}, ${matchinfo}); }])
+>;
+
+// If the input is already sign extended, just drop the extension.
+// sext_inreg x, K ->
+//   if computeNumSignBits(x) >= (x.getScalarSizeInBits() - K + 1)
+def redundant_sext_inreg: GICombineRule <
+  (defs root:$root),
+  (match (wip_match_opcode G_SEXT_INREG):$root,
+ [{ return Helper.matchRedundantSExtInReg(*${root}); }]),
+  (apply [{ return Helper.replaceSingleDefInstWithOperand(*${root}, 1); }])
+>;
+
+// Fold (anyext (trunc x)) -> x if the source type is same as
+// the destination type.
+def anyext_trunc_fold: GICombineRule <
+  (defs root:$root, register_matchinfo:$matchinfo),
+  (match (wip_match_opcode G_ANYEXT):$root,
+ [{ return Helper.matchCombineAnyExtTrunc(*${root}, ${matchinfo}); }]),
+  (apply [{ return Helper.replaceSingleDefInstWithReg(*${root}, ${matchinfo}); }])
+>;
+
+// Fold (zext (trunc x)) -> x if the source type is same as the destination type
+// and truncated bits are known to be zero.
+def zext_trunc_fold_matchinfo : GIDefMatchData<"Register">;
+def zext_trunc_fold: GICombineRule <
+  (defs root:$root, zext_trunc_fold_matchinfo:$matchinfo),
+  (match (wip_match_opcode G_ZEXT):$root,
+ [{ return Helper.matchCombineZextTrunc(*${root}, ${matchinfo}); }]),
+  (apply [{ return Helper.replaceSingleDefInstWithReg(*${root}, ${matchinfo}); }])
+>;
+
+// Fold ([asz]ext ([asz]ext x)) -> ([asz]ext x).
+def ext_ext_fold_matchinfo : GIDefMatchData<"std::tuple<Register, unsigned>">;
+def ext_ext_fold: GICombineRule <
+  (defs root:$root, ext_ext_fold_matchinfo:$matchinfo),
+  (match (wip_match_opcode G_ANYEXT, G_SEXT, G_ZEXT):$root,
+ [{ return Helper.matchCombineExtOfExt(*${root}, ${matchinfo}); }]),
+  (apply [{ Helper.applyCombineExtOfExt(*${root}, ${matchinfo}); }])
+>;
+
+// Fold a G_XOR of a compare into an inverted compare
+// (CombinerHelper::matchNotCmp).
+def not_cmp_fold_matchinfo : GIDefMatchData<"SmallVector<Register, 4>">;
+def not_cmp_fold : GICombineRule<
+  (defs root:$d, not_cmp_fold_matchinfo:$info),
+  (match (wip_match_opcode G_XOR): $d,
+ [{ return Helper.matchNotCmp(*${d}, ${info}); }]),
+  (apply [{ Helper.applyNotCmp(*${d}, ${info}); }])
+>;
+
+// Fold (fneg (fneg x)) -> x.
+def fneg_fneg_fold: GICombineRule <
+  (defs root:$root, register_matchinfo:$matchinfo),
+  (match (wip_match_opcode G_FNEG):$root,
+ [{ return Helper.matchCombineFNegOfFNeg(*${root}, ${matchinfo}); }]),
+  (apply [{ return Helper.replaceSingleDefInstWithReg(*${root}, ${matchinfo}); }])
+>;
+
+// Fold (unmerge(merge x, y, z)) -> x, y, z.
+def unmerge_merge_matchinfo : GIDefMatchData<"SmallVector<Register, 8>">;
+def unmerge_merge : GICombineRule<
+  (defs root:$d, unmerge_merge_matchinfo:$info),
+  (match (wip_match_opcode G_UNMERGE_VALUES): $d,
+ [{ return Helper.matchCombineUnmergeMergeToPlainValues(*${d}, ${info}); }]),
+  (apply [{ Helper.applyCombineUnmergeMergeToPlainValues(*${d}, ${info}); }])
+>;
+
+// Fold merge(unmerge).
+def merge_unmerge : GICombineRule<
+  (defs root:$d, register_matchinfo:$matchinfo),
+  (match (wip_match_opcode G_MERGE_VALUES):$d,
+ [{ return Helper.matchCombineMergeUnmerge(*${d}, ${matchinfo}); }]),
+  (apply [{ Helper.replaceSingleDefInstWithReg(*${d}, ${matchinfo}); }])
+>;
+
+// Fold (fabs (fabs x)) -> (fabs x).
+def fabs_fabs_fold: GICombineRule<
+  (defs root:$root, register_matchinfo:$matchinfo),
+  (match (wip_match_opcode G_FABS):$root,
+ [{ return Helper.matchCombineFAbsOfFAbs(*${root}, ${matchinfo}); }]),
+  (apply [{ return Helper.replaceSingleDefInstWithReg(*${root}, ${matchinfo}); }])
+>;
+
+// Fold (fabs (fneg x)) -> (fabs x).
+def fabs_fneg_fold: GICombineRule <
+  (defs root:$root, build_fn_matchinfo:$matchinfo),
+  (match (wip_match_opcode G_FABS):$root,
+ [{ return Helper.matchCombineFAbsOfFNeg(*${root}, ${matchinfo}); }]),
+  (apply [{ Helper.applyBuildFnNoErase(*${root}, ${matchinfo}); }])>;
+
+// Fold (unmerge cst) -> cst1, cst2, ...
+def unmerge_cst_matchinfo : GIDefMatchData<"SmallVector<APInt, 8>">;
+def unmerge_cst : GICombineRule<
+  (defs root:$d, unmerge_cst_matchinfo:$info),
+  (match (wip_match_opcode G_UNMERGE_VALUES): $d,
+ [{ return Helper.matchCombineUnmergeConstant(*${d}, ${info}); }]),
+  (apply [{ Helper.applyCombineUnmergeConstant(*${d}, ${info}); }])
+>;
+
+// Fold (unmerge undef) -> undef, undef, ...
+def unmerge_undef : GICombineRule<
+  (defs root:$root, build_fn_matchinfo:$info),
+  (match (wip_match_opcode G_UNMERGE_VALUES): $root,
+ [{ return Helper.matchCombineUnmergeUndef(*${root}, ${info}); }]),
+  (apply [{ Helper.applyBuildFn(*${root}, ${info}); }])
+>;
+
+// Transform x,y<dead> = unmerge z -> x = trunc z.
+def unmerge_dead_to_trunc : GICombineRule<
+  (defs root:$d),
+  (match (wip_match_opcode G_UNMERGE_VALUES): $d,
+ [{ return Helper.matchCombineUnmergeWithDeadLanesToTrunc(*${d}); }]),
+  (apply [{ Helper.applyCombineUnmergeWithDeadLanesToTrunc(*${d}); }])
+>;
+
+// Transform x,y = unmerge(zext(z)) -> x = zext z; y = 0.
+def unmerge_zext_to_zext : GICombineRule<
+  (defs root:$d),
+  (match (wip_match_opcode G_UNMERGE_VALUES): $d,
+ [{ return Helper.matchCombineUnmergeZExtToZExt(*${d}); }]),
+  (apply [{ Helper.applyCombineUnmergeZExtToZExt(*${d}); }])
+>;
+
+// Fold trunc ([asz]ext x) -> x or ([asz]ext x) or (trunc x).
+def trunc_ext_fold_matchinfo : GIDefMatchData<"std::pair<Register, unsigned>">;
+def trunc_ext_fold: GICombineRule <
+  (defs root:$root, trunc_ext_fold_matchinfo:$matchinfo),
+  (match (wip_match_opcode G_TRUNC):$root,
+ [{ return Helper.matchCombineTruncOfExt(*${root}, ${matchinfo}); }]),
+  (apply [{ Helper.applyCombineTruncOfExt(*${root}, ${matchinfo}); }])
+>;
+
+// Fold trunc (shl x, K) -> shl (trunc x), K => K < VT.getScalarSizeInBits().
+def trunc_shl_matchinfo : GIDefMatchData<"std::pair<Register, Register>">;
+def trunc_shl: GICombineRule <
+  (defs root:$root, trunc_shl_matchinfo:$matchinfo),
+  (match (wip_match_opcode G_TRUNC):$root,
+ [{ return Helper.matchCombineTruncOfShl(*${root}, ${matchinfo}); }]),
+  (apply [{ Helper.applyCombineTruncOfShl(*${root}, ${matchinfo}); }])
+>;
+
+// Transform (mul x, -1) -> (sub 0, x)
+def mul_by_neg_one: GICombineRule <
+  (defs root:$root),
+  (match (wip_match_opcode G_MUL):$root,
+ [{ return Helper.matchConstantOp(${root}->getOperand(2), -1); }]),
+  (apply [{ Helper.applyCombineMulByNegativeOne(*${root}); }])
+>;
+
+// Fold (xor (and x, y), y) -> (and (not x), y)
+def xor_of_and_with_same_reg_matchinfo :
+    GIDefMatchData<"std::pair<Register, Register>">;
+def xor_of_and_with_same_reg: GICombineRule <
+  (defs root:$root, xor_of_and_with_same_reg_matchinfo:$matchinfo),
+  (match (wip_match_opcode G_XOR):$root,
+ [{ return Helper.matchXorOfAndWithSameReg(*${root}, ${matchinfo}); }]),
+  (apply [{ Helper.applyXorOfAndWithSameReg(*${root}, ${matchinfo}); }])
+>;
+
+// Transform (ptr_add 0, x) -> (int_to_ptr x)
+def ptr_add_with_zero: GICombineRule<
+  (defs root:$root),
+  (match (wip_match_opcode G_PTR_ADD):$root,
+ [{ return Helper.matchPtrAddZero(*${root}); }]),
+  (apply [{ Helper.applyPtrAddZero(*${root}); }])>;
+
+def regs_small_vec : GIDefMatchData<"SmallVector<Register, 4>">;
+// Combine a chain of G_INSERT_VECTOR_ELT into a build_vector
+// (CombinerHelper::matchCombineInsertVecElts).
+def combine_insert_vec_elts_build_vector : GICombineRule<
+  (defs root:$root, regs_small_vec:$info),
+  (match (wip_match_opcode G_INSERT_VECTOR_ELT):$root,
+ [{ return Helper.matchCombineInsertVecElts(*${root}, ${info}); }]),
+  (apply [{ Helper.applyCombineInsertVecElts(*${root}, ${info}); }])>;
+
+// Combine an or-tree of loads into a single wider load
+// (CombinerHelper::matchLoadOrCombine).
+def load_or_combine : GICombineRule<
+  (defs root:$root, build_fn_matchinfo:$info),
+  (match (wip_match_opcode G_OR):$root,
+ [{ return Helper.matchLoadOrCombine(*${root}, ${info}); }]),
+  (apply [{ Helper.applyBuildFn(*${root}, ${info}); }])>;
+
+
+def truncstore_merge_matcdata : GIDefMatchData<"MergeTruncStoresInfo">;
+// Merge a series of truncating stores into one wider store
+// (CombinerHelper::matchTruncStoreMerge).
+def truncstore_merge : GICombineRule<
+  (defs root:$root, truncstore_merge_matcdata:$info),
+  (match (wip_match_opcode G_STORE):$root,
+ [{ return Helper.matchTruncStoreMerge(*${root}, ${info}); }]),
+  (apply [{ Helper.applyTruncStoreMerge(*${root}, ${info}); }])>;
+
+def extend_through_phis_matchdata: GIDefMatchData<"MachineInstr*">;
+// Push extensions through G_PHI nodes
+// (CombinerHelper::matchExtendThroughPhis).
+def extend_through_phis : GICombineRule<
+  (defs root:$root, extend_through_phis_matchdata:$matchinfo),
+  (match (wip_match_opcode G_PHI):$root,
+ [{ return Helper.matchExtendThroughPhis(*${root}, ${matchinfo}); }]),
+  (apply [{ Helper.applyExtendThroughPhis(*${root}, ${matchinfo}); }])>;
+
+// Currently only the one insert-vector-element combine above.
+def insert_vec_elt_combines : GICombineGroup<
+  [combine_insert_vec_elts_build_vector]>;
+
+// Fold an extract of a build_vector element to the built value
+// (CombinerHelper::matchExtractVecEltBuildVec).
+def extract_vec_elt_build_vec : GICombineRule<
+  (defs root:$root, register_matchinfo:$matchinfo),
+  (match (wip_match_opcode G_EXTRACT_VECTOR_ELT):$root,
+ [{ return Helper.matchExtractVecEltBuildVec(*${root}, ${matchinfo}); }]),
+  (apply [{ Helper.applyExtractVecEltBuildVec(*${root}, ${matchinfo}); }])>;
+
+// Fold away full elt extracts from a build_vector.
+def extract_all_elts_from_build_vector_matchinfo :
+  GIDefMatchData<"SmallVector<std::pair<Register, MachineInstr*>>">;
+def extract_all_elts_from_build_vector : GICombineRule<
+  (defs root:$root, extract_all_elts_from_build_vector_matchinfo:$matchinfo),
+  (match (wip_match_opcode G_BUILD_VECTOR):$root,
+ [{ return Helper.matchExtractAllEltsFromBuildVector(*${root}, ${matchinfo}); }]),
+  (apply [{ Helper.applyExtractAllEltsFromBuildVector(*${root}, ${matchinfo}); }])>;
+
+def extract_vec_elt_combines : GICombineGroup<[
+  extract_vec_elt_build_vec,
+  extract_all_elts_from_build_vector]>;
+
+// Form a funnel shift from an or of shifts
+// (CombinerHelper::matchOrShiftToFunnelShift).
+def funnel_shift_from_or_shift : GICombineRule<
+  (defs root:$root, build_fn_matchinfo:$info),
+  (match (wip_match_opcode G_OR):$root,
+ [{ return Helper.matchOrShiftToFunnelShift(*${root}, ${info}); }]),
+  (apply [{ Helper.applyBuildFn(*${root}, ${info}); }])
+>;
+
+// Turn a funnel shift with equal operands into a rotate
+// (CombinerHelper::matchFunnelShiftToRotate).
+def funnel_shift_to_rotate : GICombineRule<
+  (defs root:$root),
+  (match (wip_match_opcode G_FSHL, G_FSHR):$root,
+ [{ return Helper.matchFunnelShiftToRotate(*${root}); }]),
+  (apply [{ Helper.applyFunnelShiftToRotate(*${root}); }])
+>;
+
+// Normalize rotate amounts that are out of range
+// (CombinerHelper::matchRotateOutOfRange).
+def rotate_out_of_range : GICombineRule<
+  (defs root:$root),
+  (match (wip_match_opcode G_ROTR, G_ROTL):$root,
+ [{ return Helper.matchRotateOutOfRange(*${root}); }]),
+  (apply [{ Helper.applyRotateOutOfRange(*${root}); }])
+>;
+
+// Replace a G_ICMP whose result is known from known-bits analysis with the
+// constant result (CombinerHelper::matchICmpToTrueFalseKnownBits).
+def icmp_to_true_false_known_bits : GICombineRule<
+  (defs root:$d, int64_matchinfo:$matchinfo),
+  (match (wip_match_opcode G_ICMP):$d,
+ [{ return Helper.matchICmpToTrueFalseKnownBits(*${d}, ${matchinfo}); }]),
+  (apply [{ Helper.replaceInstWithConstant(*${d}, ${matchinfo}); }])>;
+
+// Simplify a G_ICMP using known bits of its LHS
+// (CombinerHelper::matchICmpToLHSKnownBits).
+def icmp_to_lhs_known_bits : GICombineRule<
+  (defs root:$root, build_fn_matchinfo:$info),
+  (match (wip_match_opcode G_ICMP):$root,
+ [{ return Helper.matchICmpToLHSKnownBits(*${root}, ${info}); }]),
+  (apply [{ Helper.applyBuildFn(*${root}, ${info}); }])>;
+
+// Simplify a G_AND of an or with a disjoint mask
+// (CombinerHelper::matchAndOrDisjointMask).
+def and_or_disjoint_mask : GICombineRule<
+  (defs root:$root, build_fn_matchinfo:$info),
+  (match (wip_match_opcode G_AND):$root,
+ [{ return Helper.matchAndOrDisjointMask(*${root}, ${info}); }]),
+  (apply [{ Helper.applyBuildFnNoErase(*${root}, ${info}); }])>;
+
+// Form a bitfield extract from a G_AND pattern
+// (CombinerHelper::matchBitfieldExtractFromAnd).
+def bitfield_extract_from_and : GICombineRule<
+  (defs root:$root, build_fn_matchinfo:$info),
+  (match (wip_match_opcode G_AND):$root,
+ [{ return Helper.matchBitfieldExtractFromAnd(*${root}, ${info}); }]),
+  (apply [{ Helper.applyBuildFn(*${root}, ${info}); }])>;
+
+def funnel_shift_combines : GICombineGroup<[funnel_shift_from_or_shift,
+                                            funnel_shift_to_rotate]>;
+
+// Form a bitfield extract from a G_SEXT_INREG pattern
+// (CombinerHelper::matchBitfieldExtractFromSExtInReg).
+def bitfield_extract_from_sext_inreg : GICombineRule<
+  (defs root:$root, build_fn_matchinfo:$info),
+  (match (wip_match_opcode G_SEXT_INREG):$root,
+ [{ return Helper.matchBitfieldExtractFromSExtInReg(*${root}, ${info}); }]),
+  (apply [{ Helper.applyBuildFn(*${root}, ${info}); }])>;
+
+// Form a bitfield extract from a shift-right pattern
+// (CombinerHelper::matchBitfieldExtractFromShr).
+def bitfield_extract_from_shr : GICombineRule<
+  (defs root:$root, build_fn_matchinfo:$info),
+  (match (wip_match_opcode G_ASHR, G_LSHR):$root,
+ [{ return Helper.matchBitfieldExtractFromShr(*${root}, ${info}); }]),
+  (apply [{ Helper.applyBuildFn(*${root}, ${info}); }])>;
+
+// Form a bitfield extract from a shift-right-of-and pattern
+// (CombinerHelper::matchBitfieldExtractFromShrAnd).
+def bitfield_extract_from_shr_and : GICombineRule<
+  (defs root:$root, build_fn_matchinfo:$info),
+  (match (wip_match_opcode G_ASHR, G_LSHR):$root,
+ [{ return Helper.matchBitfieldExtractFromShrAnd(*${root}, ${info}); }]),
+  (apply [{ Helper.applyBuildFn(*${root}, ${info}); }])>;
+
+def form_bitfield_extract : GICombineGroup<[bitfield_extract_from_sext_inreg,
+                                            bitfield_extract_from_and,
+                                            bitfield_extract_from_shr,
+                                            bitfield_extract_from_shr_and]>;
+
+// Expand unsigned division by a constant
+// (CombinerHelper::matchUDivByConst).
+def udiv_by_const : GICombineRule<
+  (defs root:$root),
+  (match (wip_match_opcode G_UDIV):$root,
+ [{ return Helper.matchUDivByConst(*${root}); }]),
+  (apply [{ Helper.applyUDivByConst(*${root}); }])>;
+
+def intdiv_combines : GICombineGroup<[udiv_by_const]>;
+
+def reassoc_ptradd : GICombineRule<
+ (defs root:$root, build_fn_matchinfo:$matchinfo),
+ (match (wip_match_opcode G_PTR_ADD):$root,
+ [{ return Helper.matchReassocPtrAdd(*${root}, ${matchinfo}); }]),
+ (apply [{ Helper.applyBuildFnNoErase(*${root}, ${matchinfo}); }])>;
+
+def reassocs : GICombineGroup<[reassoc_ptradd]>;
+
+// Constant fold operations.
+def constant_fold : GICombineRule<
+ (defs root:$d, apint_matchinfo:$matchinfo),
+ (match (wip_match_opcode G_ADD, G_SUB, G_MUL, G_AND, G_OR, G_XOR):$d,
+ [{ return Helper.matchConstantFold(*${d}, ${matchinfo}); }]),
+ (apply [{ Helper.replaceInstWithConstant(*${d}, ${matchinfo}); }])>;
+
+def mulo_by_2: GICombineRule<
+ (defs root:$root, build_fn_matchinfo:$matchinfo),
+ (match (wip_match_opcode G_UMULO, G_SMULO):$root,
+ [{ return Helper.matchMulOBy2(*${root}, ${matchinfo}); }]),
+ (apply [{ Helper.applyBuildFnNoErase(*${root}, ${matchinfo}); }])>;
+
+def mulh_to_lshr : GICombineRule<
+ (defs root:$root),
+ (match (wip_match_opcode G_UMULH):$root,
+ [{ return Helper.matchUMulHToLShr(*${root}); }]),
+ (apply [{ Helper.applyUMulHToLShr(*${root}); }])>;
+
+def mulh_combines : GICombineGroup<[mulh_to_lshr]>;
+
+def redundant_neg_operands: GICombineRule<
+ (defs root:$root, build_fn_matchinfo:$matchinfo),
+ (match (wip_match_opcode G_FADD, G_FSUB, G_FMUL, G_FDIV, G_FMAD, G_FMA):$root,
+ [{ return Helper.matchRedundantNegOperands(*${root}, ${matchinfo}); }]),
+ (apply [{ Helper.applyBuildFnNoErase(*${root}, ${matchinfo}); }])>;
+
+// Transform (fadd x, (fmul y, z)) -> (fma y, z, x)
+// (fadd x, (fmul y, z)) -> (fmad y, z, x)
+// Transform (fadd (fmul x, y), z) -> (fma x, y, z)
+// (fadd (fmul x, y), z) -> (fmad x, y, z)
+def combine_fadd_fmul_to_fmad_or_fma: GICombineRule<
+ (defs root:$root, build_fn_matchinfo:$info),
+ (match (wip_match_opcode G_FADD):$root,
+ [{ return Helper.matchCombineFAddFMulToFMadOrFMA(*${root},
+ ${info}); }]),
+ (apply [{ Helper.applyBuildFn(*${root}, ${info}); }])>;
+
+// Transform (fadd (fpext (fmul x, y)), z) -> (fma (fpext x), (fpext y), z)
+// -> (fmad (fpext x), (fpext y), z)
+// Transform (fadd x, (fpext (fmul y, z))) -> (fma (fpext y), (fpext z), x)
+// -> (fmad (fpext y), (fpext z), x)
+def combine_fadd_fpext_fmul_to_fmad_or_fma: GICombineRule<
+ (defs root:$root, build_fn_matchinfo:$info),
+ (match (wip_match_opcode G_FADD):$root,
+ [{ return Helper.matchCombineFAddFpExtFMulToFMadOrFMA(*${root},
+ ${info}); }]),
+ (apply [{ Helper.applyBuildFn(*${root}, ${info}); }])>;
+
+// Transform (fadd (fma x, y, (fmul z, u)), v) -> (fma x, y, (fma z, u, v))
+// (fadd (fmad x, y, (fmul z, u)), v) -> (fmad x, y, (fmad z, u, v))
+// Transform (fadd v, (fma x, y, (fmul z, u))) -> (fma x, y, (fma z, u, v))
+// (fadd v, (fmad x, y, (fmul z, u))) -> (fmad x, y, (fmad z, u, v))
+def combine_fadd_fma_fmul_to_fmad_or_fma: GICombineRule<
+ (defs root:$root, build_fn_matchinfo:$info),
+ (match (wip_match_opcode G_FADD):$root,
+ [{ return Helper.matchCombineFAddFMAFMulToFMadOrFMA(*${root},
+ ${info}); }]),
+ (apply [{ Helper.applyBuildFn(*${root}, ${info}); }])>;
+
+// Transform (fadd (fma x, y, (fpext (fmul u, v))), z) ->
+// (fma x, y, (fma (fpext u), (fpext v), z))
+def combine_fadd_fpext_fma_fmul_to_fmad_or_fma: GICombineRule<
+ (defs root:$root, build_fn_matchinfo:$info),
+ (match (wip_match_opcode G_FADD):$root,
+ [{ return Helper.matchCombineFAddFpExtFMulToFMadOrFMAAggressive(
+ *${root}, ${info}); }]),
+ (apply [{ Helper.applyBuildFn(*${root}, ${info}); }])>;
+
+// Transform (fsub (fmul x, y), z) -> (fma x, y, -z)
+// -> (fmad x, y, -z)
+def combine_fsub_fmul_to_fmad_or_fma: GICombineRule<
+ (defs root:$root, build_fn_matchinfo:$info),
+ (match (wip_match_opcode G_FSUB):$root,
+ [{ return Helper.matchCombineFSubFMulToFMadOrFMA(*${root},
+ ${info}); }]),
+ (apply [{ Helper.applyBuildFn(*${root}, ${info}); }])>;
+
+// Transform (fsub (fneg (fmul x, y)), z) -> (fma (fneg x), y, (fneg z))
+// (fsub x, (fneg (fmul y, z))) -> (fma y, z, x)
+def combine_fsub_fneg_fmul_to_fmad_or_fma: GICombineRule<
+ (defs root:$root, build_fn_matchinfo:$info),
+ (match (wip_match_opcode G_FSUB):$root,
+ [{ return Helper.matchCombineFSubFNegFMulToFMadOrFMA(*${root},
+ ${info}); }]),
+ (apply [{ Helper.applyBuildFn(*${root}, ${info}); }])>;
+
+// Transform (fsub (fpext (fmul x, y)), z) ->
+// (fma (fpext x), (fpext y), (fneg z))
+def combine_fsub_fpext_fmul_to_fmad_or_fma: GICombineRule<
+ (defs root:$root, build_fn_matchinfo:$info),
+ (match (wip_match_opcode G_FSUB):$root,
+ [{ return Helper.matchCombineFSubFpExtFMulToFMadOrFMA(*${root},
+ ${info}); }]),
+ (apply [{ Helper.applyBuildFn(*${root}, ${info}); }])>;
+
+// Transform (fsub (fneg (fpext (fmul x, y))), z) ->
+// (fneg (fma (fpext x), (fpext y), z))
+def combine_fsub_fpext_fneg_fmul_to_fmad_or_fma: GICombineRule<
+ (defs root:$root, build_fn_matchinfo:$info),
+ (match (wip_match_opcode G_FSUB):$root,
+ [{ return Helper.matchCombineFSubFpExtFNegFMulToFMadOrFMA(
+ *${root}, ${info}); }]),
+ (apply [{ Helper.applyBuildFn(*${root}, ${info}); }])>;
+
+// FIXME: These should use the custom predicate feature once it lands.
+def undef_combines : GICombineGroup<[undef_to_fp_zero, undef_to_int_zero,
+ undef_to_negative_one,
+ binop_left_undef_to_zero,
+ propagate_undef_any_op,
+ propagate_undef_all_ops,
+ propagate_undef_shuffle_mask,
+ erase_undef_store,
+ unmerge_undef]>;
+
+def identity_combines : GICombineGroup<[select_same_val, right_identity_zero,
+ binop_same_val, binop_left_to_zero,
+ binop_right_to_zero, p2i_to_i2p,
+ i2p_to_p2i, anyext_trunc_fold,
+ fneg_fneg_fold, right_identity_one]>;
+
+def const_combines : GICombineGroup<[constant_fp_op, const_ptradd_to_i2p,
+ overlapping_and, mulo_by_2]>;
+
+def known_bits_simplifications : GICombineGroup<[
+ redundant_and, redundant_sext_inreg, redundant_or, urem_pow2_to_mask,
+ zext_trunc_fold, icmp_to_true_false_known_bits, icmp_to_lhs_known_bits]>;
+
+def width_reduction_combines : GICombineGroup<[reduce_shl_of_extend,
+ narrow_binop_feeding_and]>;
+
+def phi_combines : GICombineGroup<[extend_through_phis]>;
+
+def select_combines : GICombineGroup<[select_undef_cmp, select_constant_cmp]>;
+
+def trivial_combines : GICombineGroup<[copy_prop, mul_to_shl, add_p2i_to_ptradd,
+ mul_by_neg_one]>;
+
+def fma_combines : GICombineGroup<[combine_fadd_fmul_to_fmad_or_fma,
+ combine_fadd_fpext_fmul_to_fmad_or_fma, combine_fadd_fma_fmul_to_fmad_or_fma,
+ combine_fadd_fpext_fma_fmul_to_fmad_or_fma, combine_fsub_fmul_to_fmad_or_fma,
+ combine_fsub_fneg_fmul_to_fmad_or_fma, combine_fsub_fpext_fmul_to_fmad_or_fma,
+ combine_fsub_fpext_fneg_fmul_to_fmad_or_fma]>;
+
+def all_combines : GICombineGroup<[trivial_combines, insert_vec_elt_combines,
+ extract_vec_elt_combines, combines_for_extload,
+ combine_indexed_load_store, undef_combines, identity_combines, phi_combines,
+ simplify_add_to_sub, hoist_logic_op_with_same_opcode_hands,
+ reassocs, ptr_add_immed_chain,
+ shl_ashr_to_sext_inreg, sext_inreg_of_load,
+ width_reduction_combines, select_combines,
+ known_bits_simplifications, ext_ext_fold,
+ not_cmp_fold, opt_brcond_by_inverting_cond,
+ unmerge_merge, fabs_fabs_fold, unmerge_cst, unmerge_dead_to_trunc,
+ unmerge_zext_to_zext, merge_unmerge, trunc_ext_fold, trunc_shl,
+ const_combines, xor_of_and_with_same_reg, ptr_add_with_zero,
+ shift_immed_chain, shift_of_shifted_logic_chain, load_or_combine,
+ truncstore_merge, div_rem_to_divrem, funnel_shift_combines,
+ form_bitfield_extract, constant_fold, fabs_fneg_fold,
+ intdiv_combines, mulh_combines, redundant_neg_operands,
+ and_or_disjoint_mask, fma_combines]>;
+
+// A combine group used for prelegalizer combiners at -O0. The combines in
+// this group have been selected based on experiments to balance code size and
+// compile time performance.
+def optnone_combines : GICombineGroup<[trivial_combines,
+ ptr_add_immed_chain, combines_for_extload,
+ not_cmp_fold, opt_brcond_by_inverting_cond]>;
diff --git a/contrib/libs/llvm14/include/llvm/Target/GlobalISel/RegisterBank.td b/contrib/libs/llvm14/include/llvm/Target/GlobalISel/RegisterBank.td
new file mode 100644
index 0000000000..51578b66b1
--- /dev/null
+++ b/contrib/libs/llvm14/include/llvm/Target/GlobalISel/RegisterBank.td
@@ -0,0 +1,15 @@
+//===- RegisterBank.td - Register bank definitions ---------*- tablegen -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+//
+//===----------------------------------------------------------------------===//
+
+class RegisterBank<string name, list<RegisterClass> classes> {
+ string Name = name;
+ list<RegisterClass> RegisterClasses = classes;
+}
diff --git a/contrib/libs/llvm14/include/llvm/Target/GlobalISel/SelectionDAGCompat.td b/contrib/libs/llvm14/include/llvm/Target/GlobalISel/SelectionDAGCompat.td
new file mode 100644
index 0000000000..12eee24b57
--- /dev/null
+++ b/contrib/libs/llvm14/include/llvm/Target/GlobalISel/SelectionDAGCompat.td
@@ -0,0 +1,221 @@
+//===- TargetGlobalISel.td - Common code for GlobalISel ----*- tablegen -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// This file defines the target-independent interfaces used to support
+// SelectionDAG instruction selection patterns (specified in
+// TargetSelectionDAG.td) when generating GlobalISel instruction selectors.
+//
+// This is intended as a compatibility layer, to enable reuse of target
+// descriptions written for SelectionDAG without requiring explicit GlobalISel
+// support. It will eventually supersede SelectionDAG patterns.
+//
+//===----------------------------------------------------------------------===//
+
+// Declare that a generic Instruction is 'equivalent' to an SDNode, that is,
+// SelectionDAG patterns involving the SDNode can be transformed to match the
+// Instruction instead.
+class GINodeEquiv<Instruction i, SDNode node> {
+ Instruction I = i;
+ SDNode Node = node;
+
+ // SelectionDAG has separate nodes for atomic and non-atomic memory operations
+ // (ISD::LOAD, ISD::ATOMIC_LOAD, ISD::STORE, ISD::ATOMIC_STORE) but GlobalISel
+ // stores this information in the MachineMemOperand.
+ bit CheckMMOIsNonAtomic = false;
+ bit CheckMMOIsAtomic = false;
+
+ // SelectionDAG has one node for all loads and uses predicates to
+ // differentiate them. GlobalISel on the other hand uses separate opcodes.
+ // When this is true, the resulting opcode is G_LOAD/G_SEXTLOAD/G_ZEXTLOAD
+ // depending on the predicates on the node.
+ Instruction IfSignExtend = ?;
+ Instruction IfZeroExtend = ?;
+
+ // SelectionDAG has one setcc for all compares. This differentiates
+ // for G_ICMP and G_FCMP.
+ Instruction IfFloatingPoint = ?;
+}
+
+// These are defined in the same order as the G_* instructions.
+def : GINodeEquiv<G_ANYEXT, anyext>;
+def : GINodeEquiv<G_SEXT, sext>;
+def : GINodeEquiv<G_ZEXT, zext>;
+def : GINodeEquiv<G_TRUNC, trunc>;
+def : GINodeEquiv<G_BITCAST, bitconvert>;
+// G_INTTOPTR - SelectionDAG has no equivalent.
+// G_PTRTOINT - SelectionDAG has no equivalent.
+def : GINodeEquiv<G_CONSTANT, imm>;
+// timm must not be materialized and therefore has no GlobalISel equivalent
+def : GINodeEquiv<G_FCONSTANT, fpimm>;
+def : GINodeEquiv<G_IMPLICIT_DEF, undef>;
+def : GINodeEquiv<G_FRAME_INDEX, frameindex>;
+def : GINodeEquiv<G_BLOCK_ADDR, blockaddress>;
+def : GINodeEquiv<G_ADD, add>;
+def : GINodeEquiv<G_SUB, sub>;
+def : GINodeEquiv<G_MUL, mul>;
+def : GINodeEquiv<G_UMULH, mulhu>;
+def : GINodeEquiv<G_SMULH, mulhs>;
+def : GINodeEquiv<G_SDIV, sdiv>;
+def : GINodeEquiv<G_UDIV, udiv>;
+def : GINodeEquiv<G_SREM, srem>;
+def : GINodeEquiv<G_UREM, urem>;
+def : GINodeEquiv<G_AND, and>;
+def : GINodeEquiv<G_OR, or>;
+def : GINodeEquiv<G_XOR, xor>;
+def : GINodeEquiv<G_SHL, shl>;
+def : GINodeEquiv<G_LSHR, srl>;
+def : GINodeEquiv<G_ASHR, sra>;
+def : GINodeEquiv<G_SADDSAT, saddsat>;
+def : GINodeEquiv<G_UADDSAT, uaddsat>;
+def : GINodeEquiv<G_SSUBSAT, ssubsat>;
+def : GINodeEquiv<G_USUBSAT, usubsat>;
+def : GINodeEquiv<G_SSHLSAT, sshlsat>;
+def : GINodeEquiv<G_USHLSAT, ushlsat>;
+def : GINodeEquiv<G_SMULFIX, smulfix>;
+def : GINodeEquiv<G_UMULFIX, umulfix>;
+def : GINodeEquiv<G_SMULFIXSAT, smulfixsat>;
+def : GINodeEquiv<G_UMULFIXSAT, umulfixsat>;
+def : GINodeEquiv<G_SDIVFIX, sdivfix>;
+def : GINodeEquiv<G_UDIVFIX, udivfix>;
+def : GINodeEquiv<G_SDIVFIXSAT, sdivfixsat>;
+def : GINodeEquiv<G_UDIVFIXSAT, udivfixsat>;
+def : GINodeEquiv<G_SELECT, select>;
+def : GINodeEquiv<G_FNEG, fneg>;
+def : GINodeEquiv<G_FPEXT, fpextend>;
+def : GINodeEquiv<G_FPTRUNC, fpround>;
+def : GINodeEquiv<G_FPTOSI, fp_to_sint>;
+def : GINodeEquiv<G_FPTOUI, fp_to_uint>;
+def : GINodeEquiv<G_SITOFP, sint_to_fp>;
+def : GINodeEquiv<G_UITOFP, uint_to_fp>;
+def : GINodeEquiv<G_FADD, fadd>;
+def : GINodeEquiv<G_FSUB, fsub>;
+def : GINodeEquiv<G_FMA, fma>;
+def : GINodeEquiv<G_FMAD, fmad>;
+def : GINodeEquiv<G_FMUL, fmul>;
+def : GINodeEquiv<G_FDIV, fdiv>;
+def : GINodeEquiv<G_FREM, frem>;
+def : GINodeEquiv<G_FPOW, fpow>;
+def : GINodeEquiv<G_FEXP2, fexp2>;
+def : GINodeEquiv<G_FLOG2, flog2>;
+def : GINodeEquiv<G_FCANONICALIZE, fcanonicalize>;
+def : GINodeEquiv<G_INTRINSIC, intrinsic_wo_chain>;
+// ISD::INTRINSIC_VOID can also be handled with G_INTRINSIC_W_SIDE_EFFECTS.
+def : GINodeEquiv<G_INTRINSIC_W_SIDE_EFFECTS, intrinsic_void>;
+def : GINodeEquiv<G_INTRINSIC_W_SIDE_EFFECTS, intrinsic_w_chain>;
+def : GINodeEquiv<G_BR, br>;
+def : GINodeEquiv<G_BSWAP, bswap>;
+def : GINodeEquiv<G_BITREVERSE, bitreverse>;
+def : GINodeEquiv<G_FSHL, fshl>;
+def : GINodeEquiv<G_FSHR, fshr>;
+def : GINodeEquiv<G_CTLZ, ctlz>;
+def : GINodeEquiv<G_CTTZ, cttz>;
+def : GINodeEquiv<G_CTLZ_ZERO_UNDEF, ctlz_zero_undef>;
+def : GINodeEquiv<G_CTTZ_ZERO_UNDEF, cttz_zero_undef>;
+def : GINodeEquiv<G_CTPOP, ctpop>;
+def : GINodeEquiv<G_EXTRACT_VECTOR_ELT, extractelt>;
+def : GINodeEquiv<G_CONCAT_VECTORS, concat_vectors>;
+def : GINodeEquiv<G_BUILD_VECTOR, build_vector>;
+def : GINodeEquiv<G_FCEIL, fceil>;
+def : GINodeEquiv<G_FCOS, fcos>;
+def : GINodeEquiv<G_FSIN, fsin>;
+def : GINodeEquiv<G_FABS, fabs>;
+def : GINodeEquiv<G_FSQRT, fsqrt>;
+def : GINodeEquiv<G_FFLOOR, ffloor>;
+def : GINodeEquiv<G_FRINT, frint>;
+def : GINodeEquiv<G_FNEARBYINT, fnearbyint>;
+def : GINodeEquiv<G_INTRINSIC_TRUNC, ftrunc>;
+def : GINodeEquiv<G_INTRINSIC_ROUND, fround>;
+def : GINodeEquiv<G_INTRINSIC_LRINT, lrint>;
+def : GINodeEquiv<G_FCOPYSIGN, fcopysign>;
+def : GINodeEquiv<G_SMIN, smin>;
+def : GINodeEquiv<G_SMAX, smax>;
+def : GINodeEquiv<G_UMIN, umin>;
+def : GINodeEquiv<G_UMAX, umax>;
+def : GINodeEquiv<G_ABS, abs>;
+def : GINodeEquiv<G_FMINNUM, fminnum>;
+def : GINodeEquiv<G_FMAXNUM, fmaxnum>;
+def : GINodeEquiv<G_FMINNUM_IEEE, fminnum_ieee>;
+def : GINodeEquiv<G_FMAXNUM_IEEE, fmaxnum_ieee>;
+def : GINodeEquiv<G_READCYCLECOUNTER, readcyclecounter>;
+def : GINodeEquiv<G_ROTR, rotr>;
+def : GINodeEquiv<G_ROTL, rotl>;
+def : GINodeEquiv<G_LROUND, lround>;
+def : GINodeEquiv<G_LLROUND, llround>;
+
+def : GINodeEquiv<G_STRICT_FADD, strict_fadd>;
+def : GINodeEquiv<G_STRICT_FSUB, strict_fsub>;
+def : GINodeEquiv<G_STRICT_FMUL, strict_fmul>;
+def : GINodeEquiv<G_STRICT_FDIV, strict_fdiv>;
+def : GINodeEquiv<G_STRICT_FREM, strict_frem>;
+def : GINodeEquiv<G_STRICT_FMA, strict_fma>;
+def : GINodeEquiv<G_STRICT_FSQRT, strict_fsqrt>;
+
+// Broadly speaking G_LOAD is equivalent to ISD::LOAD but there are some
+// complications that tablegen must take care of. For example, Predicates such
+// as isSignExtLoad require that this is not a perfect 1:1 mapping since a
+// sign-extending load is (G_SEXTLOAD x) in GlobalISel. Additionally,
+// G_LOAD handles both atomic and non-atomic loads whereas SelectionDAG had
+// separate nodes for them. This GINodeEquiv maps the non-atomic loads to
+// G_LOAD with a non-atomic MachineMemOperand.
+def : GINodeEquiv<G_LOAD, ld> {
+ let CheckMMOIsNonAtomic = true;
+ let IfSignExtend = G_SEXTLOAD;
+ let IfZeroExtend = G_ZEXTLOAD;
+}
+
+def : GINodeEquiv<G_ICMP, setcc> {
+ let IfFloatingPoint = G_FCMP;
+}
+
+// Broadly speaking G_STORE is equivalent to ISD::STORE but there are some
+// complications that tablegen must take care of. For example, predicates such
+// as isTruncStore require that this is not a perfect 1:1 mapping since a
+// truncating store is (G_STORE (G_TRUNC x)) in GlobalISel. Additionally,
+// G_STORE handles both atomic and non-atomic stores whereas SelectionDAG had
+// separate nodes for them. This GINodeEquiv maps the non-atomic stores to
+// G_STORE with a non-atomic MachineMemOperand.
+def : GINodeEquiv<G_STORE, st> { let CheckMMOIsNonAtomic = true; }
+
+def : GINodeEquiv<G_LOAD, atomic_load> {
+ let CheckMMOIsNonAtomic = false;
+ let CheckMMOIsAtomic = true;
+}
+
+// Operands are swapped for atomic_store vs. regular store
+def : GINodeEquiv<G_STORE, atomic_store> {
+ let CheckMMOIsNonAtomic = false;
+ let CheckMMOIsAtomic = true;
+}
+
+def : GINodeEquiv<G_ATOMIC_CMPXCHG, atomic_cmp_swap>;
+def : GINodeEquiv<G_ATOMICRMW_XCHG, atomic_swap>;
+def : GINodeEquiv<G_ATOMICRMW_ADD, atomic_load_add>;
+def : GINodeEquiv<G_ATOMICRMW_SUB, atomic_load_sub>;
+def : GINodeEquiv<G_ATOMICRMW_AND, atomic_load_and>;
+def : GINodeEquiv<G_ATOMICRMW_NAND, atomic_load_nand>;
+def : GINodeEquiv<G_ATOMICRMW_OR, atomic_load_or>;
+def : GINodeEquiv<G_ATOMICRMW_XOR, atomic_load_xor>;
+def : GINodeEquiv<G_ATOMICRMW_MIN, atomic_load_min>;
+def : GINodeEquiv<G_ATOMICRMW_MAX, atomic_load_max>;
+def : GINodeEquiv<G_ATOMICRMW_UMIN, atomic_load_umin>;
+def : GINodeEquiv<G_ATOMICRMW_UMAX, atomic_load_umax>;
+def : GINodeEquiv<G_ATOMICRMW_FADD, atomic_load_fadd>;
+def : GINodeEquiv<G_ATOMICRMW_FSUB, atomic_load_fsub>;
+def : GINodeEquiv<G_FENCE, atomic_fence>;
+
+// Specifies the GlobalISel equivalents for SelectionDAG's ComplexPattern.
+// Should be used on defs that subclass GIComplexOperandMatcher<>.
+class GIComplexPatternEquiv<ComplexPattern seldag> {
+ ComplexPattern SelDAGEquivalent = seldag;
+}
+
+// Specifies the GlobalISel equivalents for SelectionDAG's SDNodeXForm.
+// Should be used on defs that subclass GICustomOperandRenderer<>.
+class GISDNodeXFormEquiv<SDNodeXForm seldag> {
+ SDNodeXForm SelDAGEquivalent = seldag;
+}
diff --git a/contrib/libs/llvm14/include/llvm/Target/GlobalISel/Target.td b/contrib/libs/llvm14/include/llvm/Target/GlobalISel/Target.td
new file mode 100644
index 0000000000..135d4a5e0d
--- /dev/null
+++ b/contrib/libs/llvm14/include/llvm/Target/GlobalISel/Target.td
@@ -0,0 +1,65 @@
+//===- Target.td - Define GlobalISel rules -----------------*- tablegen -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// This file defines the target-independent interfaces used to support
+// SelectionDAG instruction selection patterns (specified in
+// TargetSelectionDAG.td) when generating GlobalISel instruction selectors.
+//
+// This is intended as a compatibility layer, to enable reuse of target
+// descriptions written for SelectionDAG without requiring explicit GlobalISel
+// support. It will eventually supersede SelectionDAG patterns.
+//
+//===----------------------------------------------------------------------===//
+
+// Definitions that inherit from LLT define types that will be used in the
+// GlobalISel matcher.
+class LLT;
+
+def s32 : LLT;
+def s64 : LLT;
+
+// Defines a matcher for complex operands. This is analogous to ComplexPattern
+// from SelectionDAG.
+//
+// Definitions that inherit from this may also inherit from
+// GIComplexPatternEquiv to enable the import of SelectionDAG patterns involving
+// those ComplexPatterns.
+class GIComplexOperandMatcher<LLT type, string matcherfn> {
+ // The expected type of the root of the match.
+ //
+ // TODO: We should probably support, any-type, any-scalar, and multiple types
+ // in the future.
+ LLT Type = type;
+
+ // The function that determines whether the operand matches. It should be of
+ // the form:
+ // ComplexRendererFn select(MachineOperand &Root) const;
+ // where Root is the root of the match. The function should return nullptr
+ // on match failure, or a ComplexRendererFn that renders the operand in case
+ // of a successful match.
+ string MatcherFn = matcherfn;
+}
+
+// Defines a custom renderer. This is analogous to SDNodeXForm from
+// SelectionDAG. Unlike SDNodeXForm, this matches a MachineInstr and
+// renders directly to the result instruction without an intermediate node.
+//
+// Definitions that inherit from this may also inherit from GISDNodeXFormEquiv
+// to enable the import of SelectionDAG patterns involving those SDNodeXForms.
+class GICustomOperandRenderer<string rendererfn> {
+ // The function renders the operand(s) of the matched instruction to
+ // the specified instruction. It should be of the form:
+ // void render(MachineInstrBuilder &MIB, const MachineInstr &MI,
+ // int OpIdx = -1)
+ //
+ // If OpIdx is specified (i.e. not invalid/negative), this
+ // references the source operand MI.getOperand(OpIdx). Otherwise,
+ // this is the value defined by MI. This is to support the case
+ // where there is no corresponding instruction to match.
+ string RendererFn = rendererfn;
+}