author     shadchin <shadchin@yandex-team.ru>  2022-02-10 16:44:39 +0300
committer  Daniil Cherednik <dcherednik@yandex-team.ru>  2022-02-10 16:44:39 +0300
commit     e9656aae26e0358d5378e5b63dcac5c8dbe0e4d0 (patch)
tree       64175d5cadab313b3e7039ebaa06c5bc3295e274 /contrib/libs/llvm12/include/llvm/Target
parent     2598ef1d0aee359b4b6d5fdd1758916d5907d04f (diff)
download   ydb-e9656aae26e0358d5378e5b63dcac5c8dbe0e4d0.tar.gz
Restoring authorship annotation for <shadchin@yandex-team.ru>. Commit 2 of 2.
Diffstat (limited to 'contrib/libs/llvm12/include/llvm/Target')
14 files changed, 1392 insertions, 1392 deletions
diff --git a/contrib/libs/llvm12/include/llvm/Target/CGPassBuilderOption.h b/contrib/libs/llvm12/include/llvm/Target/CGPassBuilderOption.h index 6becdb015c..d8e856f066 100644 --- a/contrib/libs/llvm12/include/llvm/Target/CGPassBuilderOption.h +++ b/contrib/libs/llvm12/include/llvm/Target/CGPassBuilderOption.h @@ -1,76 +1,76 @@ -#pragma once - -#ifdef __GNUC__ -#pragma GCC diagnostic push -#pragma GCC diagnostic ignored "-Wunused-parameter" -#endif - -//===- CGPassBuilderOption.h - Options for pass builder ---------*- C++ -*-===// -// -// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. -// See https://llvm.org/LICENSE.txt for license information. -// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception -// -//===----------------------------------------------------------------------===// -// -// This file declares the CCState and CCValAssign classes, used for lowering -// and implementing calling conventions. -// -//===----------------------------------------------------------------------===// - -#ifndef LLVM_CODEGEN_PASSBUILDER_OPTION_H -#define LLVM_CODEGEN_PASSBUILDER_OPTION_H - -#include "llvm/ADT/Optional.h" -#include "llvm/ADT/StringRef.h" -#include "llvm/Target/TargetOptions.h" -#include <vector> - -namespace llvm { -class TargetMachine; - -enum class RunOutliner { TargetDefault, AlwaysOutline, NeverOutline }; -enum class RegAllocType { Default, Basic, Fast, Greedy, PBQP }; -enum class CFLAAType { None, Steensgaard, Andersen, Both }; - -// Not one-on-one but mostly corresponding to commandline options in -// TargetPassConfig.cpp. -struct CGPassBuilderOption { - Optional<bool> OptimizeRegAlloc; - Optional<bool> EnableIPRA; - bool DebugPM = false; - bool DisableVerify = false; - bool EnableImplicitNullChecks = false; - bool EnableBlockPlacementStats = false; - bool MISchedPostRA = false; - bool EarlyLiveIntervals = false; - - bool DisableLSR = false; - bool DisableCGP = false; - bool PrintLSR = false; - bool DisableMergeICmps = false; - bool DisablePartialLibcallInlining = false; - bool DisableConstantHoisting = false; - bool PrintISelInput = false; - bool PrintGCInfo = false; - bool RequiresCodeGenSCCOrder = false; - - RunOutliner EnableMachineOutliner = RunOutliner::TargetDefault; - RegAllocType RegAlloc = RegAllocType::Default; - CFLAAType UseCFLAA = CFLAAType::None; - Optional<GlobalISelAbortMode> EnableGlobalISelAbort; - - Optional<bool> VerifyMachineCode; - Optional<bool> EnableFastISelOption; - Optional<bool> EnableGlobalISelOption; -}; - -CGPassBuilderOption getCGPassBuilderOption(); - -} // namespace llvm - -#endif // LLVM_CODEGEN_PASSBUILDER_OPTION_H - -#ifdef __GNUC__ -#pragma GCC diagnostic pop -#endif +#pragma once + +#ifdef __GNUC__ +#pragma GCC diagnostic push +#pragma GCC diagnostic ignored "-Wunused-parameter" +#endif + +//===- CGPassBuilderOption.h - Options for pass builder ---------*- C++ -*-===// +// +// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. +// See https://llvm.org/LICENSE.txt for license information. +// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception +// +//===----------------------------------------------------------------------===// +// +// This file declares the CCState and CCValAssign classes, used for lowering +// and implementing calling conventions. 
+// +//===----------------------------------------------------------------------===// + +#ifndef LLVM_CODEGEN_PASSBUILDER_OPTION_H +#define LLVM_CODEGEN_PASSBUILDER_OPTION_H + +#include "llvm/ADT/Optional.h" +#include "llvm/ADT/StringRef.h" +#include "llvm/Target/TargetOptions.h" +#include <vector> + +namespace llvm { +class TargetMachine; + +enum class RunOutliner { TargetDefault, AlwaysOutline, NeverOutline }; +enum class RegAllocType { Default, Basic, Fast, Greedy, PBQP }; +enum class CFLAAType { None, Steensgaard, Andersen, Both }; + +// Not one-on-one but mostly corresponding to commandline options in +// TargetPassConfig.cpp. +struct CGPassBuilderOption { + Optional<bool> OptimizeRegAlloc; + Optional<bool> EnableIPRA; + bool DebugPM = false; + bool DisableVerify = false; + bool EnableImplicitNullChecks = false; + bool EnableBlockPlacementStats = false; + bool MISchedPostRA = false; + bool EarlyLiveIntervals = false; + + bool DisableLSR = false; + bool DisableCGP = false; + bool PrintLSR = false; + bool DisableMergeICmps = false; + bool DisablePartialLibcallInlining = false; + bool DisableConstantHoisting = false; + bool PrintISelInput = false; + bool PrintGCInfo = false; + bool RequiresCodeGenSCCOrder = false; + + RunOutliner EnableMachineOutliner = RunOutliner::TargetDefault; + RegAllocType RegAlloc = RegAllocType::Default; + CFLAAType UseCFLAA = CFLAAType::None; + Optional<GlobalISelAbortMode> EnableGlobalISelAbort; + + Optional<bool> VerifyMachineCode; + Optional<bool> EnableFastISelOption; + Optional<bool> EnableGlobalISelOption; +}; + +CGPassBuilderOption getCGPassBuilderOption(); + +} // namespace llvm + +#endif // LLVM_CODEGEN_PASSBUILDER_OPTION_H + +#ifdef __GNUC__ +#pragma GCC diagnostic pop +#endif diff --git a/contrib/libs/llvm12/include/llvm/Target/GenericOpcodes.td b/contrib/libs/llvm12/include/llvm/Target/GenericOpcodes.td index fb2e8b1de8..209925969d 100644 --- a/contrib/libs/llvm12/include/llvm/Target/GenericOpcodes.td +++ b/contrib/libs/llvm12/include/llvm/Target/GenericOpcodes.td @@ -16,7 +16,7 @@ //------------------------------------------------------------------------------ class GenericInstruction : StandardPseudoInstruction { - let isPreISelOpcode = true; + let isPreISelOpcode = true; } // Provide a variant of an instruction with the same operands, but @@ -31,8 +31,8 @@ class ConstrainedIntruction<GenericInstruction baseInst> : // TODO: Do we need a better way to mark reads from FP mode than // hasSideEffects? 
- let hasSideEffects = true; - let mayRaiseFPException = true; + let hasSideEffects = true; + let mayRaiseFPException = true; } // Extend the underlying scalar type of an operation, leaving the high bits @@ -40,7 +40,7 @@ class ConstrainedIntruction<GenericInstruction baseInst> : def G_ANYEXT : GenericInstruction { let OutOperandList = (outs type0:$dst); let InOperandList = (ins type1:$src); - let hasSideEffects = false; + let hasSideEffects = false; } // Sign extend the underlying scalar type of an operation, copying the sign bit @@ -48,7 +48,7 @@ def G_ANYEXT : GenericInstruction { def G_SEXT : GenericInstruction { let OutOperandList = (outs type0:$dst); let InOperandList = (ins type1:$src); - let hasSideEffects = false; + let hasSideEffects = false; } // Sign extend the a value from an arbitrary bit position, copying the sign bit @@ -62,7 +62,7 @@ def G_SEXT : GenericInstruction { def G_SEXT_INREG : GenericInstruction { let OutOperandList = (outs type0:$dst); let InOperandList = (ins type0:$src, untyped_imm_0:$sz); - let hasSideEffects = false; + let hasSideEffects = false; } // Zero extend the underlying scalar type of an operation, putting zero bits @@ -70,7 +70,7 @@ def G_SEXT_INREG : GenericInstruction { def G_ZEXT : GenericInstruction { let OutOperandList = (outs type0:$dst); let InOperandList = (ins type1:$src); - let hasSideEffects = false; + let hasSideEffects = false; } @@ -79,150 +79,150 @@ def G_ZEXT : GenericInstruction { def G_TRUNC : GenericInstruction { let OutOperandList = (outs type0:$dst); let InOperandList = (ins type1:$src); - let hasSideEffects = false; + let hasSideEffects = false; } def G_IMPLICIT_DEF : GenericInstruction { let OutOperandList = (outs type0:$dst); let InOperandList = (ins); - let hasSideEffects = false; + let hasSideEffects = false; } def G_PHI : GenericInstruction { let OutOperandList = (outs type0:$dst); let InOperandList = (ins variable_ops); - let hasSideEffects = false; + let hasSideEffects = false; } def G_FRAME_INDEX : GenericInstruction { let OutOperandList = (outs type0:$dst); let InOperandList = (ins unknown:$src2); - let hasSideEffects = false; + let hasSideEffects = false; } def G_GLOBAL_VALUE : GenericInstruction { let OutOperandList = (outs type0:$dst); let InOperandList = (ins unknown:$src); - let hasSideEffects = false; + let hasSideEffects = false; } def G_INTTOPTR : GenericInstruction { let OutOperandList = (outs type0:$dst); let InOperandList = (ins type1:$src); - let hasSideEffects = false; + let hasSideEffects = false; } def G_PTRTOINT : GenericInstruction { let OutOperandList = (outs type0:$dst); let InOperandList = (ins type1:$src); - let hasSideEffects = false; + let hasSideEffects = false; } def G_BITCAST : GenericInstruction { let OutOperandList = (outs type0:$dst); let InOperandList = (ins type1:$src); - let hasSideEffects = false; + let hasSideEffects = false; } // Only supports scalar result types def G_CONSTANT : GenericInstruction { let OutOperandList = (outs type0:$dst); let InOperandList = (ins unknown:$imm); - let hasSideEffects = false; + let hasSideEffects = false; } // Only supports scalar result types def G_FCONSTANT : GenericInstruction { let OutOperandList = (outs type0:$dst); let InOperandList = (ins unknown:$imm); - let hasSideEffects = false; + let hasSideEffects = false; } def G_VASTART : GenericInstruction { let OutOperandList = (outs); let InOperandList = (ins type0:$list); - let hasSideEffects = false; - let mayStore = true; + let hasSideEffects = false; + let mayStore = true; } def G_VAARG : 
GenericInstruction { let OutOperandList = (outs type0:$val); let InOperandList = (ins type1:$list, unknown:$align); - let hasSideEffects = false; - let mayLoad = true; - let mayStore = true; + let hasSideEffects = false; + let mayLoad = true; + let mayStore = true; } def G_CTLZ : GenericInstruction { let OutOperandList = (outs type0:$dst); let InOperandList = (ins type1:$src); - let hasSideEffects = false; + let hasSideEffects = false; } def G_CTLZ_ZERO_UNDEF : GenericInstruction { let OutOperandList = (outs type0:$dst); let InOperandList = (ins type1:$src); - let hasSideEffects = false; + let hasSideEffects = false; } def G_CTTZ : GenericInstruction { let OutOperandList = (outs type0:$dst); let InOperandList = (ins type1:$src); - let hasSideEffects = false; + let hasSideEffects = false; } def G_CTTZ_ZERO_UNDEF : GenericInstruction { let OutOperandList = (outs type0:$dst); let InOperandList = (ins type1:$src); - let hasSideEffects = false; + let hasSideEffects = false; } def G_CTPOP : GenericInstruction { let OutOperandList = (outs type0:$dst); let InOperandList = (ins type1:$src); - let hasSideEffects = false; + let hasSideEffects = false; } def G_BSWAP : GenericInstruction { let OutOperandList = (outs type0:$dst); let InOperandList = (ins type0:$src); - let hasSideEffects = false; + let hasSideEffects = false; } def G_BITREVERSE : GenericInstruction { let OutOperandList = (outs type0:$dst); let InOperandList = (ins type0:$src); - let hasSideEffects = false; + let hasSideEffects = false; } def G_ADDRSPACE_CAST : GenericInstruction { let OutOperandList = (outs type0:$dst); let InOperandList = (ins type1:$src); - let hasSideEffects = false; + let hasSideEffects = false; } def G_BLOCK_ADDR : GenericInstruction { let OutOperandList = (outs type0:$dst); let InOperandList = (ins unknown:$ba); - let hasSideEffects = false; + let hasSideEffects = false; } def G_JUMP_TABLE : GenericInstruction { let OutOperandList = (outs type0:$dst); let InOperandList = (ins unknown:$jti); - let hasSideEffects = false; + let hasSideEffects = false; } def G_DYN_STACKALLOC : GenericInstruction { let OutOperandList = (outs ptype0:$dst); let InOperandList = (ins type1:$size, i32imm:$align); - let hasSideEffects = true; + let hasSideEffects = true; } def G_FREEZE : GenericInstruction { let OutOperandList = (outs type0:$dst); let InOperandList = (ins type0:$src); - let hasSideEffects = false; + let hasSideEffects = false; } //------------------------------------------------------------------------------ @@ -233,101 +233,101 @@ def G_FREEZE : GenericInstruction { def G_ADD : GenericInstruction { let OutOperandList = (outs type0:$dst); let InOperandList = (ins type0:$src1, type0:$src2); - let hasSideEffects = false; - let isCommutable = true; + let hasSideEffects = false; + let isCommutable = true; } // Generic subtraction. def G_SUB : GenericInstruction { let OutOperandList = (outs type0:$dst); let InOperandList = (ins type0:$src1, type0:$src2); - let hasSideEffects = false; - let isCommutable = false; + let hasSideEffects = false; + let isCommutable = false; } // Generic multiplication. def G_MUL : GenericInstruction { let OutOperandList = (outs type0:$dst); let InOperandList = (ins type0:$src1, type0:$src2); - let hasSideEffects = false; - let isCommutable = true; + let hasSideEffects = false; + let isCommutable = true; } // Generic signed division. 
def G_SDIV : GenericInstruction { let OutOperandList = (outs type0:$dst); let InOperandList = (ins type0:$src1, type0:$src2); - let hasSideEffects = false; - let isCommutable = false; + let hasSideEffects = false; + let isCommutable = false; } // Generic unsigned division. def G_UDIV : GenericInstruction { let OutOperandList = (outs type0:$dst); let InOperandList = (ins type0:$src1, type0:$src2); - let hasSideEffects = false; - let isCommutable = false; + let hasSideEffects = false; + let isCommutable = false; } // Generic signed remainder. def G_SREM : GenericInstruction { let OutOperandList = (outs type0:$dst); let InOperandList = (ins type0:$src1, type0:$src2); - let hasSideEffects = false; - let isCommutable = false; + let hasSideEffects = false; + let isCommutable = false; } // Generic unsigned remainder. def G_UREM : GenericInstruction { let OutOperandList = (outs type0:$dst); let InOperandList = (ins type0:$src1, type0:$src2); - let hasSideEffects = false; - let isCommutable = false; + let hasSideEffects = false; + let isCommutable = false; } // Generic bitwise and. def G_AND : GenericInstruction { let OutOperandList = (outs type0:$dst); let InOperandList = (ins type0:$src1, type0:$src2); - let hasSideEffects = false; - let isCommutable = true; + let hasSideEffects = false; + let isCommutable = true; } // Generic bitwise or. def G_OR : GenericInstruction { let OutOperandList = (outs type0:$dst); let InOperandList = (ins type0:$src1, type0:$src2); - let hasSideEffects = false; - let isCommutable = true; + let hasSideEffects = false; + let isCommutable = true; } // Generic bitwise xor. def G_XOR : GenericInstruction { let OutOperandList = (outs type0:$dst); let InOperandList = (ins type0:$src1, type0:$src2); - let hasSideEffects = false; - let isCommutable = true; + let hasSideEffects = false; + let isCommutable = true; } // Generic left-shift. def G_SHL : GenericInstruction { let OutOperandList = (outs type0:$dst); let InOperandList = (ins type0:$src1, type1:$src2); - let hasSideEffects = false; + let hasSideEffects = false; } // Generic logical right-shift. def G_LSHR : GenericInstruction { let OutOperandList = (outs type0:$dst); let InOperandList = (ins type0:$src1, type1:$src2); - let hasSideEffects = false; + let hasSideEffects = false; } // Generic arithmetic right-shift. def G_ASHR : GenericInstruction { let OutOperandList = (outs type0:$dst); let InOperandList = (ins type0:$src1, type1:$src2); - let hasSideEffects = false; + let hasSideEffects = false; } /// Funnel 'double' shifts take 3 operands, 2 inputs and the shift amount. @@ -335,7 +335,7 @@ def G_ASHR : GenericInstruction { def G_FSHL : GenericInstruction { let OutOperandList = (outs type0:$dst); let InOperandList = (ins type0:$src1, type0:$src2, type1:$src3); - let hasSideEffects = false; + let hasSideEffects = false; } /// Funnel 'double' shifts take 3 operands, 2 inputs and the shift amount. @@ -343,35 +343,35 @@ def G_FSHL : GenericInstruction { def G_FSHR : GenericInstruction { let OutOperandList = (outs type0:$dst); let InOperandList = (ins type0:$src1, type0:$src2, type1:$src3); - let hasSideEffects = false; + let hasSideEffects = false; } // Generic integer comparison. def G_ICMP : GenericInstruction { let OutOperandList = (outs type0:$dst); let InOperandList = (ins unknown:$tst, type1:$src1, type1:$src2); - let hasSideEffects = false; + let hasSideEffects = false; } // Generic floating-point comparison. 
def G_FCMP : GenericInstruction { let OutOperandList = (outs type0:$dst); let InOperandList = (ins unknown:$tst, type1:$src1, type1:$src2); - let hasSideEffects = false; + let hasSideEffects = false; } // Generic select def G_SELECT : GenericInstruction { let OutOperandList = (outs type0:$dst); let InOperandList = (ins type1:$tst, type0:$src1, type0:$src2); - let hasSideEffects = false; + let hasSideEffects = false; } // Generic pointer offset. def G_PTR_ADD : GenericInstruction { let OutOperandList = (outs type0:$dst); let InOperandList = (ins type0:$src1, type1:$src2); - let hasSideEffects = false; + let hasSideEffects = false; } // Generic pointer mask. type1 should be an integer with the same @@ -379,48 +379,48 @@ def G_PTR_ADD : GenericInstruction { def G_PTRMASK : GenericInstruction { let OutOperandList = (outs ptype0:$dst); let InOperandList = (ins ptype0:$src, type1:$bits); - let hasSideEffects = false; + let hasSideEffects = false; } // Generic signed integer minimum. def G_SMIN : GenericInstruction { let OutOperandList = (outs type0:$dst); let InOperandList = (ins type0:$src1, type0:$src2); - let hasSideEffects = false; - let isCommutable = true; + let hasSideEffects = false; + let isCommutable = true; } // Generic signed integer maximum. def G_SMAX : GenericInstruction { let OutOperandList = (outs type0:$dst); let InOperandList = (ins type0:$src1, type0:$src2); - let hasSideEffects = false; - let isCommutable = true; + let hasSideEffects = false; + let isCommutable = true; } // Generic unsigned integer minimum. def G_UMIN : GenericInstruction { let OutOperandList = (outs type0:$dst); let InOperandList = (ins type0:$src1, type0:$src2); - let hasSideEffects = false; - let isCommutable = true; + let hasSideEffects = false; + let isCommutable = true; } // Generic unsigned integer maximum. def G_UMAX : GenericInstruction { let OutOperandList = (outs type0:$dst); let InOperandList = (ins type0:$src1, type0:$src2); - let hasSideEffects = false; - let isCommutable = true; + let hasSideEffects = false; + let isCommutable = true; +} + +// Generic integer absolute value. +def G_ABS : GenericInstruction { + let OutOperandList = (outs type0:$dst); + let InOperandList = (ins type0:$src); + let hasSideEffects = false; } -// Generic integer absolute value. -def G_ABS : GenericInstruction { - let OutOperandList = (outs type0:$dst); - let InOperandList = (ins type0:$src); - let hasSideEffects = false; -} - //------------------------------------------------------------------------------ // Overflow ops //------------------------------------------------------------------------------ @@ -429,73 +429,73 @@ def G_ABS : GenericInstruction { def G_UADDO : GenericInstruction { let OutOperandList = (outs type0:$dst, type1:$carry_out); let InOperandList = (ins type0:$src1, type0:$src2); - let hasSideEffects = false; - let isCommutable = true; + let hasSideEffects = false; + let isCommutable = true; } // Generic unsigned addition consuming and producing a carry flag. def G_UADDE : GenericInstruction { let OutOperandList = (outs type0:$dst, type1:$carry_out); let InOperandList = (ins type0:$src1, type0:$src2, type1:$carry_in); - let hasSideEffects = false; + let hasSideEffects = false; } // Generic signed addition producing a carry flag. 
def G_SADDO : GenericInstruction { let OutOperandList = (outs type0:$dst, type1:$carry_out); let InOperandList = (ins type0:$src1, type0:$src2); - let hasSideEffects = false; - let isCommutable = true; + let hasSideEffects = false; + let isCommutable = true; } // Generic signed addition consuming and producing a carry flag. def G_SADDE : GenericInstruction { let OutOperandList = (outs type0:$dst, type1:$carry_out); let InOperandList = (ins type0:$src1, type0:$src2, type1:$carry_in); - let hasSideEffects = false; + let hasSideEffects = false; } // Generic unsigned subtraction producing a carry flag. def G_USUBO : GenericInstruction { let OutOperandList = (outs type0:$dst, type1:$carry_out); let InOperandList = (ins type0:$src1, type0:$src2); - let hasSideEffects = false; + let hasSideEffects = false; } // Generic unsigned subtraction consuming and producing a carry flag. def G_USUBE : GenericInstruction { let OutOperandList = (outs type0:$dst, type1:$carry_out); let InOperandList = (ins type0:$src1, type0:$src2, type1:$carry_in); - let hasSideEffects = false; + let hasSideEffects = false; } // Generic signed subtraction producing a carry flag. def G_SSUBO : GenericInstruction { let OutOperandList = (outs type0:$dst, type1:$carry_out); let InOperandList = (ins type0:$src1, type0:$src2); - let hasSideEffects = false; + let hasSideEffects = false; } // Generic signed subtraction consuming and producing a carry flag. def G_SSUBE : GenericInstruction { let OutOperandList = (outs type0:$dst, type1:$carry_out); let InOperandList = (ins type0:$src1, type0:$src2, type1:$carry_in); - let hasSideEffects = false; + let hasSideEffects = false; } // Generic unsigned multiplication producing a carry flag. def G_UMULO : GenericInstruction { let OutOperandList = (outs type0:$dst, type1:$carry_out); let InOperandList = (ins type0:$src1, type0:$src2); - let hasSideEffects = false; - let isCommutable = true; + let hasSideEffects = false; + let isCommutable = true; } // Generic signed multiplication producing a carry flag. def G_SMULO : GenericInstruction { let OutOperandList = (outs type0:$dst, type1:$carry_out); let InOperandList = (ins type0:$src1, type0:$src2); - let hasSideEffects = false; - let isCommutable = true; + let hasSideEffects = false; + let isCommutable = true; } // Multiply two numbers at twice the incoming bit width (unsigned) and return @@ -503,8 +503,8 @@ def G_SMULO : GenericInstruction { def G_UMULH : GenericInstruction { let OutOperandList = (outs type0:$dst); let InOperandList = (ins type0:$src1, type0:$src2); - let hasSideEffects = false; - let isCommutable = true; + let hasSideEffects = false; + let isCommutable = true; } // Multiply two numbers at twice the incoming bit width (signed) and return @@ -512,8 +512,8 @@ def G_UMULH : GenericInstruction { def G_SMULH : GenericInstruction { let OutOperandList = (outs type0:$dst); let InOperandList = (ins type0:$src1, type0:$src2); - let hasSideEffects = false; - let isCommutable = true; + let hasSideEffects = false; + let isCommutable = true; } //------------------------------------------------------------------------------ @@ -524,121 +524,121 @@ def G_SMULH : GenericInstruction { def G_UADDSAT : GenericInstruction { let OutOperandList = (outs type0:$dst); let InOperandList = (ins type0:$src1, type0:$src2); - let hasSideEffects = false; - let isCommutable = true; + let hasSideEffects = false; + let isCommutable = true; } // Generic saturating signed addition. 
def G_SADDSAT : GenericInstruction { let OutOperandList = (outs type0:$dst); let InOperandList = (ins type0:$src1, type0:$src2); - let hasSideEffects = false; - let isCommutable = true; + let hasSideEffects = false; + let isCommutable = true; } // Generic saturating unsigned subtraction. def G_USUBSAT : GenericInstruction { let OutOperandList = (outs type0:$dst); let InOperandList = (ins type0:$src1, type0:$src2); - let hasSideEffects = false; - let isCommutable = false; + let hasSideEffects = false; + let isCommutable = false; } // Generic saturating signed subtraction. def G_SSUBSAT : GenericInstruction { let OutOperandList = (outs type0:$dst); let InOperandList = (ins type0:$src1, type0:$src2); - let hasSideEffects = false; - let isCommutable = false; -} - -// Generic saturating unsigned left shift. -def G_USHLSAT : GenericInstruction { - let OutOperandList = (outs type0:$dst); - let InOperandList = (ins type0:$src1, type1:$src2); - let hasSideEffects = false; - let isCommutable = false; -} - -// Generic saturating signed left shift. -def G_SSHLSAT : GenericInstruction { - let OutOperandList = (outs type0:$dst); - let InOperandList = (ins type0:$src1, type1:$src2); - let hasSideEffects = false; - let isCommutable = false; -} - -/// RESULT = [US]MULFIX(LHS, RHS, SCALE) - Perform fixed point -/// multiplication on 2 integers with the same width and scale. SCALE -/// represents the scale of both operands as fixed point numbers. This -/// SCALE parameter must be a constant integer. A scale of zero is -/// effectively performing multiplication on 2 integers. -def G_SMULFIX : GenericInstruction { - let OutOperandList = (outs type0:$dst); - let InOperandList = (ins type0:$src0, type0:$src1, untyped_imm_0:$scale); - let hasSideEffects = false; - let isCommutable = true; -} - -def G_UMULFIX : GenericInstruction { - let OutOperandList = (outs type0:$dst); - let InOperandList = (ins type0:$src0, type0:$src1, untyped_imm_0:$scale); - let hasSideEffects = false; - let isCommutable = true; -} - -/// Same as the corresponding unsaturated fixed point instructions, but the -/// result is clamped between the min and max values representable by the -/// bits of the first 2 operands. -def G_SMULFIXSAT : GenericInstruction { - let OutOperandList = (outs type0:$dst); - let InOperandList = (ins type0:$src0, type0:$src1, untyped_imm_0:$scale); - let hasSideEffects = false; - let isCommutable = true; -} - -def G_UMULFIXSAT : GenericInstruction { - let OutOperandList = (outs type0:$dst); - let InOperandList = (ins type0:$src0, type0:$src1, untyped_imm_0:$scale); - let hasSideEffects = false; - let isCommutable = true; -} - -/// RESULT = [US]DIVFIX(LHS, RHS, SCALE) - Perform fixed point division on -/// 2 integers with the same width and scale. SCALE represents the scale -/// of both operands as fixed point numbers. This SCALE parameter must be a -/// constant integer. -def G_SDIVFIX : GenericInstruction { - let OutOperandList = (outs type0:$dst); - let InOperandList = (ins type0:$src0, type0:$src1, untyped_imm_0:$scale); - let hasSideEffects = false; - let isCommutable = false; -} - -def G_UDIVFIX : GenericInstruction { - let OutOperandList = (outs type0:$dst); - let InOperandList = (ins type0:$src0, type0:$src1, untyped_imm_0:$scale); - let hasSideEffects = false; - let isCommutable = false; -} - -/// Same as the corresponding unsaturated fixed point instructions, -/// but the result is clamped between the min and max values -/// representable by the bits of the first 2 operands. 
-def G_SDIVFIXSAT : GenericInstruction { - let OutOperandList = (outs type0:$dst); - let InOperandList = (ins type0:$src0, type0:$src1, untyped_imm_0:$scale); - let hasSideEffects = false; - let isCommutable = false; -} - -def G_UDIVFIXSAT : GenericInstruction { - let OutOperandList = (outs type0:$dst); - let InOperandList = (ins type0:$src0, type0:$src1, untyped_imm_0:$scale); - let hasSideEffects = false; - let isCommutable = false; -} - + let hasSideEffects = false; + let isCommutable = false; +} + +// Generic saturating unsigned left shift. +def G_USHLSAT : GenericInstruction { + let OutOperandList = (outs type0:$dst); + let InOperandList = (ins type0:$src1, type1:$src2); + let hasSideEffects = false; + let isCommutable = false; +} + +// Generic saturating signed left shift. +def G_SSHLSAT : GenericInstruction { + let OutOperandList = (outs type0:$dst); + let InOperandList = (ins type0:$src1, type1:$src2); + let hasSideEffects = false; + let isCommutable = false; +} + +/// RESULT = [US]MULFIX(LHS, RHS, SCALE) - Perform fixed point +/// multiplication on 2 integers with the same width and scale. SCALE +/// represents the scale of both operands as fixed point numbers. This +/// SCALE parameter must be a constant integer. A scale of zero is +/// effectively performing multiplication on 2 integers. +def G_SMULFIX : GenericInstruction { + let OutOperandList = (outs type0:$dst); + let InOperandList = (ins type0:$src0, type0:$src1, untyped_imm_0:$scale); + let hasSideEffects = false; + let isCommutable = true; +} + +def G_UMULFIX : GenericInstruction { + let OutOperandList = (outs type0:$dst); + let InOperandList = (ins type0:$src0, type0:$src1, untyped_imm_0:$scale); + let hasSideEffects = false; + let isCommutable = true; +} + +/// Same as the corresponding unsaturated fixed point instructions, but the +/// result is clamped between the min and max values representable by the +/// bits of the first 2 operands. +def G_SMULFIXSAT : GenericInstruction { + let OutOperandList = (outs type0:$dst); + let InOperandList = (ins type0:$src0, type0:$src1, untyped_imm_0:$scale); + let hasSideEffects = false; + let isCommutable = true; +} + +def G_UMULFIXSAT : GenericInstruction { + let OutOperandList = (outs type0:$dst); + let InOperandList = (ins type0:$src0, type0:$src1, untyped_imm_0:$scale); + let hasSideEffects = false; + let isCommutable = true; +} + +/// RESULT = [US]DIVFIX(LHS, RHS, SCALE) - Perform fixed point division on +/// 2 integers with the same width and scale. SCALE represents the scale +/// of both operands as fixed point numbers. This SCALE parameter must be a +/// constant integer. +def G_SDIVFIX : GenericInstruction { + let OutOperandList = (outs type0:$dst); + let InOperandList = (ins type0:$src0, type0:$src1, untyped_imm_0:$scale); + let hasSideEffects = false; + let isCommutable = false; +} + +def G_UDIVFIX : GenericInstruction { + let OutOperandList = (outs type0:$dst); + let InOperandList = (ins type0:$src0, type0:$src1, untyped_imm_0:$scale); + let hasSideEffects = false; + let isCommutable = false; +} + +/// Same as the corresponding unsaturated fixed point instructions, +/// but the result is clamped between the min and max values +/// representable by the bits of the first 2 operands. 
+def G_SDIVFIXSAT : GenericInstruction { + let OutOperandList = (outs type0:$dst); + let InOperandList = (ins type0:$src0, type0:$src1, untyped_imm_0:$scale); + let hasSideEffects = false; + let isCommutable = false; +} + +def G_UDIVFIXSAT : GenericInstruction { + let OutOperandList = (outs type0:$dst); + let InOperandList = (ins type0:$src0, type0:$src1, untyped_imm_0:$scale); + let hasSideEffects = false; + let isCommutable = false; +} + //------------------------------------------------------------------------------ // Floating Point Unary Ops. //------------------------------------------------------------------------------ @@ -646,61 +646,61 @@ def G_UDIVFIXSAT : GenericInstruction { def G_FNEG : GenericInstruction { let OutOperandList = (outs type0:$dst); let InOperandList = (ins type0:$src); - let hasSideEffects = false; + let hasSideEffects = false; } def G_FPEXT : GenericInstruction { let OutOperandList = (outs type0:$dst); let InOperandList = (ins type1:$src); - let hasSideEffects = false; + let hasSideEffects = false; } def G_FPTRUNC : GenericInstruction { let OutOperandList = (outs type0:$dst); let InOperandList = (ins type1:$src); - let hasSideEffects = false; + let hasSideEffects = false; } def G_FPTOSI : GenericInstruction { let OutOperandList = (outs type0:$dst); let InOperandList = (ins type1:$src); - let hasSideEffects = false; + let hasSideEffects = false; } def G_FPTOUI : GenericInstruction { let OutOperandList = (outs type0:$dst); let InOperandList = (ins type1:$src); - let hasSideEffects = false; + let hasSideEffects = false; } def G_SITOFP : GenericInstruction { let OutOperandList = (outs type0:$dst); let InOperandList = (ins type1:$src); - let hasSideEffects = false; + let hasSideEffects = false; } def G_UITOFP : GenericInstruction { let OutOperandList = (outs type0:$dst); let InOperandList = (ins type1:$src); - let hasSideEffects = false; + let hasSideEffects = false; } def G_FABS : GenericInstruction { let OutOperandList = (outs type0:$dst); let InOperandList = (ins type0:$src); - let hasSideEffects = false; + let hasSideEffects = false; } def G_FCOPYSIGN : GenericInstruction { let OutOperandList = (outs type0:$dst); let InOperandList = (ins type0:$src0, type1:$src1); - let hasSideEffects = false; + let hasSideEffects = false; } def G_FCANONICALIZE : GenericInstruction { let OutOperandList = (outs type0:$dst); let InOperandList = (ins type0:$src); - let hasSideEffects = false; + let hasSideEffects = false; } // FMINNUM/FMAXNUM - Perform floating-point minimum or maximum on two @@ -713,15 +713,15 @@ def G_FCANONICALIZE : GenericInstruction { def G_FMINNUM : GenericInstruction { let OutOperandList = (outs type0:$dst); let InOperandList = (ins type0:$src1, type0:$src2); - let hasSideEffects = false; - let isCommutable = true; + let hasSideEffects = false; + let isCommutable = true; } def G_FMAXNUM : GenericInstruction { let OutOperandList = (outs type0:$dst); let InOperandList = (ins type0:$src1, type0:$src2); - let hasSideEffects = false; - let isCommutable = true; + let hasSideEffects = false; + let isCommutable = true; } // FMINNUM_IEEE/FMAXNUM_IEEE - Perform floating-point minimum or maximum on @@ -731,15 +731,15 @@ def G_FMAXNUM : GenericInstruction { def G_FMINNUM_IEEE : GenericInstruction { let OutOperandList = (outs type0:$dst); let InOperandList = (ins type0:$src1, type0:$src2); - let hasSideEffects = false; - let isCommutable = true; + let hasSideEffects = false; + let isCommutable = true; } def G_FMAXNUM_IEEE : GenericInstruction { let OutOperandList = 
(outs type0:$dst); let InOperandList = (ins type0:$src1, type0:$src2); - let hasSideEffects = false; - let isCommutable = true; + let hasSideEffects = false; + let isCommutable = true; } // FMINIMUM/FMAXIMUM - NaN-propagating minimum/maximum that also treat -0.0 @@ -748,15 +748,15 @@ def G_FMAXNUM_IEEE : GenericInstruction { def G_FMINIMUM : GenericInstruction { let OutOperandList = (outs type0:$dst); let InOperandList = (ins type0:$src1, type0:$src2); - let hasSideEffects = false; - let isCommutable = true; + let hasSideEffects = false; + let isCommutable = true; } def G_FMAXIMUM : GenericInstruction { let OutOperandList = (outs type0:$dst); let InOperandList = (ins type0:$src1, type0:$src2); - let hasSideEffects = false; - let isCommutable = true; + let hasSideEffects = false; + let isCommutable = true; } //------------------------------------------------------------------------------ @@ -767,24 +767,24 @@ def G_FMAXIMUM : GenericInstruction { def G_FADD : GenericInstruction { let OutOperandList = (outs type0:$dst); let InOperandList = (ins type0:$src1, type0:$src2); - let hasSideEffects = false; - let isCommutable = true; + let hasSideEffects = false; + let isCommutable = true; } // Generic FP subtraction. def G_FSUB : GenericInstruction { let OutOperandList = (outs type0:$dst); let InOperandList = (ins type0:$src1, type0:$src2); - let hasSideEffects = false; - let isCommutable = false; + let hasSideEffects = false; + let isCommutable = false; } // Generic FP multiplication. def G_FMUL : GenericInstruction { let OutOperandList = (outs type0:$dst); let InOperandList = (ins type0:$src1, type0:$src2); - let hasSideEffects = false; - let isCommutable = true; + let hasSideEffects = false; + let isCommutable = true; } // Generic fused multiply-add instruction. @@ -792,8 +792,8 @@ def G_FMUL : GenericInstruction { def G_FMA : GenericInstruction { let OutOperandList = (outs type0:$dst); let InOperandList = (ins type0:$src1, type0:$src2, type0:$src3); - let hasSideEffects = false; - let isCommutable = false; + let hasSideEffects = false; + let isCommutable = false; } /// Generic FP multiply and add. Perform a * b + c, while getting the @@ -801,92 +801,92 @@ def G_FMA : GenericInstruction { def G_FMAD : GenericInstruction { let OutOperandList = (outs type0:$dst); let InOperandList = (ins type0:$src1, type0:$src2, type0:$src3); - let hasSideEffects = false; - let isCommutable = false; + let hasSideEffects = false; + let isCommutable = false; } // Generic FP division. def G_FDIV : GenericInstruction { let OutOperandList = (outs type0:$dst); let InOperandList = (ins type0:$src1, type0:$src2); - let hasSideEffects = false; + let hasSideEffects = false; } // Generic FP remainder. def G_FREM : GenericInstruction { let OutOperandList = (outs type0:$dst); let InOperandList = (ins type0:$src1, type0:$src2); - let hasSideEffects = false; + let hasSideEffects = false; } // Floating point exponentiation. def G_FPOW : GenericInstruction { let OutOperandList = (outs type0:$dst); let InOperandList = (ins type0:$src1, type0:$src2); - let hasSideEffects = false; + let hasSideEffects = false; +} + +// Floating point exponentiation, with an integer power. +def G_FPOWI : GenericInstruction { + let OutOperandList = (outs type0:$dst); + let InOperandList = (ins type0:$src0, type1:$src1); + let hasSideEffects = false; } -// Floating point exponentiation, with an integer power. 
-def G_FPOWI : GenericInstruction { - let OutOperandList = (outs type0:$dst); - let InOperandList = (ins type0:$src0, type1:$src1); - let hasSideEffects = false; -} - // Floating point base-e exponential of a value. def G_FEXP : GenericInstruction { let OutOperandList = (outs type0:$dst); let InOperandList = (ins type0:$src1); - let hasSideEffects = false; + let hasSideEffects = false; } // Floating point base-2 exponential of a value. def G_FEXP2 : GenericInstruction { let OutOperandList = (outs type0:$dst); let InOperandList = (ins type0:$src1); - let hasSideEffects = false; + let hasSideEffects = false; } // Floating point base-e logarithm of a value. def G_FLOG : GenericInstruction { let OutOperandList = (outs type0:$dst); let InOperandList = (ins type0:$src1); - let hasSideEffects = false; + let hasSideEffects = false; } // Floating point base-2 logarithm of a value. def G_FLOG2 : GenericInstruction { let OutOperandList = (outs type0:$dst); let InOperandList = (ins type0:$src1); - let hasSideEffects = false; + let hasSideEffects = false; } // Floating point base-10 logarithm of a value. def G_FLOG10 : GenericInstruction { let OutOperandList = (outs type0:$dst); let InOperandList = (ins type0:$src1); - let hasSideEffects = false; + let hasSideEffects = false; } // Floating point ceiling of a value. def G_FCEIL : GenericInstruction { let OutOperandList = (outs type0:$dst); let InOperandList = (ins type0:$src1); - let hasSideEffects = false; + let hasSideEffects = false; } // Floating point cosine of a value. def G_FCOS : GenericInstruction { let OutOperandList = (outs type0:$dst); let InOperandList = (ins type0:$src1); - let hasSideEffects = false; + let hasSideEffects = false; } // Floating point sine of a value. def G_FSIN : GenericInstruction { let OutOperandList = (outs type0:$dst); let InOperandList = (ins type0:$src1); - let hasSideEffects = false; + let hasSideEffects = false; } // Floating point square root of a value. @@ -896,28 +896,28 @@ def G_FSIN : GenericInstruction { def G_FSQRT : GenericInstruction { let OutOperandList = (outs type0:$dst); let InOperandList = (ins type0:$src1); - let hasSideEffects = false; + let hasSideEffects = false; } // Floating point floor of a value. def G_FFLOOR : GenericInstruction { let OutOperandList = (outs type0:$dst); let InOperandList = (ins type0:$src1); - let hasSideEffects = false; + let hasSideEffects = false; } // Floating point round to next integer. def G_FRINT : GenericInstruction { let OutOperandList = (outs type0:$dst); let InOperandList = (ins type0:$src1); - let hasSideEffects = false; + let hasSideEffects = false; } // Floating point round to the nearest integer. 
def G_FNEARBYINT : GenericInstruction { let OutOperandList = (outs type0:$dst); let InOperandList = (ins type0:$src1); - let hasSideEffects = false; + let hasSideEffects = false; } //------------------------------------------------------------------------------ @@ -926,31 +926,31 @@ def G_FNEARBYINT : GenericInstruction { def G_INTRINSIC_TRUNC : GenericInstruction { let OutOperandList = (outs type0:$dst); let InOperandList = (ins type0:$src1); - let hasSideEffects = false; + let hasSideEffects = false; } def G_INTRINSIC_ROUND : GenericInstruction { let OutOperandList = (outs type0:$dst); let InOperandList = (ins type0:$src1); - let hasSideEffects = false; -} - -def G_INTRINSIC_LRINT : GenericInstruction { - let OutOperandList = (outs type0:$dst); - let InOperandList = (ins type1:$src); - let hasSideEffects = false; -} - -def G_INTRINSIC_ROUNDEVEN : GenericInstruction { - let OutOperandList = (outs type0:$dst); - let InOperandList = (ins type0:$src1); - let hasSideEffects = false; -} - + let hasSideEffects = false; +} + +def G_INTRINSIC_LRINT : GenericInstruction { + let OutOperandList = (outs type0:$dst); + let InOperandList = (ins type1:$src); + let hasSideEffects = false; +} + +def G_INTRINSIC_ROUNDEVEN : GenericInstruction { + let OutOperandList = (outs type0:$dst); + let InOperandList = (ins type0:$src1); + let hasSideEffects = false; +} + def G_READCYCLECOUNTER : GenericInstruction { let OutOperandList = (outs type0:$dst); let InOperandList = (ins); - let hasSideEffects = true; + let hasSideEffects = true; } //------------------------------------------------------------------------------ @@ -965,24 +965,24 @@ def G_READCYCLECOUNTER : GenericInstruction { def G_LOAD : GenericInstruction { let OutOperandList = (outs type0:$dst); let InOperandList = (ins ptype1:$addr); - let hasSideEffects = false; - let mayLoad = true; + let hasSideEffects = false; + let mayLoad = true; } // Generic sign-extended load. Expects a MachineMemOperand in addition to explicit operands. def G_SEXTLOAD : GenericInstruction { let OutOperandList = (outs type0:$dst); let InOperandList = (ins ptype1:$addr); - let hasSideEffects = false; - let mayLoad = true; + let hasSideEffects = false; + let mayLoad = true; } // Generic zero-extended load. Expects a MachineMemOperand in addition to explicit operands. def G_ZEXTLOAD : GenericInstruction { let OutOperandList = (outs type0:$dst); let InOperandList = (ins ptype1:$addr); - let hasSideEffects = false; - let mayLoad = true; + let hasSideEffects = false; + let mayLoad = true; } // Generic indexed load. Combines a GEP with a load. $newaddr is set to $base + $offset. @@ -991,32 +991,32 @@ def G_ZEXTLOAD : GenericInstruction { def G_INDEXED_LOAD : GenericInstruction { let OutOperandList = (outs type0:$dst, ptype1:$newaddr); let InOperandList = (ins ptype1:$base, type2:$offset, unknown:$am); - let hasSideEffects = false; - let mayLoad = true; + let hasSideEffects = false; + let mayLoad = true; } // Same as G_INDEXED_LOAD except that the load performed is sign-extending, as with G_SEXTLOAD. def G_INDEXED_SEXTLOAD : GenericInstruction { let OutOperandList = (outs type0:$dst, ptype1:$newaddr); let InOperandList = (ins ptype1:$base, type2:$offset, unknown:$am); - let hasSideEffects = false; - let mayLoad = true; + let hasSideEffects = false; + let mayLoad = true; } // Same as G_INDEXED_LOAD except that the load performed is zero-extending, as with G_ZEXTLOAD. 
def G_INDEXED_ZEXTLOAD : GenericInstruction { let OutOperandList = (outs type0:$dst, ptype1:$newaddr); let InOperandList = (ins ptype1:$base, type2:$offset, unknown:$am); - let hasSideEffects = false; - let mayLoad = true; + let hasSideEffects = false; + let mayLoad = true; } // Generic store. Expects a MachineMemOperand in addition to explicit operands. def G_STORE : GenericInstruction { let OutOperandList = (outs); let InOperandList = (ins type0:$src, ptype1:$addr); - let hasSideEffects = false; - let mayStore = true; + let hasSideEffects = false; + let mayStore = true; } // Combines a store with a GEP. See description of G_INDEXED_LOAD for indexing behaviour. @@ -1024,8 +1024,8 @@ def G_INDEXED_STORE : GenericInstruction { let OutOperandList = (outs ptype0:$newaddr); let InOperandList = (ins type1:$src, ptype0:$base, ptype2:$offset, unknown:$am); - let hasSideEffects = false; - let mayStore = true; + let hasSideEffects = false; + let mayStore = true; } // Generic atomic cmpxchg with internal success check. Expects a @@ -1033,9 +1033,9 @@ def G_INDEXED_STORE : GenericInstruction { def G_ATOMIC_CMPXCHG_WITH_SUCCESS : GenericInstruction { let OutOperandList = (outs type0:$oldval, type1:$success); let InOperandList = (ins type2:$addr, type0:$cmpval, type0:$newval); - let hasSideEffects = false; - let mayLoad = true; - let mayStore = true; + let hasSideEffects = false; + let mayLoad = true; + let mayStore = true; } // Generic atomic cmpxchg. Expects a MachineMemOperand in addition to explicit @@ -1043,9 +1043,9 @@ def G_ATOMIC_CMPXCHG_WITH_SUCCESS : GenericInstruction { def G_ATOMIC_CMPXCHG : GenericInstruction { let OutOperandList = (outs type0:$oldval); let InOperandList = (ins ptype1:$addr, type0:$cmpval, type0:$newval); - let hasSideEffects = false; - let mayLoad = true; - let mayStore = true; + let hasSideEffects = false; + let mayLoad = true; + let mayStore = true; } // Generic atomicrmw. Expects a MachineMemOperand in addition to explicit @@ -1053,9 +1053,9 @@ def G_ATOMIC_CMPXCHG : GenericInstruction { class G_ATOMICRMW_OP : GenericInstruction { let OutOperandList = (outs type0:$oldval); let InOperandList = (ins ptype1:$addr, type0:$val); - let hasSideEffects = false; - let mayLoad = true; - let mayStore = true; + let hasSideEffects = false; + let mayLoad = true; + let mayStore = true; } def G_ATOMICRMW_XCHG : G_ATOMICRMW_OP; @@ -1075,7 +1075,7 @@ def G_ATOMICRMW_FSUB : G_ATOMICRMW_OP; def G_FENCE : GenericInstruction { let OutOperandList = (outs); let InOperandList = (ins i32imm:$ordering, i32imm:$scope); - let hasSideEffects = true; + let hasSideEffects = true; } //------------------------------------------------------------------------------ @@ -1088,7 +1088,7 @@ def G_FENCE : GenericInstruction { def G_EXTRACT : GenericInstruction { let OutOperandList = (outs type0:$res); let InOperandList = (ins type1:$src, untyped_imm_0:$offset); - let hasSideEffects = false; + let hasSideEffects = false; } // Extract multiple registers specified size, starting from blocks given by @@ -1100,14 +1100,14 @@ def G_EXTRACT : GenericInstruction { def G_UNMERGE_VALUES : GenericInstruction { let OutOperandList = (outs type0:$dst0, variable_ops); let InOperandList = (ins type1:$src); - let hasSideEffects = false; + let hasSideEffects = false; } // Insert a smaller register into a larger one at the specified bit-index. 
def G_INSERT : GenericInstruction { let OutOperandList = (outs type0:$dst); let InOperandList = (ins type0:$src, type1:$op, untyped_imm_0:$offset); - let hasSideEffects = false; + let hasSideEffects = false; } // Concatenate multiple registers of the same size into a wider register. @@ -1117,7 +1117,7 @@ def G_INSERT : GenericInstruction { def G_MERGE_VALUES : GenericInstruction { let OutOperandList = (outs type0:$dst); let InOperandList = (ins type1:$src0, variable_ops); - let hasSideEffects = false; + let hasSideEffects = false; } /// Create a vector from multiple scalar registers. No implicit @@ -1126,7 +1126,7 @@ def G_MERGE_VALUES : GenericInstruction { def G_BUILD_VECTOR : GenericInstruction { let OutOperandList = (outs type0:$dst); let InOperandList = (ins type1:$src0, variable_ops); - let hasSideEffects = false; + let hasSideEffects = false; } /// Like G_BUILD_VECTOR, but truncates the larger operand types to fit the @@ -1134,24 +1134,24 @@ def G_BUILD_VECTOR : GenericInstruction { def G_BUILD_VECTOR_TRUNC : GenericInstruction { let OutOperandList = (outs type0:$dst); let InOperandList = (ins type1:$src0, variable_ops); - let hasSideEffects = false; + let hasSideEffects = false; } /// Create a vector by concatenating vectors together. def G_CONCAT_VECTORS : GenericInstruction { let OutOperandList = (outs type0:$dst); let InOperandList = (ins type1:$src0, variable_ops); - let hasSideEffects = false; + let hasSideEffects = false; } // Intrinsic without side effects. def G_INTRINSIC : GenericInstruction { let OutOperandList = (outs); let InOperandList = (ins unknown:$intrin, variable_ops); - let hasSideEffects = false; + let hasSideEffects = false; // Conservatively assume this is convergent. If there turnes out to - // be a need, there should be separate convergent intrinsic opcodes. + // be a need, there should be separate convergent intrinsic opcodes. let isConvergent = 1; } @@ -1159,13 +1159,13 @@ def G_INTRINSIC : GenericInstruction { def G_INTRINSIC_W_SIDE_EFFECTS : GenericInstruction { let OutOperandList = (outs); let InOperandList = (ins unknown:$intrin, variable_ops); - let hasSideEffects = true; - let mayLoad = true; - let mayStore = true; + let hasSideEffects = true; + let mayLoad = true; + let mayStore = true; // Conservatively assume this is convergent. If there turnes out to - // be a need, there should be separate convergent intrinsic opcodes. - let isConvergent = true; + // be a need, there should be separate convergent intrinsic opcodes. + let isConvergent = true; } //------------------------------------------------------------------------------ @@ -1176,61 +1176,61 @@ def G_INTRINSIC_W_SIDE_EFFECTS : GenericInstruction { def G_BR : GenericInstruction { let OutOperandList = (outs); let InOperandList = (ins unknown:$src1); - let hasSideEffects = false; - let isBranch = true; - let isTerminator = true; - let isBarrier = true; + let hasSideEffects = false; + let isBranch = true; + let isTerminator = true; + let isBarrier = true; } // Generic conditional branch. def G_BRCOND : GenericInstruction { let OutOperandList = (outs); let InOperandList = (ins type0:$tst, unknown:$truebb); - let hasSideEffects = false; - let isBranch = true; - let isTerminator = true; + let hasSideEffects = false; + let isBranch = true; + let isTerminator = true; } // Generic indirect branch. 
def G_BRINDIRECT : GenericInstruction { let OutOperandList = (outs); let InOperandList = (ins type0:$src1); - let hasSideEffects = false; - let isBranch = true; - let isTerminator = true; - let isBarrier = true; - let isIndirectBranch = true; + let hasSideEffects = false; + let isBranch = true; + let isTerminator = true; + let isBarrier = true; + let isIndirectBranch = true; } // Generic branch to jump table entry def G_BRJT : GenericInstruction { let OutOperandList = (outs); let InOperandList = (ins ptype0:$tbl, unknown:$jti, type1:$idx); - let hasSideEffects = false; - let isBranch = true; - let isTerminator = true; - let isBarrier = true; - let isIndirectBranch = true; + let hasSideEffects = false; + let isBranch = true; + let isTerminator = true; + let isBarrier = true; + let isIndirectBranch = true; } def G_READ_REGISTER : GenericInstruction { let OutOperandList = (outs type0:$dst); let InOperandList = (ins unknown:$register); - let hasSideEffects = true; + let hasSideEffects = true; // Assume convergent. It's probably not worth the effort of somehow // modeling convergent and nonconvergent register accesses. - let isConvergent = true; + let isConvergent = true; } def G_WRITE_REGISTER : GenericInstruction { let OutOperandList = (outs); let InOperandList = (ins unknown:$register, type0:$value); - let hasSideEffects = true; + let hasSideEffects = true; // Assume convergent. It's probably not worth the effort of somehow // modeling convergent and nonconvergent register accesses. - let isConvergent = true; + let isConvergent = true; } //------------------------------------------------------------------------------ @@ -1241,14 +1241,14 @@ def G_WRITE_REGISTER : GenericInstruction { def G_INSERT_VECTOR_ELT : GenericInstruction { let OutOperandList = (outs type0:$dst); let InOperandList = (ins type0:$src, type1:$elt, type2:$idx); - let hasSideEffects = false; + let hasSideEffects = false; } // Generic extractelement. def G_EXTRACT_VECTOR_ELT : GenericInstruction { let OutOperandList = (outs type0:$dst); let InOperandList = (ins type1:$src, type2:$idx); - let hasSideEffects = false; + let hasSideEffects = false; } // Generic shufflevector. 
@@ -1258,48 +1258,48 @@ def G_EXTRACT_VECTOR_ELT : GenericInstruction { def G_SHUFFLE_VECTOR: GenericInstruction { let OutOperandList = (outs type0:$dst); let InOperandList = (ins type1:$v1, type1:$v2, unknown:$mask); - let hasSideEffects = false; + let hasSideEffects = false; } //------------------------------------------------------------------------------ -// Vector reductions -//------------------------------------------------------------------------------ - -class VectorReduction : GenericInstruction { - let OutOperandList = (outs type0:$dst); - let InOperandList = (ins type1:$v); - let hasSideEffects = false; -} - -def G_VECREDUCE_SEQ_FADD : GenericInstruction { - let OutOperandList = (outs type0:$dst); - let InOperandList = (ins type1:$acc, type2:$v); - let hasSideEffects = false; -} - -def G_VECREDUCE_SEQ_FMUL : GenericInstruction { - let OutOperandList = (outs type0:$dst); - let InOperandList = (ins type1:$acc, type2:$v); - let hasSideEffects = false; -} - -def G_VECREDUCE_FADD : VectorReduction; -def G_VECREDUCE_FMUL : VectorReduction; - -def G_VECREDUCE_FMAX : VectorReduction; -def G_VECREDUCE_FMIN : VectorReduction; - -def G_VECREDUCE_ADD : VectorReduction; -def G_VECREDUCE_MUL : VectorReduction; -def G_VECREDUCE_AND : VectorReduction; -def G_VECREDUCE_OR : VectorReduction; -def G_VECREDUCE_XOR : VectorReduction; -def G_VECREDUCE_SMAX : VectorReduction; -def G_VECREDUCE_SMIN : VectorReduction; -def G_VECREDUCE_UMAX : VectorReduction; -def G_VECREDUCE_UMIN : VectorReduction; - -//------------------------------------------------------------------------------ +// Vector reductions +//------------------------------------------------------------------------------ + +class VectorReduction : GenericInstruction { + let OutOperandList = (outs type0:$dst); + let InOperandList = (ins type1:$v); + let hasSideEffects = false; +} + +def G_VECREDUCE_SEQ_FADD : GenericInstruction { + let OutOperandList = (outs type0:$dst); + let InOperandList = (ins type1:$acc, type2:$v); + let hasSideEffects = false; +} + +def G_VECREDUCE_SEQ_FMUL : GenericInstruction { + let OutOperandList = (outs type0:$dst); + let InOperandList = (ins type1:$acc, type2:$v); + let hasSideEffects = false; +} + +def G_VECREDUCE_FADD : VectorReduction; +def G_VECREDUCE_FMUL : VectorReduction; + +def G_VECREDUCE_FMAX : VectorReduction; +def G_VECREDUCE_FMIN : VectorReduction; + +def G_VECREDUCE_ADD : VectorReduction; +def G_VECREDUCE_MUL : VectorReduction; +def G_VECREDUCE_AND : VectorReduction; +def G_VECREDUCE_OR : VectorReduction; +def G_VECREDUCE_XOR : VectorReduction; +def G_VECREDUCE_SMAX : VectorReduction; +def G_VECREDUCE_SMIN : VectorReduction; +def G_VECREDUCE_UMAX : VectorReduction; +def G_VECREDUCE_UMIN : VectorReduction; + +//------------------------------------------------------------------------------ // Constrained floating point ops //------------------------------------------------------------------------------ @@ -1310,30 +1310,30 @@ def G_STRICT_FDIV : ConstrainedIntruction<G_FDIV>; def G_STRICT_FREM : ConstrainedIntruction<G_FREM>; def G_STRICT_FMA : ConstrainedIntruction<G_FMA>; def G_STRICT_FSQRT : ConstrainedIntruction<G_FSQRT>; - -//------------------------------------------------------------------------------ -// Memory intrinsics -//------------------------------------------------------------------------------ - -def G_MEMCPY : GenericInstruction { - let OutOperandList = (outs); - let InOperandList = (ins ptype0:$dst_addr, ptype1:$src_addr, type2:$size, untyped_imm_0:$tailcall); - let hasSideEffects = 
false; - let mayLoad = true; - let mayStore = true; -} - -def G_MEMMOVE : GenericInstruction { - let OutOperandList = (outs); - let InOperandList = (ins ptype0:$dst_addr, ptype1:$src_addr, type2:$size, untyped_imm_0:$tailcall); - let hasSideEffects = false; - let mayLoad = true; - let mayStore = true; -} - -def G_MEMSET : GenericInstruction { - let OutOperandList = (outs); - let InOperandList = (ins ptype0:$dst_addr, type1:$value, type2:$size, untyped_imm_0:$tailcall); - let hasSideEffects = false; - let mayStore = true; -} + +//------------------------------------------------------------------------------ +// Memory intrinsics +//------------------------------------------------------------------------------ + +def G_MEMCPY : GenericInstruction { + let OutOperandList = (outs); + let InOperandList = (ins ptype0:$dst_addr, ptype1:$src_addr, type2:$size, untyped_imm_0:$tailcall); + let hasSideEffects = false; + let mayLoad = true; + let mayStore = true; +} + +def G_MEMMOVE : GenericInstruction { + let OutOperandList = (outs); + let InOperandList = (ins ptype0:$dst_addr, ptype1:$src_addr, type2:$size, untyped_imm_0:$tailcall); + let hasSideEffects = false; + let mayLoad = true; + let mayStore = true; +} + +def G_MEMSET : GenericInstruction { + let OutOperandList = (outs); + let InOperandList = (ins ptype0:$dst_addr, type1:$value, type2:$size, untyped_imm_0:$tailcall); + let hasSideEffects = false; + let mayStore = true; +} diff --git a/contrib/libs/llvm12/include/llvm/Target/GlobalISel/Combine.td b/contrib/libs/llvm12/include/llvm/Target/GlobalISel/Combine.td index 2dde4acdc9..e2c7a90a1b 100644 --- a/contrib/libs/llvm12/include/llvm/Target/GlobalISel/Combine.td +++ b/contrib/libs/llvm12/include/llvm/Target/GlobalISel/Combine.td @@ -85,7 +85,7 @@ class GIDefMatchData<string type> : GIDefKind { def extending_load_matchdata : GIDefMatchData<"PreferredTuple">; def indexed_load_store_matchdata : GIDefMatchData<"IndexedLoadStoreMatchInfo">; -def instruction_steps_matchdata: GIDefMatchData<"InstructionStepsMatchInfo">; +def instruction_steps_matchdata: GIDefMatchData<"InstructionStepsMatchInfo">; /// The operator at the root of a GICombineRule.Match dag. 
def match; @@ -126,30 +126,30 @@ def extending_loads : GICombineRule< (apply [{ Helper.applyCombineExtendingLoads(*${root}, ${matchinfo}); }])>; def combines_for_extload: GICombineGroup<[extending_loads]>; -def sext_trunc_sextload : GICombineRule< +def sext_trunc_sextload : GICombineRule< (defs root:$d), (match (wip_match_opcode G_SEXT_INREG):$d, - [{ return Helper.matchSextTruncSextLoad(*${d}); }]), - (apply [{ Helper.applySextTruncSextLoad(*${d}); }])>; - -def sext_inreg_of_load_matchdata : GIDefMatchData<"std::tuple<Register, unsigned>">; -def sext_inreg_of_load : GICombineRule< - (defs root:$root, sext_inreg_of_load_matchdata:$matchinfo), - (match (wip_match_opcode G_SEXT_INREG):$root, - [{ return Helper.matchSextInRegOfLoad(*${root}, ${matchinfo}); }]), - (apply [{ return Helper.applySextInRegOfLoad(*${root}, ${matchinfo}); }])>; - + [{ return Helper.matchSextTruncSextLoad(*${d}); }]), + (apply [{ Helper.applySextTruncSextLoad(*${d}); }])>; + +def sext_inreg_of_load_matchdata : GIDefMatchData<"std::tuple<Register, unsigned>">; +def sext_inreg_of_load : GICombineRule< + (defs root:$root, sext_inreg_of_load_matchdata:$matchinfo), + (match (wip_match_opcode G_SEXT_INREG):$root, + [{ return Helper.matchSextInRegOfLoad(*${root}, ${matchinfo}); }]), + (apply [{ return Helper.applySextInRegOfLoad(*${root}, ${matchinfo}); }])>; + def combine_indexed_load_store : GICombineRule< (defs root:$root, indexed_load_store_matchdata:$matchinfo), (match (wip_match_opcode G_LOAD, G_SEXTLOAD, G_ZEXTLOAD, G_STORE):$root, [{ return Helper.matchCombineIndexedLoadStore(*${root}, ${matchinfo}); }]), (apply [{ Helper.applyCombineIndexedLoadStore(*${root}, ${matchinfo}); }])>; -def opt_brcond_by_inverting_cond : GICombineRule< +def opt_brcond_by_inverting_cond : GICombineRule< (defs root:$root), (match (wip_match_opcode G_BR):$root, - [{ return Helper.matchOptBrCondByInvertingCond(*${root}); }]), - (apply [{ Helper.applyOptBrCondByInvertingCond(*${root}); }])>; + [{ return Helper.matchOptBrCondByInvertingCond(*${root}); }]), + (apply [{ Helper.applyOptBrCondByInvertingCond(*${root}); }])>; def ptr_add_immed_matchdata : GIDefMatchData<"PtrAddChain">; def ptr_add_immed_chain : GICombineRule< @@ -158,23 +158,23 @@ def ptr_add_immed_chain : GICombineRule< [{ return Helper.matchPtrAddImmedChain(*${d}, ${matchinfo}); }]), (apply [{ Helper.applyPtrAddImmedChain(*${d}, ${matchinfo}); }])>; -// Fold shift (shift base x), y -> shift base, (x+y), if shifts are same -def shift_immed_matchdata : GIDefMatchData<"RegisterImmPair">; -def shift_immed_chain : GICombineRule< - (defs root:$d, shift_immed_matchdata:$matchinfo), - (match (wip_match_opcode G_SHL, G_ASHR, G_LSHR, G_SSHLSAT, G_USHLSAT):$d, - [{ return Helper.matchShiftImmedChain(*${d}, ${matchinfo}); }]), - (apply [{ Helper.applyShiftImmedChain(*${d}, ${matchinfo}); }])>; - -// Transform shift (logic (shift X, C0), Y), C1 -// -> logic (shift X, (C0+C1)), (shift Y, C1), if shifts are same -def shift_of_shifted_logic_matchdata : GIDefMatchData<"ShiftOfShiftedLogic">; -def shift_of_shifted_logic_chain : GICombineRule< - (defs root:$d, shift_of_shifted_logic_matchdata:$matchinfo), - (match (wip_match_opcode G_SHL, G_ASHR, G_LSHR, G_USHLSAT, G_SSHLSAT):$d, - [{ return Helper.matchShiftOfShiftedLogic(*${d}, ${matchinfo}); }]), - (apply [{ Helper.applyShiftOfShiftedLogic(*${d}, ${matchinfo}); }])>; - +// Fold shift (shift base x), y -> shift base, (x+y), if shifts are same +def shift_immed_matchdata : GIDefMatchData<"RegisterImmPair">; +def shift_immed_chain : GICombineRule< + 
(defs root:$d, shift_immed_matchdata:$matchinfo), + (match (wip_match_opcode G_SHL, G_ASHR, G_LSHR, G_SSHLSAT, G_USHLSAT):$d, + [{ return Helper.matchShiftImmedChain(*${d}, ${matchinfo}); }]), + (apply [{ Helper.applyShiftImmedChain(*${d}, ${matchinfo}); }])>; + +// Transform shift (logic (shift X, C0), Y), C1 +// -> logic (shift X, (C0+C1)), (shift Y, C1), if shifts are same +def shift_of_shifted_logic_matchdata : GIDefMatchData<"ShiftOfShiftedLogic">; +def shift_of_shifted_logic_chain : GICombineRule< + (defs root:$d, shift_of_shifted_logic_matchdata:$matchinfo), + (match (wip_match_opcode G_SHL, G_ASHR, G_LSHR, G_USHLSAT, G_SSHLSAT):$d, + [{ return Helper.matchShiftOfShiftedLogic(*${d}, ${matchinfo}); }]), + (apply [{ Helper.applyShiftOfShiftedLogic(*${d}, ${matchinfo}); }])>; + def mul_to_shl_matchdata : GIDefMatchData<"unsigned">; def mul_to_shl : GICombineRule< (defs root:$d, mul_to_shl_matchdata:$matchinfo), @@ -182,14 +182,14 @@ def mul_to_shl : GICombineRule< [{ return Helper.matchCombineMulToShl(*${mi}, ${matchinfo}); }]), (apply [{ Helper.applyCombineMulToShl(*${mi}, ${matchinfo}); }])>; -// shl ([asz]ext x), y => zext (shl x, y), if shift does not overflow int -def reduce_shl_of_extend_matchdata : GIDefMatchData<"RegisterImmPair">; -def reduce_shl_of_extend : GICombineRule< - (defs root:$dst, reduce_shl_of_extend_matchdata:$matchinfo), - (match (G_SHL $dst, $src0, $src1):$mi, - [{ return Helper.matchCombineShlOfExtend(*${mi}, ${matchinfo}); }]), - (apply [{ Helper.applyCombineShlOfExtend(*${mi}, ${matchinfo}); }])>; - +// shl ([asz]ext x), y => zext (shl x, y), if shift does not overflow int +def reduce_shl_of_extend_matchdata : GIDefMatchData<"RegisterImmPair">; +def reduce_shl_of_extend : GICombineRule< + (defs root:$dst, reduce_shl_of_extend_matchdata:$matchinfo), + (match (G_SHL $dst, $src0, $src1):$mi, + [{ return Helper.matchCombineShlOfExtend(*${mi}, ${matchinfo}); }]), + (apply [{ Helper.applyCombineShlOfExtend(*${mi}, ${matchinfo}); }])>; + // [us]itofp(undef) = 0, because the result value is bounded. def undef_to_fp_zero : GICombineRule< (defs root:$root), @@ -209,17 +209,17 @@ def undef_to_negative_one: GICombineRule< [{ return Helper.matchAnyExplicitUseIsUndef(*${root}); }]), (apply [{ Helper.replaceInstWithConstant(*${root}, -1); }])>; -def binop_left_undef_to_zero: GICombineRule< - (defs root:$root), - (match (wip_match_opcode G_SHL):$root, - [{ return Helper.matchOperandIsUndef(*${root}, 1); }]), - (apply [{ Helper.replaceInstWithConstant(*${root}, 0); }])>; - +def binop_left_undef_to_zero: GICombineRule< + (defs root:$root), + (match (wip_match_opcode G_SHL):$root, + [{ return Helper.matchOperandIsUndef(*${root}, 1); }]), + (apply [{ Helper.replaceInstWithConstant(*${root}, 0); }])>; + // Instructions where if any source operand is undef, the instruction can be // replaced with undef. def propagate_undef_any_op: GICombineRule< (defs root:$root), - (match (wip_match_opcode G_ADD, G_FPTOSI, G_FPTOUI, G_SUB, G_XOR, G_TRUNC):$root, + (match (wip_match_opcode G_ADD, G_FPTOSI, G_FPTOUI, G_SUB, G_XOR, G_TRUNC):$root, [{ return Helper.matchAnyExplicitUseIsUndef(*${root}); }]), (apply [{ Helper.replaceInstWithUndef(*${root}); }])>; @@ -246,24 +246,24 @@ def select_same_val: GICombineRule< (apply [{ return Helper.replaceSingleDefInstWithOperand(*${root}, 2); }]) >; -// Fold (undef ? 
x : y) -> y -def select_undef_cmp: GICombineRule< - (defs root:$root), - (match (wip_match_opcode G_SELECT):$root, - [{ return Helper.matchUndefSelectCmp(*${root}); }]), - (apply [{ return Helper.replaceSingleDefInstWithOperand(*${root}, 2); }]) ->; - -// Fold (true ? x : y) -> x -// Fold (false ? x : y) -> y -def select_constant_cmp_matchdata : GIDefMatchData<"unsigned">; -def select_constant_cmp: GICombineRule< - (defs root:$root, select_constant_cmp_matchdata:$matchinfo), - (match (wip_match_opcode G_SELECT):$root, - [{ return Helper.matchConstantSelectCmp(*${root}, ${matchinfo}); }]), - (apply [{ return Helper.replaceSingleDefInstWithOperand(*${root}, ${matchinfo}); }]) ->; - +// Fold (undef ? x : y) -> y +def select_undef_cmp: GICombineRule< + (defs root:$root), + (match (wip_match_opcode G_SELECT):$root, + [{ return Helper.matchUndefSelectCmp(*${root}); }]), + (apply [{ return Helper.replaceSingleDefInstWithOperand(*${root}, 2); }]) +>; + +// Fold (true ? x : y) -> x +// Fold (false ? x : y) -> y +def select_constant_cmp_matchdata : GIDefMatchData<"unsigned">; +def select_constant_cmp: GICombineRule< + (defs root:$root, select_constant_cmp_matchdata:$matchinfo), + (match (wip_match_opcode G_SELECT):$root, + [{ return Helper.matchConstantSelectCmp(*${root}, ${matchinfo}); }]), + (apply [{ return Helper.replaceSingleDefInstWithOperand(*${root}, ${matchinfo}); }]) +>; + // Fold x op 0 -> x def right_identity_zero: GICombineRule< (defs root:$root), @@ -272,14 +272,14 @@ def right_identity_zero: GICombineRule< (apply [{ return Helper.replaceSingleDefInstWithOperand(*${root}, 1); }]) >; -// Fold x op 1 -> x -def right_identity_one: GICombineRule< - (defs root:$root), - (match (wip_match_opcode G_MUL):$root, - [{ return Helper.matchConstantOp(${root}->getOperand(2), 1); }]), - (apply [{ return Helper.replaceSingleDefInstWithOperand(*${root}, 1); }]) ->; - +// Fold x op 1 -> x +def right_identity_one: GICombineRule< + (defs root:$root), + (match (wip_match_opcode G_MUL):$root, + [{ return Helper.matchConstantOp(${root}->getOperand(2), 1); }]), + (apply [{ return Helper.replaceSingleDefInstWithOperand(*${root}, 1); }]) +>; + // Fold (x op x) - > x def binop_same_val: GICombineRule< (defs root:$root), @@ -296,13 +296,13 @@ def binop_left_to_zero: GICombineRule< (apply [{ return Helper.replaceSingleDefInstWithOperand(*${root}, 1); }]) >; -def urem_pow2_to_mask : GICombineRule< - (defs root:$root), - (match (wip_match_opcode G_UREM):$root, - [{ return Helper.matchOperandIsKnownToBeAPowerOfTwo(*${root}, 2); }]), - (apply [{ return Helper.applySimplifyURemByPow2(*${root}); }]) ->; - +def urem_pow2_to_mask : GICombineRule< + (defs root:$root), + (match (wip_match_opcode G_UREM):$root, + [{ return Helper.matchOperandIsKnownToBeAPowerOfTwo(*${root}, 2); }]), + (apply [{ return Helper.applySimplifyURemByPow2(*${root}); }]) +>; + // Fold (x op 0) - > 0 def binop_right_to_zero: GICombineRule< (defs root:$root), @@ -327,240 +327,240 @@ def simplify_add_to_sub: GICombineRule < (apply [{ return Helper.applySimplifyAddToSub(*${root}, ${info});}]) >; -// Fold fp_op(cst) to the constant result of the floating point operation. 
-def constant_fp_op_matchinfo: GIDefMatchData<"Optional<APFloat>">; -def constant_fp_op: GICombineRule < - (defs root:$root, constant_fp_op_matchinfo:$info), - (match (wip_match_opcode G_FNEG, G_FABS, G_FPTRUNC, G_FSQRT, G_FLOG2):$root, - [{ return Helper.matchCombineConstantFoldFpUnary(*${root}, ${info}); }]), - (apply [{ return Helper.applyCombineConstantFoldFpUnary(*${root}, ${info}); }]) ->; - -// Fold int2ptr(ptr2int(x)) -> x -def p2i_to_i2p_matchinfo: GIDefMatchData<"Register">; -def p2i_to_i2p: GICombineRule< - (defs root:$root, p2i_to_i2p_matchinfo:$info), - (match (wip_match_opcode G_INTTOPTR):$root, - [{ return Helper.matchCombineI2PToP2I(*${root}, ${info}); }]), - (apply [{ return Helper.applyCombineI2PToP2I(*${root}, ${info}); }]) ->; - -// Fold ptr2int(int2ptr(x)) -> x -def i2p_to_p2i_matchinfo: GIDefMatchData<"Register">; -def i2p_to_p2i: GICombineRule< - (defs root:$root, i2p_to_p2i_matchinfo:$info), - (match (wip_match_opcode G_PTRTOINT):$root, - [{ return Helper.matchCombineP2IToI2P(*${root}, ${info}); }]), - (apply [{ return Helper.applyCombineP2IToI2P(*${root}, ${info}); }]) ->; - -// Fold add ptrtoint(x), y -> ptrtoint (ptr_add x), y -def add_p2i_to_ptradd_matchinfo : GIDefMatchData<"std::pair<Register, bool>">; -def add_p2i_to_ptradd : GICombineRule< - (defs root:$root, add_p2i_to_ptradd_matchinfo:$info), - (match (wip_match_opcode G_ADD):$root, - [{ return Helper.matchCombineAddP2IToPtrAdd(*${root}, ${info}); }]), - (apply [{ return Helper.applyCombineAddP2IToPtrAdd(*${root}, ${info}); }]) ->; - -// Fold (ptr_add (int2ptr C1), C2) -> C1 + C2 -def const_ptradd_to_i2p_matchinfo : GIDefMatchData<"int64_t">; -def const_ptradd_to_i2p: GICombineRule< - (defs root:$root, const_ptradd_to_i2p_matchinfo:$info), - (match (wip_match_opcode G_PTR_ADD):$root, - [{ return Helper.matchCombineConstPtrAddToI2P(*${root}, ${info}); }]), - (apply [{ return Helper.applyCombineConstPtrAddToI2P(*${root}, ${info}); }]) ->; - -// Simplify: (logic_op (op x...), (op y...)) -> (op (logic_op x, y)) -def hoist_logic_op_with_same_opcode_hands: GICombineRule < - (defs root:$root, instruction_steps_matchdata:$info), - (match (wip_match_opcode G_AND, G_OR, G_XOR):$root, - [{ return Helper.matchHoistLogicOpWithSameOpcodeHands(*${root}, ${info}); }]), - (apply [{ return Helper.applyBuildInstructionSteps(*${root}, ${info});}]) ->; - -// Fold ashr (shl x, C), C -> sext_inreg (C) -def shl_ashr_to_sext_inreg_matchinfo : GIDefMatchData<"std::tuple<Register, int64_t>">; -def shl_ashr_to_sext_inreg : GICombineRule< - (defs root:$root, shl_ashr_to_sext_inreg_matchinfo:$info), - (match (wip_match_opcode G_ASHR): $root, - [{ return Helper.matchAshrShlToSextInreg(*${root}, ${info}); }]), - (apply [{ return Helper.applyAshShlToSextInreg(*${root}, ${info});}]) ->; -// Fold (x & y) -> x or (x & y) -> y when (x & y) is known to equal x or equal y. -def redundant_and_matchinfo : GIDefMatchData<"Register">; -def redundant_and: GICombineRule < - (defs root:$root, redundant_and_matchinfo:$matchinfo), - (match (wip_match_opcode G_AND):$root, - [{ return Helper.matchRedundantAnd(*${root}, ${matchinfo}); }]), - (apply [{ return Helper.replaceSingleDefInstWithReg(*${root}, ${matchinfo}); }]) ->; - -// Fold (x | y) -> x or (x | y) -> y when (x | y) is known to equal x or equal y. 
-def redundant_or_matchinfo : GIDefMatchData<"Register">; -def redundant_or: GICombineRule < - (defs root:$root, redundant_or_matchinfo:$matchinfo), - (match (wip_match_opcode G_OR):$root, - [{ return Helper.matchRedundantOr(*${root}, ${matchinfo}); }]), - (apply [{ return Helper.replaceSingleDefInstWithReg(*${root}, ${matchinfo}); }]) ->; - -// If the input is already sign extended, just drop the extension. -// sext_inreg x, K -> -// if computeNumSignBits(x) >= (x.getScalarSizeInBits() - K + 1) -def redundant_sext_inreg: GICombineRule < - (defs root:$root), - (match (wip_match_opcode G_SEXT_INREG):$root, - [{ return Helper.matchRedundantSExtInReg(*${root}); }]), - (apply [{ return Helper.replaceSingleDefInstWithOperand(*${root}, 1); }]) ->; - -// Fold (anyext (trunc x)) -> x if the source type is same as -// the destination type. -def anyext_trunc_fold_matchinfo : GIDefMatchData<"Register">; -def anyext_trunc_fold: GICombineRule < - (defs root:$root, anyext_trunc_fold_matchinfo:$matchinfo), - (match (wip_match_opcode G_ANYEXT):$root, - [{ return Helper.matchCombineAnyExtTrunc(*${root}, ${matchinfo}); }]), - (apply [{ return Helper.applyCombineAnyExtTrunc(*${root}, ${matchinfo}); }]) ->; - -// Fold ([asz]ext ([asz]ext x)) -> ([asz]ext x). -def ext_ext_fold_matchinfo : GIDefMatchData<"std::tuple<Register, unsigned>">; -def ext_ext_fold: GICombineRule < - (defs root:$root, ext_ext_fold_matchinfo:$matchinfo), - (match (wip_match_opcode G_ANYEXT, G_SEXT, G_ZEXT):$root, - [{ return Helper.matchCombineExtOfExt(*${root}, ${matchinfo}); }]), - (apply [{ return Helper.applyCombineExtOfExt(*${root}, ${matchinfo}); }]) ->; - -def not_cmp_fold_matchinfo : GIDefMatchData<"SmallVector<Register, 4>">; -def not_cmp_fold : GICombineRule< - (defs root:$d, not_cmp_fold_matchinfo:$info), - (match (wip_match_opcode G_XOR): $d, - [{ return Helper.matchNotCmp(*${d}, ${info}); }]), - (apply [{ return Helper.applyNotCmp(*${d}, ${info}); }]) ->; - -// Fold (fneg (fneg x)) -> x. -def fneg_fneg_fold_matchinfo : GIDefMatchData<"Register">; -def fneg_fneg_fold: GICombineRule < - (defs root:$root, fneg_fneg_fold_matchinfo:$matchinfo), - (match (wip_match_opcode G_FNEG):$root, - [{ return Helper.matchCombineFNegOfFNeg(*${root}, ${matchinfo}); }]), - (apply [{ return Helper.replaceSingleDefInstWithReg(*${root}, ${matchinfo}); }]) ->; - -// Fold (unmerge(merge x, y, z)) -> z, y, z. -def unmerge_merge_matchinfo : GIDefMatchData<"SmallVector<Register, 8>">; -def unmerge_merge : GICombineRule< - (defs root:$d, unmerge_merge_matchinfo:$info), - (match (wip_match_opcode G_UNMERGE_VALUES): $d, - [{ return Helper.matchCombineUnmergeMergeToPlainValues(*${d}, ${info}); }]), - (apply [{ return Helper.applyCombineUnmergeMergeToPlainValues(*${d}, ${info}); }]) ->; - -// Fold (fabs (fabs x)) -> (fabs x). -def fabs_fabs_fold_matchinfo : GIDefMatchData<"Register">; -def fabs_fabs_fold: GICombineRule< - (defs root:$root, fabs_fabs_fold_matchinfo:$matchinfo), - (match (wip_match_opcode G_FABS):$root, - [{ return Helper.matchCombineFAbsOfFAbs(*${root}, ${matchinfo}); }]), - (apply [{ return Helper.applyCombineFAbsOfFAbs(*${root}, ${matchinfo}); }]) ->; - -// Fold (unmerge cst) -> cst1, cst2, ... 
-def unmerge_cst_matchinfo : GIDefMatchData<"SmallVector<APInt, 8>">; -def unmerge_cst : GICombineRule< - (defs root:$d, unmerge_cst_matchinfo:$info), - (match (wip_match_opcode G_UNMERGE_VALUES): $d, - [{ return Helper.matchCombineUnmergeConstant(*${d}, ${info}); }]), - (apply [{ return Helper.applyCombineUnmergeConstant(*${d}, ${info}); }]) ->; - -// Transform x,y<dead> = unmerge z -> x = trunc z. -def unmerge_dead_to_trunc : GICombineRule< - (defs root:$d), - (match (wip_match_opcode G_UNMERGE_VALUES): $d, - [{ return Helper.matchCombineUnmergeWithDeadLanesToTrunc(*${d}); }]), - (apply [{ return Helper.applyCombineUnmergeWithDeadLanesToTrunc(*${d}); }]) ->; - -// Transform x,y = unmerge(zext(z)) -> x = zext z; y = 0. -def unmerge_zext_to_zext : GICombineRule< - (defs root:$d), - (match (wip_match_opcode G_UNMERGE_VALUES): $d, - [{ return Helper.matchCombineUnmergeZExtToZExt(*${d}); }]), - (apply [{ return Helper.applyCombineUnmergeZExtToZExt(*${d}); }]) ->; - -// Fold trunc ([asz]ext x) -> x or ([asz]ext x) or (trunc x). -def trunc_ext_fold_matchinfo : GIDefMatchData<"std::pair<Register, unsigned>">; -def trunc_ext_fold: GICombineRule < - (defs root:$root, trunc_ext_fold_matchinfo:$matchinfo), - (match (wip_match_opcode G_TRUNC):$root, - [{ return Helper.matchCombineTruncOfExt(*${root}, ${matchinfo}); }]), - (apply [{ return Helper.applyCombineTruncOfExt(*${root}, ${matchinfo}); }]) ->; - -// Fold trunc (shl x, K) -> shl (trunc x), K => K < VT.getScalarSizeInBits(). -def trunc_shl_matchinfo : GIDefMatchData<"std::pair<Register, Register>">; -def trunc_shl: GICombineRule < - (defs root:$root, trunc_shl_matchinfo:$matchinfo), - (match (wip_match_opcode G_TRUNC):$root, - [{ return Helper.matchCombineTruncOfShl(*${root}, ${matchinfo}); }]), - (apply [{ return Helper.applyCombineTruncOfShl(*${root}, ${matchinfo}); }]) ->; - -// Transform (mul x, -1) -> (sub 0, x) -def mul_by_neg_one: GICombineRule < - (defs root:$root), - (match (wip_match_opcode G_MUL):$root, - [{ return Helper.matchConstantOp(${root}->getOperand(2), -1); }]), - (apply [{ return Helper.applyCombineMulByNegativeOne(*${root}); }]) ->; - -// Fold (xor (and x, y), y) -> (and (not x), y) -def xor_of_and_with_same_reg_matchinfo : - GIDefMatchData<"std::pair<Register, Register>">; -def xor_of_and_with_same_reg: GICombineRule < - (defs root:$root, xor_of_and_with_same_reg_matchinfo:$matchinfo), - (match (wip_match_opcode G_XOR):$root, - [{ return Helper.matchXorOfAndWithSameReg(*${root}, ${matchinfo}); }]), - (apply [{ return Helper.applyXorOfAndWithSameReg(*${root}, ${matchinfo}); }]) ->; - -// Transform (ptr_add 0, x) -> (int_to_ptr x) -def ptr_add_with_zero: GICombineRule< - (defs root:$root), - (match (wip_match_opcode G_PTR_ADD):$root, - [{ return Helper.matchPtrAddZero(*${root}); }]), - (apply [{ return Helper.applyPtrAddZero(*${root}); }])>; - -def regs_small_vec : GIDefMatchData<"SmallVector<Register, 4>">; -def combine_insert_vec_elts_build_vector : GICombineRule< - (defs root:$root, regs_small_vec:$info), - (match (wip_match_opcode G_INSERT_VECTOR_ELT):$root, - [{ return Helper.matchCombineInsertVecElts(*${root}, ${info}); }]), - (apply [{ return Helper.applyCombineInsertVecElts(*${root}, ${info}); }])>; - -def load_or_combine_matchdata : -GIDefMatchData<"std::function<void(MachineIRBuilder &)>">; -def load_or_combine : GICombineRule< - (defs root:$root, load_or_combine_matchdata:$info), - (match (wip_match_opcode G_OR):$root, - [{ return Helper.matchLoadOrCombine(*${root}, ${info}); }]), - (apply [{ return 
Helper.applyLoadOrCombine(*${root}, ${info}); }])>; - -// Currently only the one combine above. -def insert_vec_elt_combines : GICombineGroup< - [combine_insert_vec_elts_build_vector]>; - +// Fold fp_op(cst) to the constant result of the floating point operation. +def constant_fp_op_matchinfo: GIDefMatchData<"Optional<APFloat>">; +def constant_fp_op: GICombineRule < + (defs root:$root, constant_fp_op_matchinfo:$info), + (match (wip_match_opcode G_FNEG, G_FABS, G_FPTRUNC, G_FSQRT, G_FLOG2):$root, + [{ return Helper.matchCombineConstantFoldFpUnary(*${root}, ${info}); }]), + (apply [{ return Helper.applyCombineConstantFoldFpUnary(*${root}, ${info}); }]) +>; + +// Fold int2ptr(ptr2int(x)) -> x +def p2i_to_i2p_matchinfo: GIDefMatchData<"Register">; +def p2i_to_i2p: GICombineRule< + (defs root:$root, p2i_to_i2p_matchinfo:$info), + (match (wip_match_opcode G_INTTOPTR):$root, + [{ return Helper.matchCombineI2PToP2I(*${root}, ${info}); }]), + (apply [{ return Helper.applyCombineI2PToP2I(*${root}, ${info}); }]) +>; + +// Fold ptr2int(int2ptr(x)) -> x +def i2p_to_p2i_matchinfo: GIDefMatchData<"Register">; +def i2p_to_p2i: GICombineRule< + (defs root:$root, i2p_to_p2i_matchinfo:$info), + (match (wip_match_opcode G_PTRTOINT):$root, + [{ return Helper.matchCombineP2IToI2P(*${root}, ${info}); }]), + (apply [{ return Helper.applyCombineP2IToI2P(*${root}, ${info}); }]) +>; + +// Fold add ptrtoint(x), y -> ptrtoint (ptr_add x), y +def add_p2i_to_ptradd_matchinfo : GIDefMatchData<"std::pair<Register, bool>">; +def add_p2i_to_ptradd : GICombineRule< + (defs root:$root, add_p2i_to_ptradd_matchinfo:$info), + (match (wip_match_opcode G_ADD):$root, + [{ return Helper.matchCombineAddP2IToPtrAdd(*${root}, ${info}); }]), + (apply [{ return Helper.applyCombineAddP2IToPtrAdd(*${root}, ${info}); }]) +>; + +// Fold (ptr_add (int2ptr C1), C2) -> C1 + C2 +def const_ptradd_to_i2p_matchinfo : GIDefMatchData<"int64_t">; +def const_ptradd_to_i2p: GICombineRule< + (defs root:$root, const_ptradd_to_i2p_matchinfo:$info), + (match (wip_match_opcode G_PTR_ADD):$root, + [{ return Helper.matchCombineConstPtrAddToI2P(*${root}, ${info}); }]), + (apply [{ return Helper.applyCombineConstPtrAddToI2P(*${root}, ${info}); }]) +>; + +// Simplify: (logic_op (op x...), (op y...)) -> (op (logic_op x, y)) +def hoist_logic_op_with_same_opcode_hands: GICombineRule < + (defs root:$root, instruction_steps_matchdata:$info), + (match (wip_match_opcode G_AND, G_OR, G_XOR):$root, + [{ return Helper.matchHoistLogicOpWithSameOpcodeHands(*${root}, ${info}); }]), + (apply [{ return Helper.applyBuildInstructionSteps(*${root}, ${info});}]) +>; + +// Fold ashr (shl x, C), C -> sext_inreg (C) +def shl_ashr_to_sext_inreg_matchinfo : GIDefMatchData<"std::tuple<Register, int64_t>">; +def shl_ashr_to_sext_inreg : GICombineRule< + (defs root:$root, shl_ashr_to_sext_inreg_matchinfo:$info), + (match (wip_match_opcode G_ASHR): $root, + [{ return Helper.matchAshrShlToSextInreg(*${root}, ${info}); }]), + (apply [{ return Helper.applyAshShlToSextInreg(*${root}, ${info});}]) +>; +// Fold (x & y) -> x or (x & y) -> y when (x & y) is known to equal x or equal y. 
+def redundant_and_matchinfo : GIDefMatchData<"Register">; +def redundant_and: GICombineRule < + (defs root:$root, redundant_and_matchinfo:$matchinfo), + (match (wip_match_opcode G_AND):$root, + [{ return Helper.matchRedundantAnd(*${root}, ${matchinfo}); }]), + (apply [{ return Helper.replaceSingleDefInstWithReg(*${root}, ${matchinfo}); }]) +>; + +// Fold (x | y) -> x or (x | y) -> y when (x | y) is known to equal x or equal y. +def redundant_or_matchinfo : GIDefMatchData<"Register">; +def redundant_or: GICombineRule < + (defs root:$root, redundant_or_matchinfo:$matchinfo), + (match (wip_match_opcode G_OR):$root, + [{ return Helper.matchRedundantOr(*${root}, ${matchinfo}); }]), + (apply [{ return Helper.replaceSingleDefInstWithReg(*${root}, ${matchinfo}); }]) +>; + +// If the input is already sign extended, just drop the extension. +// sext_inreg x, K -> +// if computeNumSignBits(x) >= (x.getScalarSizeInBits() - K + 1) +def redundant_sext_inreg: GICombineRule < + (defs root:$root), + (match (wip_match_opcode G_SEXT_INREG):$root, + [{ return Helper.matchRedundantSExtInReg(*${root}); }]), + (apply [{ return Helper.replaceSingleDefInstWithOperand(*${root}, 1); }]) +>; + +// Fold (anyext (trunc x)) -> x if the source type is same as +// the destination type. +def anyext_trunc_fold_matchinfo : GIDefMatchData<"Register">; +def anyext_trunc_fold: GICombineRule < + (defs root:$root, anyext_trunc_fold_matchinfo:$matchinfo), + (match (wip_match_opcode G_ANYEXT):$root, + [{ return Helper.matchCombineAnyExtTrunc(*${root}, ${matchinfo}); }]), + (apply [{ return Helper.applyCombineAnyExtTrunc(*${root}, ${matchinfo}); }]) +>; + +// Fold ([asz]ext ([asz]ext x)) -> ([asz]ext x). +def ext_ext_fold_matchinfo : GIDefMatchData<"std::tuple<Register, unsigned>">; +def ext_ext_fold: GICombineRule < + (defs root:$root, ext_ext_fold_matchinfo:$matchinfo), + (match (wip_match_opcode G_ANYEXT, G_SEXT, G_ZEXT):$root, + [{ return Helper.matchCombineExtOfExt(*${root}, ${matchinfo}); }]), + (apply [{ return Helper.applyCombineExtOfExt(*${root}, ${matchinfo}); }]) +>; + +def not_cmp_fold_matchinfo : GIDefMatchData<"SmallVector<Register, 4>">; +def not_cmp_fold : GICombineRule< + (defs root:$d, not_cmp_fold_matchinfo:$info), + (match (wip_match_opcode G_XOR): $d, + [{ return Helper.matchNotCmp(*${d}, ${info}); }]), + (apply [{ return Helper.applyNotCmp(*${d}, ${info}); }]) +>; + +// Fold (fneg (fneg x)) -> x. +def fneg_fneg_fold_matchinfo : GIDefMatchData<"Register">; +def fneg_fneg_fold: GICombineRule < + (defs root:$root, fneg_fneg_fold_matchinfo:$matchinfo), + (match (wip_match_opcode G_FNEG):$root, + [{ return Helper.matchCombineFNegOfFNeg(*${root}, ${matchinfo}); }]), + (apply [{ return Helper.replaceSingleDefInstWithReg(*${root}, ${matchinfo}); }]) +>; + +// Fold (unmerge(merge x, y, z)) -> z, y, z. +def unmerge_merge_matchinfo : GIDefMatchData<"SmallVector<Register, 8>">; +def unmerge_merge : GICombineRule< + (defs root:$d, unmerge_merge_matchinfo:$info), + (match (wip_match_opcode G_UNMERGE_VALUES): $d, + [{ return Helper.matchCombineUnmergeMergeToPlainValues(*${d}, ${info}); }]), + (apply [{ return Helper.applyCombineUnmergeMergeToPlainValues(*${d}, ${info}); }]) +>; + +// Fold (fabs (fabs x)) -> (fabs x). 
+def fabs_fabs_fold_matchinfo : GIDefMatchData<"Register">; +def fabs_fabs_fold: GICombineRule< + (defs root:$root, fabs_fabs_fold_matchinfo:$matchinfo), + (match (wip_match_opcode G_FABS):$root, + [{ return Helper.matchCombineFAbsOfFAbs(*${root}, ${matchinfo}); }]), + (apply [{ return Helper.applyCombineFAbsOfFAbs(*${root}, ${matchinfo}); }]) +>; + +// Fold (unmerge cst) -> cst1, cst2, ... +def unmerge_cst_matchinfo : GIDefMatchData<"SmallVector<APInt, 8>">; +def unmerge_cst : GICombineRule< + (defs root:$d, unmerge_cst_matchinfo:$info), + (match (wip_match_opcode G_UNMERGE_VALUES): $d, + [{ return Helper.matchCombineUnmergeConstant(*${d}, ${info}); }]), + (apply [{ return Helper.applyCombineUnmergeConstant(*${d}, ${info}); }]) +>; + +// Transform x,y<dead> = unmerge z -> x = trunc z. +def unmerge_dead_to_trunc : GICombineRule< + (defs root:$d), + (match (wip_match_opcode G_UNMERGE_VALUES): $d, + [{ return Helper.matchCombineUnmergeWithDeadLanesToTrunc(*${d}); }]), + (apply [{ return Helper.applyCombineUnmergeWithDeadLanesToTrunc(*${d}); }]) +>; + +// Transform x,y = unmerge(zext(z)) -> x = zext z; y = 0. +def unmerge_zext_to_zext : GICombineRule< + (defs root:$d), + (match (wip_match_opcode G_UNMERGE_VALUES): $d, + [{ return Helper.matchCombineUnmergeZExtToZExt(*${d}); }]), + (apply [{ return Helper.applyCombineUnmergeZExtToZExt(*${d}); }]) +>; + +// Fold trunc ([asz]ext x) -> x or ([asz]ext x) or (trunc x). +def trunc_ext_fold_matchinfo : GIDefMatchData<"std::pair<Register, unsigned>">; +def trunc_ext_fold: GICombineRule < + (defs root:$root, trunc_ext_fold_matchinfo:$matchinfo), + (match (wip_match_opcode G_TRUNC):$root, + [{ return Helper.matchCombineTruncOfExt(*${root}, ${matchinfo}); }]), + (apply [{ return Helper.applyCombineTruncOfExt(*${root}, ${matchinfo}); }]) +>; + +// Fold trunc (shl x, K) -> shl (trunc x), K => K < VT.getScalarSizeInBits(). 
+def trunc_shl_matchinfo : GIDefMatchData<"std::pair<Register, Register>">; +def trunc_shl: GICombineRule < + (defs root:$root, trunc_shl_matchinfo:$matchinfo), + (match (wip_match_opcode G_TRUNC):$root, + [{ return Helper.matchCombineTruncOfShl(*${root}, ${matchinfo}); }]), + (apply [{ return Helper.applyCombineTruncOfShl(*${root}, ${matchinfo}); }]) +>; + +// Transform (mul x, -1) -> (sub 0, x) +def mul_by_neg_one: GICombineRule < + (defs root:$root), + (match (wip_match_opcode G_MUL):$root, + [{ return Helper.matchConstantOp(${root}->getOperand(2), -1); }]), + (apply [{ return Helper.applyCombineMulByNegativeOne(*${root}); }]) +>; + +// Fold (xor (and x, y), y) -> (and (not x), y) +def xor_of_and_with_same_reg_matchinfo : + GIDefMatchData<"std::pair<Register, Register>">; +def xor_of_and_with_same_reg: GICombineRule < + (defs root:$root, xor_of_and_with_same_reg_matchinfo:$matchinfo), + (match (wip_match_opcode G_XOR):$root, + [{ return Helper.matchXorOfAndWithSameReg(*${root}, ${matchinfo}); }]), + (apply [{ return Helper.applyXorOfAndWithSameReg(*${root}, ${matchinfo}); }]) +>; + +// Transform (ptr_add 0, x) -> (int_to_ptr x) +def ptr_add_with_zero: GICombineRule< + (defs root:$root), + (match (wip_match_opcode G_PTR_ADD):$root, + [{ return Helper.matchPtrAddZero(*${root}); }]), + (apply [{ return Helper.applyPtrAddZero(*${root}); }])>; + +def regs_small_vec : GIDefMatchData<"SmallVector<Register, 4>">; +def combine_insert_vec_elts_build_vector : GICombineRule< + (defs root:$root, regs_small_vec:$info), + (match (wip_match_opcode G_INSERT_VECTOR_ELT):$root, + [{ return Helper.matchCombineInsertVecElts(*${root}, ${info}); }]), + (apply [{ return Helper.applyCombineInsertVecElts(*${root}, ${info}); }])>; + +def load_or_combine_matchdata : +GIDefMatchData<"std::function<void(MachineIRBuilder &)>">; +def load_or_combine : GICombineRule< + (defs root:$root, load_or_combine_matchdata:$info), + (match (wip_match_opcode G_OR):$root, + [{ return Helper.matchLoadOrCombine(*${root}, ${info}); }]), + (apply [{ return Helper.applyLoadOrCombine(*${root}, ${info}); }])>; + +// Currently only the one combine above. +def insert_vec_elt_combines : GICombineGroup< + [combine_insert_vec_elts_build_vector]>; + // FIXME: These should use the custom predicate feature once it lands. 
def undef_combines : GICombineGroup<[undef_to_fp_zero, undef_to_int_zero, undef_to_negative_one, - binop_left_undef_to_zero, + binop_left_undef_to_zero, propagate_undef_any_op, propagate_undef_all_ops, propagate_undef_shuffle_mask, @@ -568,31 +568,31 @@ def undef_combines : GICombineGroup<[undef_to_fp_zero, undef_to_int_zero, def identity_combines : GICombineGroup<[select_same_val, right_identity_zero, binop_same_val, binop_left_to_zero, - binop_right_to_zero, p2i_to_i2p, - i2p_to_p2i, anyext_trunc_fold, - fneg_fneg_fold, right_identity_one]>; - -def const_combines : GICombineGroup<[constant_fp_op, const_ptradd_to_i2p]>; - -def known_bits_simplifications : GICombineGroup<[ - redundant_and, redundant_sext_inreg, redundant_or, urem_pow2_to_mask]>; - -def width_reduction_combines : GICombineGroup<[reduce_shl_of_extend]>; - -def select_combines : GICombineGroup<[select_undef_cmp, select_constant_cmp]>; - -def trivial_combines : GICombineGroup<[copy_prop, mul_to_shl, add_p2i_to_ptradd, - mul_by_neg_one]>; - -def all_combines : GICombineGroup<[trivial_combines, insert_vec_elt_combines, - ptr_add_immed_chain, combines_for_extload, combine_indexed_load_store, - undef_combines, identity_combines, simplify_add_to_sub, - hoist_logic_op_with_same_opcode_hands, - shl_ashr_to_sext_inreg, sext_inreg_of_load, - width_reduction_combines, select_combines, - known_bits_simplifications, ext_ext_fold, - not_cmp_fold, opt_brcond_by_inverting_cond, - unmerge_merge, fabs_fabs_fold, unmerge_cst, unmerge_dead_to_trunc, - unmerge_zext_to_zext, trunc_ext_fold, trunc_shl, - const_combines, xor_of_and_with_same_reg, ptr_add_with_zero, - shift_immed_chain, shift_of_shifted_logic_chain, load_or_combine]>; + binop_right_to_zero, p2i_to_i2p, + i2p_to_p2i, anyext_trunc_fold, + fneg_fneg_fold, right_identity_one]>; + +def const_combines : GICombineGroup<[constant_fp_op, const_ptradd_to_i2p]>; + +def known_bits_simplifications : GICombineGroup<[ + redundant_and, redundant_sext_inreg, redundant_or, urem_pow2_to_mask]>; + +def width_reduction_combines : GICombineGroup<[reduce_shl_of_extend]>; + +def select_combines : GICombineGroup<[select_undef_cmp, select_constant_cmp]>; + +def trivial_combines : GICombineGroup<[copy_prop, mul_to_shl, add_p2i_to_ptradd, + mul_by_neg_one]>; + +def all_combines : GICombineGroup<[trivial_combines, insert_vec_elt_combines, + ptr_add_immed_chain, combines_for_extload, combine_indexed_load_store, + undef_combines, identity_combines, simplify_add_to_sub, + hoist_logic_op_with_same_opcode_hands, + shl_ashr_to_sext_inreg, sext_inreg_of_load, + width_reduction_combines, select_combines, + known_bits_simplifications, ext_ext_fold, + not_cmp_fold, opt_brcond_by_inverting_cond, + unmerge_merge, fabs_fabs_fold, unmerge_cst, unmerge_dead_to_trunc, + unmerge_zext_to_zext, trunc_ext_fold, trunc_shl, + const_combines, xor_of_and_with_same_reg, ptr_add_with_zero, + shift_immed_chain, shift_of_shifted_logic_chain, load_or_combine]>; diff --git a/contrib/libs/llvm12/include/llvm/Target/GlobalISel/SelectionDAGCompat.td b/contrib/libs/llvm12/include/llvm/Target/GlobalISel/SelectionDAGCompat.td index 4520354169..6fb8a6b15d 100644 --- a/contrib/libs/llvm12/include/llvm/Target/GlobalISel/SelectionDAGCompat.td +++ b/contrib/libs/llvm12/include/llvm/Target/GlobalISel/SelectionDAGCompat.td @@ -26,8 +26,8 @@ class GINodeEquiv<Instruction i, SDNode node> { // SelectionDAG has separate nodes for atomic and non-atomic memory operations // (ISD::LOAD, ISD::ATOMIC_LOAD, ISD::STORE, ISD::ATOMIC_STORE) but GlobalISel // stores 
this information in the MachineMemoryOperand. - bit CheckMMOIsNonAtomic = false; - bit CheckMMOIsAtomic = false; + bit CheckMMOIsNonAtomic = false; + bit CheckMMOIsAtomic = false; // SelectionDAG has one node for all loads and uses predicates to // differentiate them. GlobalISel on the other hand uses separate opcodes. @@ -52,8 +52,8 @@ def : GINodeEquiv<G_BITCAST, bitconvert>; def : GINodeEquiv<G_CONSTANT, imm>; def : GINodeEquiv<G_FCONSTANT, fpimm>; def : GINodeEquiv<G_IMPLICIT_DEF, undef>; -def : GINodeEquiv<G_FRAME_INDEX, frameindex>; -def : GINodeEquiv<G_BLOCK_ADDR, blockaddress>; +def : GINodeEquiv<G_FRAME_INDEX, frameindex>; +def : GINodeEquiv<G_BLOCK_ADDR, blockaddress>; def : GINodeEquiv<G_ADD, add>; def : GINodeEquiv<G_SUB, sub>; def : GINodeEquiv<G_MUL, mul>; @@ -73,16 +73,16 @@ def : GINodeEquiv<G_SADDSAT, saddsat>; def : GINodeEquiv<G_UADDSAT, uaddsat>; def : GINodeEquiv<G_SSUBSAT, ssubsat>; def : GINodeEquiv<G_USUBSAT, usubsat>; -def : GINodeEquiv<G_SSHLSAT, sshlsat>; -def : GINodeEquiv<G_USHLSAT, ushlsat>; -def : GINodeEquiv<G_SMULFIX, smulfix>; -def : GINodeEquiv<G_UMULFIX, umulfix>; -def : GINodeEquiv<G_SMULFIXSAT, smulfixsat>; -def : GINodeEquiv<G_UMULFIXSAT, umulfixsat>; -def : GINodeEquiv<G_SDIVFIX, sdivfix>; -def : GINodeEquiv<G_UDIVFIX, udivfix>; -def : GINodeEquiv<G_SDIVFIXSAT, sdivfixsat>; -def : GINodeEquiv<G_UDIVFIXSAT, udivfixsat>; +def : GINodeEquiv<G_SSHLSAT, sshlsat>; +def : GINodeEquiv<G_USHLSAT, ushlsat>; +def : GINodeEquiv<G_SMULFIX, smulfix>; +def : GINodeEquiv<G_UMULFIX, umulfix>; +def : GINodeEquiv<G_SMULFIXSAT, smulfixsat>; +def : GINodeEquiv<G_UMULFIXSAT, umulfixsat>; +def : GINodeEquiv<G_SDIVFIX, sdivfix>; +def : GINodeEquiv<G_UDIVFIX, udivfix>; +def : GINodeEquiv<G_SDIVFIXSAT, sdivfixsat>; +def : GINodeEquiv<G_UDIVFIXSAT, udivfixsat>; def : GINodeEquiv<G_SELECT, select>; def : GINodeEquiv<G_FNEG, fneg>; def : GINodeEquiv<G_FPEXT, fpextend>; @@ -116,7 +116,7 @@ def : GINodeEquiv<G_CTTZ, cttz>; def : GINodeEquiv<G_CTLZ_ZERO_UNDEF, ctlz_zero_undef>; def : GINodeEquiv<G_CTTZ_ZERO_UNDEF, cttz_zero_undef>; def : GINodeEquiv<G_CTPOP, ctpop>; -def : GINodeEquiv<G_EXTRACT_VECTOR_ELT, extractelt>; +def : GINodeEquiv<G_EXTRACT_VECTOR_ELT, extractelt>; def : GINodeEquiv<G_CONCAT_VECTORS, concat_vectors>; def : GINodeEquiv<G_BUILD_VECTOR, build_vector>; def : GINodeEquiv<G_FCEIL, fceil>; @@ -129,13 +129,13 @@ def : GINodeEquiv<G_FRINT, frint>; def : GINodeEquiv<G_FNEARBYINT, fnearbyint>; def : GINodeEquiv<G_INTRINSIC_TRUNC, ftrunc>; def : GINodeEquiv<G_INTRINSIC_ROUND, fround>; -def : GINodeEquiv<G_INTRINSIC_LRINT, lrint>; +def : GINodeEquiv<G_INTRINSIC_LRINT, lrint>; def : GINodeEquiv<G_FCOPYSIGN, fcopysign>; def : GINodeEquiv<G_SMIN, smin>; def : GINodeEquiv<G_SMAX, smax>; def : GINodeEquiv<G_UMIN, umin>; def : GINodeEquiv<G_UMAX, umax>; -def : GINodeEquiv<G_ABS, abs>; +def : GINodeEquiv<G_ABS, abs>; def : GINodeEquiv<G_FMINNUM, fminnum>; def : GINodeEquiv<G_FMAXNUM, fmaxnum>; def : GINodeEquiv<G_FMINNUM_IEEE, fminnum_ieee>; @@ -158,7 +158,7 @@ def : GINodeEquiv<G_STRICT_FSQRT, strict_fsqrt>; // separate nodes for them. This GINodeEquiv maps the non-atomic loads to // G_LOAD with a non-atomic MachineMemOperand. def : GINodeEquiv<G_LOAD, ld> { - let CheckMMOIsNonAtomic = true; + let CheckMMOIsNonAtomic = true; let IfSignExtend = G_SEXTLOAD; let IfZeroExtend = G_ZEXTLOAD; } @@ -174,19 +174,19 @@ def : GINodeEquiv<G_ICMP, setcc> { // G_STORE handles both atomic and non-atomic stores where as SelectionDAG had // separate nodes for them. 
This GINodeEquiv maps the non-atomic stores to // G_STORE with a non-atomic MachineMemOperand. -def : GINodeEquiv<G_STORE, st> { let CheckMMOIsNonAtomic = true; } +def : GINodeEquiv<G_STORE, st> { let CheckMMOIsNonAtomic = true; } def : GINodeEquiv<G_LOAD, atomic_load> { - let CheckMMOIsNonAtomic = false; - let CheckMMOIsAtomic = true; + let CheckMMOIsNonAtomic = false; + let CheckMMOIsAtomic = true; +} + +// Operands are swapped for atomic_store vs. regular store +def : GINodeEquiv<G_STORE, atomic_store> { + let CheckMMOIsNonAtomic = false; + let CheckMMOIsAtomic = true; } -// Operands are swapped for atomic_store vs. regular store -def : GINodeEquiv<G_STORE, atomic_store> { - let CheckMMOIsNonAtomic = false; - let CheckMMOIsAtomic = true; -} - def : GINodeEquiv<G_ATOMIC_CMPXCHG, atomic_cmp_swap>; def : GINodeEquiv<G_ATOMICRMW_XCHG, atomic_swap>; def : GINodeEquiv<G_ATOMICRMW_ADD, atomic_load_add>; diff --git a/contrib/libs/llvm12/include/llvm/Target/Target.td b/contrib/libs/llvm12/include/llvm/Target/Target.td index 0261df6fb2..1c97d70a47 100644 --- a/contrib/libs/llvm12/include/llvm/Target/Target.td +++ b/contrib/libs/llvm12/include/llvm/Target/Target.td @@ -110,9 +110,9 @@ class SubRegIndex<int size, int offset = 0> { // ComposedSubRegIndex - A sub-register that is the result of composing A and B. // Offset is set to the sum of A and B's Offsets. Size is set to B's Size. class ComposedSubRegIndex<SubRegIndex A, SubRegIndex B> - : SubRegIndex<B.Size, !cond(!eq(A.Offset, -1): -1, - !eq(B.Offset, -1): -1, - true: !add(A.Offset, B.Offset))> { + : SubRegIndex<B.Size, !cond(!eq(A.Offset, -1): -1, + !eq(B.Offset, -1): -1, + true: !add(A.Offset, B.Offset))> { // See SubRegIndex. let ComposedOf = [A, B]; } @@ -175,12 +175,12 @@ class Register<string n, list<string> altNames = []> { // completely determined by the value of its sub-registers. For example, the // x86 register AX is covered by its sub-registers AL and AH, but EAX is not // covered by its sub-register AX. - bit CoveredBySubRegs = false; + bit CoveredBySubRegs = false; // HWEncoding - The target specific hardware encoding for this register. bits<16> HWEncoding = 0; - bit isArtificial = false; + bit isArtificial = false; } // RegisterWithSubRegs - This can be used to define instances of Register which @@ -252,7 +252,7 @@ class RegisterClass<string namespace, list<ValueType> regTypes, int alignment, // isAllocatable - Specify that the register class can be used for virtual // registers and register allocation. Some register classes are only used to // model instruction operand constraints, and should have isAllocatable = 0. - bit isAllocatable = true; + bit isAllocatable = true; // AltOrders - List of alternative allocation orders. The default order is // MemberList itself, and that is good enough for most targets since the @@ -278,7 +278,7 @@ class RegisterClass<string namespace, list<ValueType> regTypes, int alignment, // Generate register pressure set for this register class and any class // synthesized from it. Set to 0 to inhibit unneeded pressure sets. - bit GeneratePressureSet = true; + bit GeneratePressureSet = true; // Weight override for register pressure calculation. This is the value // TargetRegisterClass::getRegClassWeight() will return. The weight is in @@ -452,7 +452,7 @@ class InstructionEncoding { // DecodeInstB() is not able to determine if all possible values of ?? are // valid or not. If DecodeInstB() returns Fail the decoder will attempt to // decode the bitpattern as InstA too. 
- bit hasCompleteDecoder = true; + bit hasCompleteDecoder = true; } // Allows specifying an InstructionEncoding by HwMode. If an Instruction specifies @@ -506,59 +506,59 @@ class Instruction : InstructionEncoding { // Indicates if this is a pre-isel opcode that should be // legalized/regbankselected/selected. - bit isPreISelOpcode = false; + bit isPreISelOpcode = false; // These bits capture information about the high-level semantics of the // instruction. - bit isReturn = false; // Is this instruction a return instruction? - bit isBranch = false; // Is this instruction a branch instruction? - bit isEHScopeReturn = false; // Does this instruction end an EH scope? - bit isIndirectBranch = false; // Is this instruction an indirect branch? - bit isCompare = false; // Is this instruction a comparison instruction? - bit isMoveImm = false; // Is this instruction a move immediate instruction? - bit isMoveReg = false; // Is this instruction a move register instruction? - bit isBitcast = false; // Is this instruction a bitcast instruction? - bit isSelect = false; // Is this instruction a select instruction? - bit isBarrier = false; // Can control flow fall through this instruction? - bit isCall = false; // Is this instruction a call instruction? - bit isAdd = false; // Is this instruction an add instruction? - bit isTrap = false; // Is this instruction a trap instruction? - bit canFoldAsLoad = false; // Can this be folded as a simple memory operand? - bit mayLoad = ?; // Is it possible for this inst to read memory? - bit mayStore = ?; // Is it possible for this inst to write memory? - bit mayRaiseFPException = false; // Can this raise a floating-point exception? - bit isConvertibleToThreeAddress = false; // Can this 2-addr instruction promote? - bit isCommutable = false; // Is this 3 operand instruction commutable? - bit isTerminator = false; // Is this part of the terminator for a basic block? - bit isReMaterializable = false; // Is this instruction re-materializable? - bit isPredicable = false; // 1 means this instruction is predicable - // even if it does not have any operand - // tablegen can identify as a predicate - bit isUnpredicable = false; // 1 means this instruction is not predicable - // even if it _does_ have a predicate operand - bit hasDelaySlot = false; // Does this instruction have an delay slot? - bit usesCustomInserter = false; // Pseudo instr needing special help. - bit hasPostISelHook = false; // To be *adjusted* after isel by target hook. - bit hasCtrlDep = false; // Does this instruction r/w ctrl-flow chains? - bit isNotDuplicable = false; // Is it unsafe to duplicate this instruction? - bit isConvergent = false; // Is this instruction convergent? - bit isAuthenticated = false; // Does this instruction authenticate a pointer? - bit isAsCheapAsAMove = false; // As cheap (or cheaper) than a move instruction. - bit hasExtraSrcRegAllocReq = false; // Sources have special regalloc requirement? - bit hasExtraDefRegAllocReq = false; // Defs have special regalloc requirement? - bit isRegSequence = false; // Is this instruction a kind of reg sequence? - // If so, make sure to override - // TargetInstrInfo::getRegSequenceLikeInputs. - bit isPseudo = false; // Is this instruction a pseudo-instruction? - // If so, won't have encoding information for - // the [MC]CodeEmitter stuff. - bit isExtractSubreg = false; // Is this instruction a kind of extract subreg? - // If so, make sure to override - // TargetInstrInfo::getExtractSubregLikeInputs. 
- bit isInsertSubreg = false; // Is this instruction a kind of insert subreg? - // If so, make sure to override - // TargetInstrInfo::getInsertSubregLikeInputs. - bit variadicOpsAreDefs = false; // Are variadic operands definitions? + bit isReturn = false; // Is this instruction a return instruction? + bit isBranch = false; // Is this instruction a branch instruction? + bit isEHScopeReturn = false; // Does this instruction end an EH scope? + bit isIndirectBranch = false; // Is this instruction an indirect branch? + bit isCompare = false; // Is this instruction a comparison instruction? + bit isMoveImm = false; // Is this instruction a move immediate instruction? + bit isMoveReg = false; // Is this instruction a move register instruction? + bit isBitcast = false; // Is this instruction a bitcast instruction? + bit isSelect = false; // Is this instruction a select instruction? + bit isBarrier = false; // Can control flow fall through this instruction? + bit isCall = false; // Is this instruction a call instruction? + bit isAdd = false; // Is this instruction an add instruction? + bit isTrap = false; // Is this instruction a trap instruction? + bit canFoldAsLoad = false; // Can this be folded as a simple memory operand? + bit mayLoad = ?; // Is it possible for this inst to read memory? + bit mayStore = ?; // Is it possible for this inst to write memory? + bit mayRaiseFPException = false; // Can this raise a floating-point exception? + bit isConvertibleToThreeAddress = false; // Can this 2-addr instruction promote? + bit isCommutable = false; // Is this 3 operand instruction commutable? + bit isTerminator = false; // Is this part of the terminator for a basic block? + bit isReMaterializable = false; // Is this instruction re-materializable? + bit isPredicable = false; // 1 means this instruction is predicable + // even if it does not have any operand + // tablegen can identify as a predicate + bit isUnpredicable = false; // 1 means this instruction is not predicable + // even if it _does_ have a predicate operand + bit hasDelaySlot = false; // Does this instruction have an delay slot? + bit usesCustomInserter = false; // Pseudo instr needing special help. + bit hasPostISelHook = false; // To be *adjusted* after isel by target hook. + bit hasCtrlDep = false; // Does this instruction r/w ctrl-flow chains? + bit isNotDuplicable = false; // Is it unsafe to duplicate this instruction? + bit isConvergent = false; // Is this instruction convergent? + bit isAuthenticated = false; // Does this instruction authenticate a pointer? + bit isAsCheapAsAMove = false; // As cheap (or cheaper) than a move instruction. + bit hasExtraSrcRegAllocReq = false; // Sources have special regalloc requirement? + bit hasExtraDefRegAllocReq = false; // Defs have special regalloc requirement? + bit isRegSequence = false; // Is this instruction a kind of reg sequence? + // If so, make sure to override + // TargetInstrInfo::getRegSequenceLikeInputs. + bit isPseudo = false; // Is this instruction a pseudo-instruction? + // If so, won't have encoding information for + // the [MC]CodeEmitter stuff. + bit isExtractSubreg = false; // Is this instruction a kind of extract subreg? + // If so, make sure to override + // TargetInstrInfo::getExtractSubregLikeInputs. + bit isInsertSubreg = false; // Is this instruction a kind of insert subreg? + // If so, make sure to override + // TargetInstrInfo::getInsertSubregLikeInputs. + bit variadicOpsAreDefs = false; // Are variadic operands definitions? 
// Does the instruction have side effects that are not captured by any // operands of the instruction or other flags? @@ -581,15 +581,15 @@ class Instruction : InstructionEncoding { // CodeEmitter unchanged, but duplicates a canonical instruction // definition's encoding and should be ignored when constructing the // assembler match tables. - bit isCodeGenOnly = false; + bit isCodeGenOnly = false; // Is this instruction a pseudo instruction for use by the assembler parser. - bit isAsmParserOnly = false; + bit isAsmParserOnly = false; // This instruction is not expected to be queried for scheduling latencies // and therefore needs no scheduling information even for a complete // scheduling model. - bit hasNoSchedulingInfo = false; + bit hasNoSchedulingInfo = false; InstrItinClass Itinerary = NoItinerary;// Execution steps used for scheduling. @@ -630,13 +630,13 @@ class Instruction : InstructionEncoding { /// UseNamedOperandTable - If set, the operand indices of this instruction /// can be queried via the getNamedOperandIdx() function which is generated /// by TableGen. - bit UseNamedOperandTable = false; + bit UseNamedOperandTable = false; /// Should FastISel ignore this instruction. For certain ISAs, they have /// instructions which map to the same ISD Opcode, value type operands and /// instruction selection predicates. FastISel cannot handle such cases, but /// SelectionDAG can. - bit FastISelShouldIgnore = false; + bit FastISelShouldIgnore = false; } /// Defines an additional encoding that disassembles to the given instruction @@ -651,7 +651,7 @@ class AdditionalEncoding<Instruction I> : InstructionEncoding { /// pseudo. class PseudoInstExpansion<dag Result> { dag ResultInst = Result; // The instruction to generate. - bit isPseudo = true; + bit isPseudo = true; } /// Predicates - These are extra conditionals which are turned into instruction @@ -662,7 +662,7 @@ class Predicate<string cond> { /// AssemblerMatcherPredicate - If this feature can be used by the assembler /// matcher, this is true. Targets should set this by inheriting their /// feature from the AssemblerPredicate class in addition to Predicate. - bit AssemblerMatcherPredicate = false; + bit AssemblerMatcherPredicate = false; /// AssemblerCondDag - Set of subtarget features being tested used /// as alternative condition string used for assembler matcher. Must be used @@ -688,7 +688,7 @@ class Predicate<string cond> { /// every function change. Most predicates can leave this at '0'. /// /// Ignored by SelectionDAG, it always recomputes the predicate on every use. - bit RecomputePerFunction = false; + bit RecomputePerFunction = false; } /// NoHonorSignDependentRounding - This predicate is true if support for @@ -788,7 +788,7 @@ class AsmOperandClass { /// marked as IsOptional. /// /// Optional arguments must be at the end of the operand list. - bit IsOptional = false; + bit IsOptional = false; /// The name of the method on the target specific asm parser that returns the /// default operand for this optional operand. This method is only used if @@ -809,7 +809,7 @@ class Operand<ValueType ty> : DAGOperand { ValueType Type = ty; string PrintMethod = "printOperand"; string EncoderMethod = ""; - bit hasCompleteDecoder = true; + bit hasCompleteDecoder = true; string OperandType = "OPERAND_UNKNOWN"; dag MIOperandInfo = (ops); @@ -877,8 +877,8 @@ def f64imm : Operand<f64>; // have the same LLT). 
class TypedOperand<string Ty> : Operand<untyped> { let OperandType = Ty; - bit IsPointer = false; - bit IsImmediate = false; + bit IsPointer = false; + bit IsImmediate = false; } def type0 : TypedOperand<"OPERAND_GENERIC_0">; @@ -888,7 +888,7 @@ def type3 : TypedOperand<"OPERAND_GENERIC_3">; def type4 : TypedOperand<"OPERAND_GENERIC_4">; def type5 : TypedOperand<"OPERAND_GENERIC_5">; -let IsPointer = true in { +let IsPointer = true in { def ptype0 : TypedOperand<"OPERAND_GENERIC_0">; def ptype1 : TypedOperand<"OPERAND_GENERIC_1">; def ptype2 : TypedOperand<"OPERAND_GENERIC_2">; @@ -900,7 +900,7 @@ let IsPointer = true in { // untyped_imm is for operands where isImm() will be true. It currently has no // special behaviour and is only used for clarity. def untyped_imm_0 : TypedOperand<"OPERAND_GENERIC_IMM_0"> { - let IsImmediate = true; + let IsImmediate = true; } /// zero_reg definition - Special node to stand for the zero register. @@ -952,7 +952,7 @@ class InstrInfo { // For instance, while both Sparc and PowerPC are big-endian platforms, the // Sparc manual specifies its instructions in the format [31..0] (big), while // PowerPC specifies them using the format [0..31] (little). - bit isLittleEndianEncoding = false; + bit isLittleEndianEncoding = false; // The instruction properties mayLoad, mayStore, and hasSideEffects are unset // by default, and TableGen will infer their value from the instruction @@ -963,7 +963,7 @@ class InstrInfo { // is set, it will guess a safe value instead. // // This option is a temporary migration help. It will go away. - bit guessInstructionProperties = true; + bit guessInstructionProperties = true; // TableGen's instruction encoder generator has support for matching operands // to bit-field variables both by name and by position. While matching by @@ -975,7 +975,7 @@ class InstrInfo { // This option is temporary; it will go away once the TableGen decoder // generator has better support for complex operands and targets have // migrated away from using positionally encoded operands. - bit decodePositionallyEncodedOperands = false; + bit decodePositionallyEncodedOperands = false; // When set, this indicates that there will be no overlap between those // operands that are matched by ordering (positional operands) and those @@ -984,7 +984,7 @@ class InstrInfo { // This option is temporary; it will go away once the TableGen decoder // generator has better support for complex operands and targets have // migrated away from using positionally encoded operands. - bit noNamedPositionallyEncodedOperands = false; + bit noNamedPositionallyEncodedOperands = false; } // Standard Pseudo Instructions. @@ -994,31 +994,31 @@ class InstrInfo { // targets that set guessInstructionProperties=0. Any local definition of // mayLoad/mayStore takes precedence over these default values. 
class StandardPseudoInstruction : Instruction { - let mayLoad = false; - let mayStore = false; - let isCodeGenOnly = true; - let isPseudo = true; - let hasNoSchedulingInfo = true; + let mayLoad = false; + let mayStore = false; + let isCodeGenOnly = true; + let isPseudo = true; + let hasNoSchedulingInfo = true; let Namespace = "TargetOpcode"; } def PHI : StandardPseudoInstruction { let OutOperandList = (outs unknown:$dst); let InOperandList = (ins variable_ops); let AsmString = "PHINODE"; - let hasSideEffects = false; + let hasSideEffects = false; } def INLINEASM : StandardPseudoInstruction { let OutOperandList = (outs); let InOperandList = (ins variable_ops); let AsmString = ""; - let hasSideEffects = false; // Note side effect is encoded in an operand. + let hasSideEffects = false; // Note side effect is encoded in an operand. } def INLINEASM_BR : StandardPseudoInstruction { let OutOperandList = (outs); let InOperandList = (ins variable_ops); let AsmString = ""; // Unlike INLINEASM, this is always treated as having side-effects. - let hasSideEffects = true; + let hasSideEffects = true; // Despite potentially branching, this instruction is intentionally _not_ // marked as a terminator or a branch. } @@ -1026,177 +1026,177 @@ def CFI_INSTRUCTION : StandardPseudoInstruction { let OutOperandList = (outs); let InOperandList = (ins i32imm:$id); let AsmString = ""; - let hasCtrlDep = true; - let hasSideEffects = false; - let isNotDuplicable = true; + let hasCtrlDep = true; + let hasSideEffects = false; + let isNotDuplicable = true; } def EH_LABEL : StandardPseudoInstruction { let OutOperandList = (outs); let InOperandList = (ins i32imm:$id); let AsmString = ""; - let hasCtrlDep = true; - let hasSideEffects = false; - let isNotDuplicable = true; + let hasCtrlDep = true; + let hasSideEffects = false; + let isNotDuplicable = true; } def GC_LABEL : StandardPseudoInstruction { let OutOperandList = (outs); let InOperandList = (ins i32imm:$id); let AsmString = ""; - let hasCtrlDep = true; - let hasSideEffects = false; - let isNotDuplicable = true; + let hasCtrlDep = true; + let hasSideEffects = false; + let isNotDuplicable = true; } def ANNOTATION_LABEL : StandardPseudoInstruction { let OutOperandList = (outs); let InOperandList = (ins i32imm:$id); let AsmString = ""; - let hasCtrlDep = true; - let hasSideEffects = false; - let isNotDuplicable = true; + let hasCtrlDep = true; + let hasSideEffects = false; + let isNotDuplicable = true; } def KILL : StandardPseudoInstruction { let OutOperandList = (outs); let InOperandList = (ins variable_ops); let AsmString = ""; - let hasSideEffects = false; + let hasSideEffects = false; } def EXTRACT_SUBREG : StandardPseudoInstruction { let OutOperandList = (outs unknown:$dst); let InOperandList = (ins unknown:$supersrc, i32imm:$subidx); let AsmString = ""; - let hasSideEffects = false; + let hasSideEffects = false; } def INSERT_SUBREG : StandardPseudoInstruction { let OutOperandList = (outs unknown:$dst); let InOperandList = (ins unknown:$supersrc, unknown:$subsrc, i32imm:$subidx); let AsmString = ""; - let hasSideEffects = false; + let hasSideEffects = false; let Constraints = "$supersrc = $dst"; } def IMPLICIT_DEF : StandardPseudoInstruction { let OutOperandList = (outs unknown:$dst); let InOperandList = (ins); let AsmString = ""; - let hasSideEffects = false; - let isReMaterializable = true; - let isAsCheapAsAMove = true; + let hasSideEffects = false; + let isReMaterializable = true; + let isAsCheapAsAMove = true; } def SUBREG_TO_REG : StandardPseudoInstruction { 
let OutOperandList = (outs unknown:$dst); let InOperandList = (ins unknown:$implsrc, unknown:$subsrc, i32imm:$subidx); let AsmString = ""; - let hasSideEffects = false; + let hasSideEffects = false; } def COPY_TO_REGCLASS : StandardPseudoInstruction { let OutOperandList = (outs unknown:$dst); let InOperandList = (ins unknown:$src, i32imm:$regclass); let AsmString = ""; - let hasSideEffects = false; - let isAsCheapAsAMove = true; + let hasSideEffects = false; + let isAsCheapAsAMove = true; } def DBG_VALUE : StandardPseudoInstruction { let OutOperandList = (outs); let InOperandList = (ins variable_ops); let AsmString = "DBG_VALUE"; - let hasSideEffects = false; -} -def DBG_INSTR_REF : StandardPseudoInstruction { - let OutOperandList = (outs); - let InOperandList = (ins variable_ops); - let AsmString = "DBG_INSTR_REF"; - let hasSideEffects = false; -} + let hasSideEffects = false; +} +def DBG_INSTR_REF : StandardPseudoInstruction { + let OutOperandList = (outs); + let InOperandList = (ins variable_ops); + let AsmString = "DBG_INSTR_REF"; + let hasSideEffects = false; +} def DBG_LABEL : StandardPseudoInstruction { let OutOperandList = (outs); let InOperandList = (ins unknown:$label); let AsmString = "DBG_LABEL"; - let hasSideEffects = false; + let hasSideEffects = false; } def REG_SEQUENCE : StandardPseudoInstruction { let OutOperandList = (outs unknown:$dst); let InOperandList = (ins unknown:$supersrc, variable_ops); let AsmString = ""; - let hasSideEffects = false; - let isAsCheapAsAMove = true; + let hasSideEffects = false; + let isAsCheapAsAMove = true; } def COPY : StandardPseudoInstruction { let OutOperandList = (outs unknown:$dst); let InOperandList = (ins unknown:$src); let AsmString = ""; - let hasSideEffects = false; - let isAsCheapAsAMove = true; - let hasNoSchedulingInfo = false; + let hasSideEffects = false; + let isAsCheapAsAMove = true; + let hasNoSchedulingInfo = false; } def BUNDLE : StandardPseudoInstruction { let OutOperandList = (outs); let InOperandList = (ins variable_ops); let AsmString = "BUNDLE"; - let hasSideEffects = false; + let hasSideEffects = false; } def LIFETIME_START : StandardPseudoInstruction { let OutOperandList = (outs); let InOperandList = (ins i32imm:$id); let AsmString = "LIFETIME_START"; - let hasSideEffects = false; + let hasSideEffects = false; } def LIFETIME_END : StandardPseudoInstruction { let OutOperandList = (outs); let InOperandList = (ins i32imm:$id); let AsmString = "LIFETIME_END"; - let hasSideEffects = false; -} -def PSEUDO_PROBE : StandardPseudoInstruction { - let OutOperandList = (outs); - let InOperandList = (ins i64imm:$guid, i64imm:$index, i8imm:$type, i32imm:$attr); - let AsmString = "PSEUDO_PROBE"; - let hasSideEffects = 1; -} - + let hasSideEffects = false; +} +def PSEUDO_PROBE : StandardPseudoInstruction { + let OutOperandList = (outs); + let InOperandList = (ins i64imm:$guid, i64imm:$index, i8imm:$type, i32imm:$attr); + let AsmString = "PSEUDO_PROBE"; + let hasSideEffects = 1; +} + def STACKMAP : StandardPseudoInstruction { let OutOperandList = (outs); let InOperandList = (ins i64imm:$id, i32imm:$nbytes, variable_ops); - let hasSideEffects = true; - let isCall = true; - let mayLoad = true; - let usesCustomInserter = true; + let hasSideEffects = true; + let isCall = true; + let mayLoad = true; + let usesCustomInserter = true; } def PATCHPOINT : StandardPseudoInstruction { let OutOperandList = (outs unknown:$dst); let InOperandList = (ins i64imm:$id, i32imm:$nbytes, unknown:$callee, i32imm:$nargs, i32imm:$cc, variable_ops); - let 
hasSideEffects = true; - let isCall = true; - let mayLoad = true; - let usesCustomInserter = true; + let hasSideEffects = true; + let isCall = true; + let mayLoad = true; + let usesCustomInserter = true; } def STATEPOINT : StandardPseudoInstruction { - let OutOperandList = (outs variable_ops); + let OutOperandList = (outs variable_ops); let InOperandList = (ins variable_ops); - let usesCustomInserter = true; - let mayLoad = true; - let mayStore = true; - let hasSideEffects = true; - let isCall = true; + let usesCustomInserter = true; + let mayLoad = true; + let mayStore = true; + let hasSideEffects = true; + let isCall = true; } def LOAD_STACK_GUARD : StandardPseudoInstruction { let OutOperandList = (outs ptr_rc:$dst); let InOperandList = (ins); - let mayLoad = true; - bit isReMaterializable = true; - let hasSideEffects = false; - bit isPseudo = true; + let mayLoad = true; + bit isReMaterializable = true; + let hasSideEffects = false; + bit isPseudo = true; } def PREALLOCATED_SETUP : StandardPseudoInstruction { let OutOperandList = (outs); let InOperandList = (ins i32imm:$a); - let usesCustomInserter = true; - let hasSideEffects = true; + let usesCustomInserter = true; + let hasSideEffects = true; } def PREALLOCATED_ARG : StandardPseudoInstruction { let OutOperandList = (outs ptr_rc:$loc); let InOperandList = (ins i32imm:$a, i32imm:$b); - let usesCustomInserter = true; - let hasSideEffects = true; + let usesCustomInserter = true; + let hasSideEffects = true; } def LOCAL_ESCAPE : StandardPseudoInstruction { // This instruction is really just a label. It has to be part of the chain so @@ -1204,94 +1204,94 @@ def LOCAL_ESCAPE : StandardPseudoInstruction { // no side effects. let OutOperandList = (outs); let InOperandList = (ins ptr_rc:$symbol, i32imm:$id); - let hasSideEffects = false; - let hasCtrlDep = true; + let hasSideEffects = false; + let hasCtrlDep = true; } def FAULTING_OP : StandardPseudoInstruction { let OutOperandList = (outs unknown:$dst); let InOperandList = (ins variable_ops); - let usesCustomInserter = true; - let hasSideEffects = true; - let mayLoad = true; - let mayStore = true; - let isTerminator = true; - let isBranch = true; + let usesCustomInserter = true; + let hasSideEffects = true; + let mayLoad = true; + let mayStore = true; + let isTerminator = true; + let isBranch = true; } def PATCHABLE_OP : StandardPseudoInstruction { let OutOperandList = (outs); let InOperandList = (ins variable_ops); - let usesCustomInserter = true; - let mayLoad = true; - let mayStore = true; - let hasSideEffects = true; + let usesCustomInserter = true; + let mayLoad = true; + let mayStore = true; + let hasSideEffects = true; } def PATCHABLE_FUNCTION_ENTER : StandardPseudoInstruction { let OutOperandList = (outs); let InOperandList = (ins); let AsmString = "# XRay Function Enter."; - let usesCustomInserter = true; - let hasSideEffects = true; + let usesCustomInserter = true; + let hasSideEffects = true; } def PATCHABLE_RET : StandardPseudoInstruction { let OutOperandList = (outs); let InOperandList = (ins variable_ops); let AsmString = "# XRay Function Patchable RET."; - let usesCustomInserter = true; - let hasSideEffects = true; - let isTerminator = true; - let isReturn = true; + let usesCustomInserter = true; + let hasSideEffects = true; + let isTerminator = true; + let isReturn = true; } def PATCHABLE_FUNCTION_EXIT : StandardPseudoInstruction { let OutOperandList = (outs); let InOperandList = (ins); let AsmString = "# XRay Function Exit."; - let usesCustomInserter = true; - let 
hasSideEffects = true; - let isReturn = false; // Original return instruction will follow + let usesCustomInserter = true; + let hasSideEffects = true; + let isReturn = false; // Original return instruction will follow } def PATCHABLE_TAIL_CALL : StandardPseudoInstruction { let OutOperandList = (outs); let InOperandList = (ins variable_ops); let AsmString = "# XRay Tail Call Exit."; - let usesCustomInserter = true; - let hasSideEffects = true; - let isReturn = true; + let usesCustomInserter = true; + let hasSideEffects = true; + let isReturn = true; } def PATCHABLE_EVENT_CALL : StandardPseudoInstruction { let OutOperandList = (outs); let InOperandList = (ins ptr_rc:$event, unknown:$size); let AsmString = "# XRay Custom Event Log."; - let usesCustomInserter = true; - let isCall = true; - let mayLoad = true; - let mayStore = true; - let hasSideEffects = true; + let usesCustomInserter = true; + let isCall = true; + let mayLoad = true; + let mayStore = true; + let hasSideEffects = true; } def PATCHABLE_TYPED_EVENT_CALL : StandardPseudoInstruction { let OutOperandList = (outs); let InOperandList = (ins unknown:$type, ptr_rc:$event, unknown:$size); let AsmString = "# XRay Typed Event Log."; - let usesCustomInserter = true; - let isCall = true; - let mayLoad = true; - let mayStore = true; - let hasSideEffects = true; + let usesCustomInserter = true; + let isCall = true; + let mayLoad = true; + let mayStore = true; + let hasSideEffects = true; } def FENTRY_CALL : StandardPseudoInstruction { let OutOperandList = (outs); let InOperandList = (ins); let AsmString = "# FEntry call"; - let usesCustomInserter = true; - let isCall = true; - let mayLoad = true; - let mayStore = true; - let hasSideEffects = true; + let usesCustomInserter = true; + let isCall = true; + let mayLoad = true; + let mayStore = true; + let hasSideEffects = true; } def ICALL_BRANCH_FUNNEL : StandardPseudoInstruction { let OutOperandList = (outs); let InOperandList = (ins variable_ops); let AsmString = ""; - let hasSideEffects = true; + let hasSideEffects = true; } // Generic opcodes used in GlobalISel. @@ -1317,7 +1317,7 @@ class AsmParser { // ShouldEmitMatchRegisterName - Set to false if the target needs a hand // written register name matcher - bit ShouldEmitMatchRegisterName = true; + bit ShouldEmitMatchRegisterName = true; // Set to true if the target needs a generated 'alternative register name' // matcher. @@ -1325,7 +1325,7 @@ class AsmParser { // This generates a function which can be used to lookup registers from // their aliases. This function will fail when called on targets where // several registers share the same alias (i.e. not a 1:1 mapping). - bit ShouldEmitMatchRegisterAltName = false; + bit ShouldEmitMatchRegisterAltName = false; // Set to true if MatchRegisterName and MatchRegisterAltName functions // should be generated even if there are duplicate register names. The @@ -1333,11 +1333,11 @@ class AsmParser { // (e.g. in validateTargetOperandClass), and there are no guarantees about // which numeric register identifier will be returned in the case of // multiple matches. - bit AllowDuplicateRegisterNames = false; + bit AllowDuplicateRegisterNames = false; // HasMnemonicFirst - Set to false if target instructions don't always // start with a mnemonic as the first token. 
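// As a rough sketch (the parser name below is hypothetical and the exact set
// of overrides depends on the target's assembly syntax), a backend whose
// instructions do not always begin with a mnemonic would override these
// defaults in its own AsmParser definition:
//
//   def HypotheticalAsmParser : AsmParser {
//     let HasMnemonicFirst = false;
//     let ShouldEmitMatchRegisterAltName = true;
//   }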
- bit HasMnemonicFirst = true; + bit HasMnemonicFirst = true; // ReportMultipleNearMisses - // When 0, the assembly matcher reports an error for one encoding or operand @@ -1345,7 +1345,7 @@ class AsmParser { // When 1, the assembly matcher returns a list of encodings that were close // to matching the parsed instruction, so to allow more detailed error // messages. - bit ReportMultipleNearMisses = false; + bit ReportMultipleNearMisses = false; } def DefaultAsmParser : AsmParser; @@ -1356,7 +1356,7 @@ def DefaultAsmParser : AsmParser; // class AsmParserVariant { // Variant - AsmParsers can be of multiple different variants. Variants are - // used to support targets that need to parse multiple formats for the + // used to support targets that need to parse multiple formats for the // assembly language. int Variant = 0; @@ -1392,7 +1392,7 @@ def all_of; /// AssemblerPredicate - This is a Predicate that can be used when the assembler /// matches instructions and aliases. class AssemblerPredicate<dag cond, string name = ""> { - bit AssemblerMatcherPredicate = true; + bit AssemblerMatcherPredicate = true; dag AssemblerCondDag = cond; string PredicateName = name; } @@ -1467,7 +1467,7 @@ class InstAlias<string Asm, dag Result, int Emit = 1, string VariantName = ""> { // Setting this to 0 will cause the alias to ignore the Result instruction's // defined AsmMatchConverter and instead use the function generated by the // dag Result. - bit UseInstAsmMatchConverter = true; + bit UseInstAsmMatchConverter = true; // Assembler variant name to use for this alias. If not specified then // assembler variants will be determined based on AsmString @@ -1572,8 +1572,8 @@ class ComplexDeprecationPredicate<string dep> { // by the scheduler. Each Processor definition requires corresponding // instruction itineraries. // -class Processor<string n, ProcessorItineraries pi, list<SubtargetFeature> f, - list<SubtargetFeature> tunef = []> { +class Processor<string n, ProcessorItineraries pi, list<SubtargetFeature> f, + list<SubtargetFeature> tunef = []> { // Name - Chip set name. Used by command line (-mcpu=) to determine the // appropriate target chip. // @@ -1589,12 +1589,12 @@ class Processor<string n, ProcessorItineraries pi, list<SubtargetFeature> f, // Features - list of list<SubtargetFeature> Features = f; - - // TuneFeatures - list of features for tuning for this CPU. If the target - // supports -mtune, this should contain the list of features used to make - // microarchitectural optimization decisions for a given processor. While - // Features should contain the architectural features for the processor. - list<SubtargetFeature> TuneFeatures = tunef; + + // TuneFeatures - list of features for tuning for this CPU. If the target + // supports -mtune, this should contain the list of features used to make + // microarchitectural optimization decisions for a given processor. While + // Features should contain the architectural features for the processor. + list<SubtargetFeature> TuneFeatures = tunef; } // ProcessorModel allows subtargets to specify the more general @@ -1603,9 +1603,9 @@ class Processor<string n, ProcessorItineraries pi, list<SubtargetFeature> f, // // Although this class always passes NoItineraries to the Processor // class, the SchedMachineModel may still define valid Itineraries. 
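// As an illustration only (the CPU name, scheduling model and feature names
// below are hypothetical rather than taken from any in-tree target, and the
// feature records are assumed to be defined elsewhere), a backend typically
// instantiates ProcessorModel along the lines of:
//
//   def HypotheticalModel : SchedMachineModel;
//   def : ProcessorModel<"hypothetical-core", HypotheticalModel,
//                        [FeatureHypotheticalISA],    // architectural features
//                        [TuneHypotheticalFusion]>;   // -mtune-only features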
-class ProcessorModel<string n, SchedMachineModel m, list<SubtargetFeature> f, - list<SubtargetFeature> tunef = []> - : Processor<n, NoItineraries, f, tunef> { +class ProcessorModel<string n, SchedMachineModel m, list<SubtargetFeature> f, + list<SubtargetFeature> tunef = []> + : Processor<n, NoItineraries, f, tunef> { let SchedModel = m; } diff --git a/contrib/libs/llvm12/include/llvm/Target/TargetCallingConv.td b/contrib/libs/llvm12/include/llvm/Target/TargetCallingConv.td index 6e7277c165..b3d4fe9d0d 100644 --- a/contrib/libs/llvm12/include/llvm/Target/TargetCallingConv.td +++ b/contrib/libs/llvm12/include/llvm/Target/TargetCallingConv.td @@ -187,15 +187,15 @@ class CallingConv<list<CCAction> actions> { /// If true, this calling convention will be emitted as externally visible in /// the llvm namespaces instead of as a static function. - bit Entry = false; + bit Entry = false; - bit Custom = false; + bit Custom = false; } /// CustomCallingConv - An instance of this is used to declare calling /// conventions that are implemented using a custom function of the same name. class CustomCallingConv : CallingConv<[]> { - let Custom = true; + let Custom = true; } /// CalleeSavedRegs - A list of callee saved registers for a given calling diff --git a/contrib/libs/llvm12/include/llvm/Target/TargetInstrPredicate.td b/contrib/libs/llvm12/include/llvm/Target/TargetInstrPredicate.td index 04022cf45b..9f2cde9d92 100644 --- a/contrib/libs/llvm12/include/llvm/Target/TargetInstrPredicate.td +++ b/contrib/libs/llvm12/include/llvm/Target/TargetInstrPredicate.td @@ -11,7 +11,7 @@ // MCInstPredicate definitions are used by target scheduling models to describe // constraints on instructions. // -// Here is an example of an MCInstPredicate definition in TableGen: +// Here is an example of an MCInstPredicate definition in TableGen: // // def MCInstPredicateExample : CheckAll<[ // CheckOpcode<[BLR]>, @@ -126,11 +126,11 @@ class CheckRegOperand<int Index, Register R> : CheckOperandBase<Index> { // Check if register operand at index `Index` is the invalid register. class CheckInvalidRegOperand<int Index> : CheckOperandBase<Index>; -// Return true if machine operand at position `Index` is a valid -// register operand. -class CheckValidRegOperand<int Index> : - CheckNot<CheckInvalidRegOperand<Index>>; - +// Return true if machine operand at position `Index` is a valid +// register operand. +class CheckValidRegOperand<int Index> : + CheckNot<CheckInvalidRegOperand<Index>>; + // Check that the operand at position `Index` is immediate `Imm`. // If field `FunctionMapper` is a non-empty string, then function // `FunctionMapper` is applied to the operand value, and the return value is then @@ -259,20 +259,20 @@ class CheckFunctionPredicate<string MCInstFn, string MachineInstrFn> : MCInstPre string MachineInstrFnName = MachineInstrFn; } -// Similar to CheckFunctionPredicate. However it assumes that MachineInstrFn is -// a method in TargetInstrInfo, and MCInstrFn takes an extra pointer to -// MCInstrInfo. -// -// It Expands to: -// - TIIPointer->MachineInstrFn(MI) -// - MCInstrFn(MI, MCII); -class CheckFunctionPredicateWithTII<string MCInstFn, string MachineInstrFn, string -TIIPointer = "TII"> : MCInstPredicate { - string MCInstFnName = MCInstFn; - string TIIPtrName = TIIPointer; - string MachineInstrFnName = MachineInstrFn; -} - +// Similar to CheckFunctionPredicate. However it assumes that MachineInstrFn is +// a method in TargetInstrInfo, and MCInstrFn takes an extra pointer to +// MCInstrInfo. 
+// +// It Expands to: +// - TIIPointer->MachineInstrFn(MI) +// - MCInstrFn(MI, MCII); +class CheckFunctionPredicateWithTII<string MCInstFn, string MachineInstrFn, string +TIIPointer = "TII"> : MCInstPredicate { + string MCInstFnName = MCInstFn; + string TIIPtrName = TIIPointer; + string MachineInstrFnName = MachineInstrFn; +} + // Used to classify machine instructions based on a machine instruction // predicate. // @@ -319,8 +319,8 @@ class DepBreakingClass<list<Instruction> opcodes, MCInstPredicate pred, // - A list of subtarget hooks (Delegates) that are called from this function. // class STIPredicateDecl<string name, MCInstPredicate default = FalsePred, - bit overrides = true, bit expandForMC = true, - bit updatesOpcodeMask = false, + bit overrides = true, bit expandForMC = true, + bit updatesOpcodeMask = false, list<STIPredicateDecl> delegates = []> { string Name = name; @@ -355,7 +355,7 @@ class STIPredicate<STIPredicateDecl declaration, // Convenience classes and definitions used by processor scheduling models to // describe dependency breaking instructions and move elimination candidates. -let UpdatesOpcodeMask = true in { +let UpdatesOpcodeMask = true in { def IsZeroIdiomDecl : STIPredicateDecl<"isZeroIdiom">; diff --git a/contrib/libs/llvm12/include/llvm/Target/TargetItinerary.td b/contrib/libs/llvm12/include/llvm/Target/TargetItinerary.td index 12e77f1e6d..a432d4e42b 100644 --- a/contrib/libs/llvm12/include/llvm/Target/TargetItinerary.td +++ b/contrib/libs/llvm12/include/llvm/Target/TargetItinerary.td @@ -8,7 +8,7 @@ // // This file defines the target-independent scheduling interfaces // which should be implemented by each target that uses instruction -// itineraries for scheduling. Itineraries are detailed reservation +// itineraries for scheduling. Itineraries are detailed reservation // tables for each instruction class. They are most appropriate for // in-order machine with complicated scheduling or bundling constraints. // diff --git a/contrib/libs/llvm12/include/llvm/Target/TargetLoweringObjectFile.h b/contrib/libs/llvm12/include/llvm/Target/TargetLoweringObjectFile.h index 9c40c26448..7befbe1fb2 100644 --- a/contrib/libs/llvm12/include/llvm/Target/TargetLoweringObjectFile.h +++ b/contrib/libs/llvm12/include/llvm/Target/TargetLoweringObjectFile.h @@ -45,7 +45,7 @@ class Module; class SectionKind; class StringRef; class TargetMachine; -class DSOLocalEquivalent; +class DSOLocalEquivalent; class TargetLoweringObjectFile : public MCObjectFileInfo { /// Name-mangler for global names. @@ -55,7 +55,7 @@ protected: bool SupportIndirectSymViaGOTPCRel = false; bool SupportGOTPCRelWithOffset = true; bool SupportDebugThreadLocalLocation = true; - bool SupportDSOLocalEquivalentLowering = false; + bool SupportDSOLocalEquivalentLowering = false; /// PersonalityEncoding, LSDAEncoding, TTypeEncoding - Some encoding values /// for EH. @@ -70,8 +70,8 @@ protected: /// This section contains the static destructor pointer list. MCSection *StaticDtorSection = nullptr; - const TargetMachine *TM = nullptr; - + const TargetMachine *TM = nullptr; + public: TargetLoweringObjectFile() = default; TargetLoweringObjectFile(const TargetLoweringObjectFile &) = delete; @@ -92,9 +92,9 @@ public: /// Emit the module-level metadata that the platform cares about. virtual void emitModuleMetadata(MCStreamer &Streamer, Module &M) const {} - /// Emit Call Graph Profile metadata. - void emitCGProfileMetadata(MCStreamer &Streamer, Module &M) const; - + /// Emit Call Graph Profile metadata. 
+ void emitCGProfileMetadata(MCStreamer &Streamer, Module &M) const; + /// Get the module-level metadata that the platform cares about. virtual void getModuleMetadata(Module &M) {} @@ -132,10 +132,10 @@ public: virtual MCSection *getSectionForJumpTable(const Function &F, const TargetMachine &TM) const; - virtual MCSection *getSectionForLSDA(const Function &F, - const TargetMachine &TM) const { - return LSDASection; - } + virtual MCSection *getSectionForLSDA(const Function &F, + const TargetMachine &TM) const { + return LSDASection; + } virtual bool shouldPutJumpTableInFunctionSection(bool UsesLabelDifference, const Function &F) const; @@ -169,7 +169,7 @@ public: unsigned getPersonalityEncoding() const { return PersonalityEncoding; } unsigned getLSDAEncoding() const { return LSDAEncoding; } unsigned getTTypeEncoding() const { return TTypeEncoding; } - unsigned getCallSiteEncoding() const; + unsigned getCallSiteEncoding() const; const MCExpr *getTTypeReference(const MCSymbolRefExpr *Sym, unsigned Encoding, MCStreamer &Streamer) const; @@ -194,17 +194,17 @@ public: return nullptr; } - /// Target supports a native lowering of a dso_local_equivalent constant - /// without needing to replace it with equivalent IR. - bool supportDSOLocalEquivalentLowering() const { - return SupportDSOLocalEquivalentLowering; - } - - virtual const MCExpr *lowerDSOLocalEquivalent(const DSOLocalEquivalent *Equiv, - const TargetMachine &TM) const { - return nullptr; - } - + /// Target supports a native lowering of a dso_local_equivalent constant + /// without needing to replace it with equivalent IR. + bool supportDSOLocalEquivalentLowering() const { + return SupportDSOLocalEquivalentLowering; + } + + virtual const MCExpr *lowerDSOLocalEquivalent(const DSOLocalEquivalent *Equiv, + const TargetMachine &TM) const { + return nullptr; + } + /// Target supports replacing a data "PC"-relative access to a symbol /// through another symbol, by accessing the later via a GOT entry instead? bool supportIndirectSymViaGOTPCRel() const { @@ -249,8 +249,8 @@ public: /// On targets that support TOC entries, return a section for the entry given /// the symbol it refers to. /// TODO: Implement this interface for existing ELF targets. - virtual MCSection *getSectionForTOCEntry(const MCSymbol *S, - const TargetMachine &TM) const { + virtual MCSection *getSectionForTOCEntry(const MCSymbol *S, + const TargetMachine &TM) const { return nullptr; } @@ -271,8 +271,8 @@ public: /// If supported, return the function entry point symbol. /// Otherwise, returns nulltpr. - /// Func must be a function or an alias which has a function as base object. - virtual MCSymbol *getFunctionEntryPointSymbol(const GlobalValue *Func, + /// Func must be a function or an alias which has a function as base object. 
+ virtual MCSymbol *getFunctionEntryPointSymbol(const GlobalValue *Func, const TargetMachine &TM) const { return nullptr; } diff --git a/contrib/libs/llvm12/include/llvm/Target/TargetMachine.h b/contrib/libs/llvm12/include/llvm/Target/TargetMachine.h index 64a9976d26..4e934b5d4e 100644 --- a/contrib/libs/llvm12/include/llvm/Target/TargetMachine.h +++ b/contrib/libs/llvm12/include/llvm/Target/TargetMachine.h @@ -23,36 +23,36 @@ #include "llvm/ADT/StringRef.h" #include "llvm/ADT/Triple.h" #include "llvm/IR/DataLayout.h" -#include "llvm/IR/PassManager.h" +#include "llvm/IR/PassManager.h" #include "llvm/Pass.h" #include "llvm/Support/CodeGen.h" -#include "llvm/Support/Error.h" -#include "llvm/Target/CGPassBuilderOption.h" +#include "llvm/Support/Error.h" +#include "llvm/Target/CGPassBuilderOption.h" #include "llvm/Target/TargetOptions.h" #include <string> namespace llvm { -class AAManager; -template <typename IRUnitT, typename AnalysisManagerT, typename... ExtraArgTs> -class PassManager; -using ModulePassManager = PassManager<Module>; - +class AAManager; +template <typename IRUnitT, typename AnalysisManagerT, typename... ExtraArgTs> +class PassManager; +using ModulePassManager = PassManager<Module>; + class Function; class GlobalValue; -class MachineFunctionPassManager; -class MachineFunctionAnalysisManager; +class MachineFunctionPassManager; +class MachineFunctionAnalysisManager; class MachineModuleInfoWrapperPass; class Mangler; class MCAsmInfo; class MCContext; class MCInstrInfo; class MCRegisterInfo; -class MCStreamer; +class MCStreamer; class MCSubtargetInfo; class MCSymbol; class raw_pwrite_stream; -class PassBuilder; +class PassBuilder; class PassManagerBuilder; struct PerFunctionMIParsingState; class SMDiagnostic; @@ -130,7 +130,7 @@ public: const Triple &getTargetTriple() const { return TargetTriple; } StringRef getTargetCPU() const { return TargetCPU; } StringRef getTargetFeatureString() const { return TargetFS; } - void setTargetFeatureString(StringRef FS) { TargetFS = std::string(FS); } + void setTargetFeatureString(StringRef FS) { TargetFS = std::string(FS); } /// Virtual method implemented by subclasses that returns a reference to that /// target's TargetSubtargetInfo-derived member variable. @@ -261,9 +261,9 @@ public: Options.SupportsDebugEntryValues = Enable; } - bool getAIXExtendedAltivecABI() const { - return Options.EnableAIXExtendedAltivecABI; - } + bool getAIXExtendedAltivecABI() const { + return Options.EnableAIXExtendedAltivecABI; + } bool getUniqueSectionNames() const { return Options.UniqueSectionNames; } @@ -284,16 +284,16 @@ public: return Options.FunctionSections; } - /// Return true if visibility attribute should not be emitted in XCOFF, - /// corresponding to -mignore-xcoff-visibility. - bool getIgnoreXCOFFVisibility() const { - return Options.IgnoreXCOFFVisibility; - } - - /// Return true if XCOFF traceback table should be emitted, - /// corresponding to -xcoff-traceback-table. - bool getXCOFFTracebackTable() const { return Options.XCOFFTracebackTable; } - + /// Return true if visibility attribute should not be emitted in XCOFF, + /// corresponding to -mignore-xcoff-visibility. + bool getIgnoreXCOFFVisibility() const { + return Options.IgnoreXCOFFVisibility; + } + + /// Return true if XCOFF traceback table should be emitted, + /// corresponding to -xcoff-traceback-table. + bool getXCOFFTracebackTable() const { return Options.XCOFFTracebackTable; } + /// If basic blocks should be emitted into their own section, /// corresponding to -fbasic-block-sections. 
llvm::BasicBlockSection getBBSectionsType() const { @@ -305,19 +305,19 @@ public: return Options.BBSectionsFuncListBuf.get(); } - /// Returns true if a cast between SrcAS and DestAS is a noop. - virtual bool isNoopAddrSpaceCast(unsigned SrcAS, unsigned DestAS) const { - return false; - } - - /// If the specified generic pointer could be assumed as a pointer to a - /// specific address space, return that address space. - /// - /// Under offloading programming, the offloading target may be passed with - /// values only prepared on the host side and could assume certain - /// properties. - virtual unsigned getAssumedAddrSpace(const Value *V) const { return -1; } - + /// Returns true if a cast between SrcAS and DestAS is a noop. + virtual bool isNoopAddrSpaceCast(unsigned SrcAS, unsigned DestAS) const { + return false; + } + + /// If the specified generic pointer could be assumed as a pointer to a + /// specific address space, return that address space. + /// + /// Under offloading programming, the offloading target may be passed with + /// values only prepared on the host side and could assume certain + /// properties. + virtual unsigned getAssumedAddrSpace(const Value *V) const { return -1; } + /// Get a \c TargetIRAnalysis appropriate for the target. /// /// This is used to construct the new pass manager's target IR analysis pass, @@ -335,15 +335,15 @@ public: /// PassManagerBuilder::addExtension. virtual void adjustPassManager(PassManagerBuilder &) {} - /// Allow the target to modify the pass pipeline with New Pass Manager - /// (similar to adjustPassManager for Legacy Pass manager). - virtual void registerPassBuilderCallbacks(PassBuilder &, - bool DebugPassManager) {} - - /// Allow the target to register alias analyses with the AAManager for use - /// with the new pass manager. Only affects the "default" AAManager. - virtual void registerDefaultAliasAnalyses(AAManager &) {} - + /// Allow the target to modify the pass pipeline with New Pass Manager + /// (similar to adjustPassManager for Legacy Pass manager). + virtual void registerPassBuilderCallbacks(PassBuilder &, + bool DebugPassManager) {} + + /// Allow the target to register alias analyses with the AAManager for use + /// with the new pass manager. Only affects the "default" AAManager. + virtual void registerDefaultAliasAnalyses(AAManager &) {} + /// Add passes to the specified pass manager to get the specified file /// emitted. Typically this will involve several steps of code generation. /// This method should return true if emission of this file type is not @@ -383,8 +383,8 @@ public: /// The integer bit size to use for SjLj based exception handling. 
static constexpr unsigned DefaultSjLjDataSize = 32; virtual unsigned getSjLjDataSize() const { return DefaultSjLjDataSize; } - - static std::pair<int, int> parseBinutilsVersion(StringRef Version); + + static std::pair<int, int> parseBinutilsVersion(StringRef Version); }; /// This class describes a target machine that is implemented with the LLVM @@ -420,21 +420,21 @@ public: bool DisableVerify = true, MachineModuleInfoWrapperPass *MMIWP = nullptr) override; - virtual Error buildCodeGenPipeline(ModulePassManager &, - MachineFunctionPassManager &, - MachineFunctionAnalysisManager &, - raw_pwrite_stream &, raw_pwrite_stream *, - CodeGenFileType, CGPassBuilderOption, - PassInstrumentationCallbacks *) { - return make_error<StringError>("buildCodeGenPipeline is not overriden", - inconvertibleErrorCode()); - } - - virtual std::pair<StringRef, bool> getPassNameFromLegacyName(StringRef) { - llvm_unreachable( - "getPassNameFromLegacyName parseMIRPipeline is not overriden"); - } - + virtual Error buildCodeGenPipeline(ModulePassManager &, + MachineFunctionPassManager &, + MachineFunctionAnalysisManager &, + raw_pwrite_stream &, raw_pwrite_stream *, + CodeGenFileType, CGPassBuilderOption, + PassInstrumentationCallbacks *) { + return make_error<StringError>("buildCodeGenPipeline is not overriden", + inconvertibleErrorCode()); + } + + virtual std::pair<StringRef, bool> getPassNameFromLegacyName(StringRef) { + llvm_unreachable( + "getPassNameFromLegacyName parseMIRPipeline is not overriden"); + } + /// Add passes to the specified pass manager to get machine code emitted with /// the MCJIT. This method returns true if machine code is not supported. It /// fills the MCContext Ctx pointer which can be used to build custom @@ -455,10 +455,10 @@ public: raw_pwrite_stream *DwoOut, CodeGenFileType FileType, MCContext &Context); - Expected<std::unique_ptr<MCStreamer>> - createMCStreamer(raw_pwrite_stream &Out, raw_pwrite_stream *DwoOut, - CodeGenFileType FileType, MCContext &Ctx); - + Expected<std::unique_ptr<MCStreamer>> + createMCStreamer(raw_pwrite_stream &Out, raw_pwrite_stream *DwoOut, + CodeGenFileType FileType, MCContext &Ctx); + /// True if the target uses physical regs (as nearly all targets do). False /// for stack machines such as WebAssembly and other virtual-register /// machines. If true, all vregs must be allocated before PEI. If false, then diff --git a/contrib/libs/llvm12/include/llvm/Target/TargetOptions.h b/contrib/libs/llvm12/include/llvm/Target/TargetOptions.h index 2e0571315a..2ae71d2938 100644 --- a/contrib/libs/llvm12/include/llvm/Target/TargetOptions.h +++ b/contrib/libs/llvm12/include/llvm/Target/TargetOptions.h @@ -74,18 +74,18 @@ namespace llvm { Labels, // Do not use Basic Block Sections but label basic blocks. This // is useful when associating profile counts from virtual addresses // to basic blocks. - Preset, // Similar to list but the blocks are identified by passes which - // seek to use Basic Block Sections, e.g. MachineFunctionSplitter. - // This option cannot be set via the command line. + Preset, // Similar to list but the blocks are identified by passes which + // seek to use Basic Block Sections, e.g. MachineFunctionSplitter. + // This option cannot be set via the command line. None // Do not use Basic Block Sections. 
}; - enum class StackProtectorGuards { - None, - TLS, - Global - }; - + enum class StackProtectorGuards { + None, + TLS, + Global + }; + enum class EABI { Unknown, Default, // Default means not specified @@ -129,34 +129,34 @@ namespace llvm { class TargetOptions { public: TargetOptions() - : UnsafeFPMath(false), NoInfsFPMath(false), NoNaNsFPMath(false), - NoTrappingFPMath(true), NoSignedZerosFPMath(false), - EnableAIXExtendedAltivecABI(false), + : UnsafeFPMath(false), NoInfsFPMath(false), NoNaNsFPMath(false), + NoTrappingFPMath(true), NoSignedZerosFPMath(false), + EnableAIXExtendedAltivecABI(false), HonorSignDependentRoundingFPMathOption(false), NoZerosInBSS(false), GuaranteedTailCallOpt(false), StackSymbolOrdering(true), EnableFastISel(false), EnableGlobalISel(false), UseInitArray(false), DisableIntegratedAS(false), RelaxELFRelocations(false), FunctionSections(false), DataSections(false), - IgnoreXCOFFVisibility(false), XCOFFTracebackTable(true), + IgnoreXCOFFVisibility(false), XCOFFTracebackTable(true), UniqueSectionNames(true), UniqueBasicBlockSectionNames(false), TrapUnreachable(false), NoTrapAfterNoreturn(false), TLSSize(0), EmulatedTLS(false), ExplicitEmulatedTLS(false), EnableIPRA(false), EmitStackSizeSection(false), EnableMachineOutliner(false), - EnableMachineFunctionSplitter(false), SupportsDefaultOutlining(false), - EmitAddrsig(false), EmitCallSiteInfo(false), - SupportsDebugEntryValues(false), EnableDebugEntryValues(false), - PseudoProbeForProfiling(false), ValueTrackingVariableLocations(false), - ForceDwarfFrameSection(false), XRayOmitFunctionIndex(false), + EnableMachineFunctionSplitter(false), SupportsDefaultOutlining(false), + EmitAddrsig(false), EmitCallSiteInfo(false), + SupportsDebugEntryValues(false), EnableDebugEntryValues(false), + PseudoProbeForProfiling(false), ValueTrackingVariableLocations(false), + ForceDwarfFrameSection(false), XRayOmitFunctionIndex(false), FPDenormalMode(DenormalMode::IEEE, DenormalMode::IEEE) {} /// DisableFramePointerElim - This returns true if frame pointer elimination /// optimization should be disabled for the given machine function. bool DisableFramePointerElim(const MachineFunction &MF) const; - /// If greater than 0, override the default value of - /// MCAsmInfo::BinutilsVersion. - std::pair<int, int> BinutilsVersion{0, 0}; - + /// If greater than 0, override the default value of + /// MCAsmInfo::BinutilsVersion. + std::pair<int, int> BinutilsVersion{0, 0}; + /// UnsafeFPMath - This flag is enabled when the /// -enable-unsafe-fp-math flag is specified on the command line. When /// this flag is off (the default), the code generator is not allowed to @@ -187,12 +187,12 @@ namespace llvm { /// argument or result as insignificant. unsigned NoSignedZerosFPMath : 1; - /// EnableAIXExtendedAltivecABI - This flag returns true when -vec-extabi is - /// specified. The code generator is then able to use both volatile and - /// nonvolitle vector regisers. When false, the code generator only uses - /// volatile vector registers which is the default setting on AIX. - unsigned EnableAIXExtendedAltivecABI : 1; - + /// EnableAIXExtendedAltivecABI - This flag returns true when -vec-extabi is + /// specified. The code generator is then able to use both volatile and + /// nonvolitle vector regisers. When false, the code generator only uses + /// volatile vector registers which is the default setting on AIX. 
+ unsigned EnableAIXExtendedAltivecABI : 1; + /// HonorSignDependentRoundingFPMath - This returns true when the /// -enable-sign-dependent-rounding-fp-math is specified. If this returns /// false (the default), the code generator is allowed to assume that the @@ -255,12 +255,12 @@ namespace llvm { /// Emit data into separate sections. unsigned DataSections : 1; - /// Do not emit visibility attribute for xcoff. - unsigned IgnoreXCOFFVisibility : 1; - - /// Emit XCOFF traceback table. - unsigned XCOFFTracebackTable : 1; - + /// Do not emit visibility attribute for xcoff. + unsigned IgnoreXCOFFVisibility : 1; + + /// Emit XCOFF traceback table. + unsigned XCOFFTracebackTable : 1; + unsigned UniqueSectionNames : 1; /// Use unique names for basic block sections. @@ -292,9 +292,9 @@ namespace llvm { /// Enables the MachineOutliner pass. unsigned EnableMachineOutliner : 1; - /// Enables the MachineFunctionSplitter pass. - unsigned EnableMachineFunctionSplitter : 1; - + /// Enables the MachineFunctionSplitter pass. + unsigned EnableMachineFunctionSplitter : 1; + /// Set if the target supports default outlining behaviour. unsigned SupportsDefaultOutlining : 1; @@ -323,30 +323,30 @@ namespace llvm { /// production. bool ShouldEmitDebugEntryValues() const; - /// Emit pseudo probes into the binary for sample profiling - unsigned PseudoProbeForProfiling : 1; - - // When set to true, use experimental new debug variable location tracking, - // which seeks to follow the values of variables rather than their location, - // post isel. - unsigned ValueTrackingVariableLocations : 1; - + /// Emit pseudo probes into the binary for sample profiling + unsigned PseudoProbeForProfiling : 1; + + // When set to true, use experimental new debug variable location tracking, + // which seeks to follow the values of variables rather than their location, + // post isel. + unsigned ValueTrackingVariableLocations : 1; + /// Emit DWARF debug frame section. unsigned ForceDwarfFrameSection : 1; /// Emit XRay Function Index section unsigned XRayOmitFunctionIndex : 1; - /// Stack protector guard offset to use. - unsigned StackProtectorGuardOffset : 32; - - /// Stack protector guard mode to use, e.g. tls, global. - StackProtectorGuards StackProtectorGuard = - StackProtectorGuards::None; - - /// Stack protector guard reg to use, e.g. usually fs or gs in X86. - std::string StackProtectorGuardReg = "None"; - + /// Stack protector guard offset to use. + unsigned StackProtectorGuardOffset : 32; + + /// Stack protector guard mode to use, e.g. tls, global. + StackProtectorGuards StackProtectorGuard = + StackProtectorGuards::None; + + /// Stack protector guard reg to use, e.g. usually fs or gs in X86. + std::string StackProtectorGuardReg = "None"; + /// FloatABIType - This setting is set by -float-abi=xxx option is specfied /// on the command line. This setting may either be Default, Soft, or Hard. /// Default selects the target's default behavior. Soft selects the ABI for diff --git a/contrib/libs/llvm12/include/llvm/Target/TargetPfmCounters.td b/contrib/libs/llvm12/include/llvm/Target/TargetPfmCounters.td index 93ab042819..b00f3e19c3 100644 --- a/contrib/libs/llvm12/include/llvm/Target/TargetPfmCounters.td +++ b/contrib/libs/llvm12/include/llvm/Target/TargetPfmCounters.td @@ -7,8 +7,8 @@ //===----------------------------------------------------------------------===// // // This file defines the target-independent interfaces for performance counters. 
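// As a small illustration (the counter string below is a placeholder, not a
// verified libpfm event identifier), a target binds a hardware counter to a
// libpfm name like so:
//
//   def HypotheticalCyclesCounter : PfmCounter<"cycles">;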
-// -//===----------------------------------------------------------------------===// +// +//===----------------------------------------------------------------------===// // Definition of a hardware counters from libpfm identifiers. class PfmCounter<string counter> { diff --git a/contrib/libs/llvm12/include/llvm/Target/TargetSchedule.td b/contrib/libs/llvm12/include/llvm/Target/TargetSchedule.td index ce6e316dae..a822878ead 100644 --- a/contrib/libs/llvm12/include/llvm/Target/TargetSchedule.td +++ b/contrib/libs/llvm12/include/llvm/Target/TargetSchedule.td @@ -87,7 +87,7 @@ class SchedMachineModel { // Per-cycle resources tables. ProcessorItineraries Itineraries = NoItineraries; - bit PostRAScheduler = false; // Enable Post RegAlloc Scheduler pass. + bit PostRAScheduler = false; // Enable Post RegAlloc Scheduler pass. // Subtargets that define a model for only a subset of instructions // that have a scheduling class (itinerary class or SchedRW list) @@ -96,13 +96,13 @@ class SchedMachineModel { // be an error. This should only be set during initial bringup, // or there will be no way to catch simple errors in the model // resulting from changes to the instruction definitions. - bit CompleteModel = true; + bit CompleteModel = true; // Indicates that we should do full overlap checking for multiple InstrRWs // defining the same instructions within the same SchedMachineModel. // FIXME: Remove when all in tree targets are clean with the full check // enabled. - bit FullInstRWOverlapCheck = true; + bit FullInstRWOverlapCheck = true; // A processor may only implement part of published ISA, due to either new ISA // extensions, (e.g. Pentium 4 doesn't have AVX) or implementation @@ -118,12 +118,12 @@ class SchedMachineModel { // field. list<Predicate> UnsupportedFeatures = []; - bit NoModel = false; // Special tag to indicate missing machine model. + bit NoModel = false; // Special tag to indicate missing machine model. } def NoSchedModel : SchedMachineModel { - let NoModel = true; - let CompleteModel = false; + let NoModel = true; + let CompleteModel = false; } // Define a kind of processor resource that may be common across @@ -254,14 +254,14 @@ class ProcWriteResources<list<ProcResourceKind> resources> { list<int> ResourceCycles = []; int Latency = 1; int NumMicroOps = 1; - bit BeginGroup = false; - bit EndGroup = false; + bit BeginGroup = false; + bit EndGroup = false; // Allow a processor to mark some scheduling classes as unsupported // for stronger verification. - bit Unsupported = false; + bit Unsupported = false; // Allow a processor to mark some scheduling classes as single-issue. // SingleIssue is an alias for Begin/End Group. - bit SingleIssue = false; + bit SingleIssue = false; SchedMachineModel SchedModel = ?; } @@ -317,7 +317,7 @@ class ProcReadAdvance<int cycles, list<SchedWrite> writes = []> { list<SchedWrite> ValidWrites = writes; // Allow a processor to mark some scheduling classes as unsupported // for stronger verification. - bit Unsupported = false; + bit Unsupported = false; SchedMachineModel SchedModel = ?; } @@ -395,7 +395,7 @@ class SchedVar<SchedPredicateBase pred, list<SchedReadWrite> selected> { // SchedModel silences warnings but is ignored. 
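// As a hedged sketch (the predicate and write names are hypothetical, and
// SchedWriteVariant / NoSchedPred are assumed to be the convenience
// definitions provided elsewhere in this file), a variant that selects
// between two writes based on a predicate might look like:
//
//   def HypotheticalWriteVar : SchedWriteVariant<[
//     SchedVar<HypotheticalZeroingPred, [HypotheticalWriteZero]>,
//     SchedVar<NoSchedPred,             [HypotheticalWriteALU]>]>;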
class SchedVariant<list<SchedVar> variants> { list<SchedVar> Variants = variants; - bit Variadic = false; + bit Variadic = false; SchedMachineModel SchedModel = ?; } @@ -428,7 +428,7 @@ class InstRW<list<SchedReadWrite> rw, dag instrlist> { dag Instrs = instrlist; SchedMachineModel SchedModel = ?; // Allow a subtarget to mark some instructions as unsupported. - bit Unsupported = false; + bit Unsupported = false; } // Map a set of itinerary classes to SchedReadWrite resources. This is @@ -535,7 +535,7 @@ class SchedAlias<SchedReadWrite match, SchedReadWrite alias> { class RegisterFile<int numPhysRegs, list<RegisterClass> Classes = [], list<int> Costs = [], list<bit> AllowMoveElim = [], - int MaxMoveElimPerCy = 0, bit AllowZeroMoveElimOnly = false> { + int MaxMoveElimPerCy = 0, bit AllowZeroMoveElimOnly = false> { list<RegisterClass> RegClasses = Classes; list<int> RegCosts = Costs; list<bit> AllowMoveElimination = AllowMoveElim; diff --git a/contrib/libs/llvm12/include/llvm/Target/TargetSelectionDAG.td b/contrib/libs/llvm12/include/llvm/Target/TargetSelectionDAG.td index cebce0f6d3..a09feca6ca 100644 --- a/contrib/libs/llvm12/include/llvm/Target/TargetSelectionDAG.td +++ b/contrib/libs/llvm12/include/llvm/Target/TargetSelectionDAG.td @@ -164,9 +164,9 @@ def SDTIntToFPOp : SDTypeProfile<1, 1, [ // [su]int_to_fp def SDTFPToIntOp : SDTypeProfile<1, 1, [ // fp_to_[su]int SDTCisInt<0>, SDTCisFP<1>, SDTCisSameNumEltsAs<0, 1> ]>; -def SDTFPToIntSatOp : SDTypeProfile<1, 2, [ // fp_to_[su]int_sat - SDTCisInt<0>, SDTCisFP<1>, SDTCisInt<2>, SDTCisSameNumEltsAs<0, 1> -]>; +def SDTFPToIntSatOp : SDTypeProfile<1, 2, [ // fp_to_[su]int_sat + SDTCisInt<0>, SDTCisFP<1>, SDTCisInt<2>, SDTCisSameNumEltsAs<0, 1> +]>; def SDTExtInreg : SDTypeProfile<1, 2, [ // sext_inreg SDTCisSameAs<0, 1>, SDTCisInt<0>, SDTCisVT<2, OtherVT>, SDTCisVTSmallerThanOp<2, 1> @@ -215,8 +215,8 @@ def SDTCatchret : SDTypeProfile<0, 2, [ // catchret def SDTNone : SDTypeProfile<0, 0, []>; // ret, trap -def SDTUBSANTrap : SDTypeProfile<0, 1, []>; // ubsantrap - +def SDTUBSANTrap : SDTypeProfile<0, 1, []>; // ubsantrap + def SDTLoad : SDTypeProfile<1, 1, [ // load SDTCisPtrTy<1> ]>; @@ -250,11 +250,11 @@ def SDTVecInsert : SDTypeProfile<1, 3, [ // vector insert def SDTVecReduce : SDTypeProfile<1, 1, [ // vector reduction SDTCisInt<0>, SDTCisVec<1> ]>; -def SDTFPVecReduce : SDTypeProfile<1, 1, [ // FP vector reduction - SDTCisFP<0>, SDTCisVec<1> -]>; +def SDTFPVecReduce : SDTypeProfile<1, 1, [ // FP vector reduction + SDTCisFP<0>, SDTCisVec<1> +]>; + - def SDTSubVecExtract : SDTypeProfile<1, 2, [// subvector extract SDTCisSubVecOfVec<0,1>, SDTCisInt<2> ]>; @@ -405,8 +405,8 @@ def saddsat : SDNode<"ISD::SADDSAT" , SDTIntBinOp, [SDNPCommutative]>; def uaddsat : SDNode<"ISD::UADDSAT" , SDTIntBinOp, [SDNPCommutative]>; def ssubsat : SDNode<"ISD::SSUBSAT" , SDTIntBinOp>; def usubsat : SDNode<"ISD::USUBSAT" , SDTIntBinOp>; -def sshlsat : SDNode<"ISD::SSHLSAT" , SDTIntBinOp>; -def ushlsat : SDNode<"ISD::USHLSAT" , SDTIntBinOp>; +def sshlsat : SDNode<"ISD::SSHLSAT" , SDTIntBinOp>; +def ushlsat : SDNode<"ISD::USHLSAT" , SDTIntBinOp>; def smulfix : SDNode<"ISD::SMULFIX" , SDTIntScaledBinOp, [SDNPCommutative]>; def smulfixsat : SDNode<"ISD::SMULFIXSAT", SDTIntScaledBinOp, [SDNPCommutative]>; @@ -443,15 +443,15 @@ def vecreduce_smax : SDNode<"ISD::VECREDUCE_SMAX", SDTVecReduce>; def vecreduce_umax : SDNode<"ISD::VECREDUCE_UMAX", SDTVecReduce>; def vecreduce_smin : SDNode<"ISD::VECREDUCE_SMIN", SDTVecReduce>; def vecreduce_umin : 
SDNode<"ISD::VECREDUCE_UMIN", SDTVecReduce>; -def vecreduce_fadd : SDNode<"ISD::VECREDUCE_FADD", SDTFPVecReduce>; +def vecreduce_fadd : SDNode<"ISD::VECREDUCE_FADD", SDTFPVecReduce>; def fadd : SDNode<"ISD::FADD" , SDTFPBinOp, [SDNPCommutative]>; def fsub : SDNode<"ISD::FSUB" , SDTFPBinOp>; def fmul : SDNode<"ISD::FMUL" , SDTFPBinOp, [SDNPCommutative]>; def fdiv : SDNode<"ISD::FDIV" , SDTFPBinOp>; def frem : SDNode<"ISD::FREM" , SDTFPBinOp>; -def fma : SDNode<"ISD::FMA" , SDTFPTernaryOp, [SDNPCommutative]>; -def fmad : SDNode<"ISD::FMAD" , SDTFPTernaryOp, [SDNPCommutative]>; +def fma : SDNode<"ISD::FMA" , SDTFPTernaryOp, [SDNPCommutative]>; +def fmad : SDNode<"ISD::FMAD" , SDTFPTernaryOp, [SDNPCommutative]>; def fabs : SDNode<"ISD::FABS" , SDTFPUnaryOp>; def fminnum : SDNode<"ISD::FMINNUM" , SDTFPBinOp, [SDNPCommutative, SDNPAssociative]>; @@ -494,8 +494,8 @@ def sint_to_fp : SDNode<"ISD::SINT_TO_FP" , SDTIntToFPOp>; def uint_to_fp : SDNode<"ISD::UINT_TO_FP" , SDTIntToFPOp>; def fp_to_sint : SDNode<"ISD::FP_TO_SINT" , SDTFPToIntOp>; def fp_to_uint : SDNode<"ISD::FP_TO_UINT" , SDTFPToIntOp>; -def fp_to_sint_sat : SDNode<"ISD::FP_TO_SINT_SAT" , SDTFPToIntSatOp>; -def fp_to_uint_sat : SDNode<"ISD::FP_TO_UINT_SAT" , SDTFPToIntSatOp>; +def fp_to_sint_sat : SDNode<"ISD::FP_TO_SINT_SAT" , SDTFPToIntSatOp>; +def fp_to_uint_sat : SDNode<"ISD::FP_TO_UINT_SAT" , SDTFPToIntSatOp>; def f16_to_fp : SDNode<"ISD::FP16_TO_FP" , SDTIntToFPOp>; def fp_to_f16 : SDNode<"ISD::FP_TO_FP16" , SDTFPToIntOp>; @@ -510,7 +510,7 @@ def strict_fdiv : SDNode<"ISD::STRICT_FDIV", def strict_frem : SDNode<"ISD::STRICT_FREM", SDTFPBinOp, [SDNPHasChain]>; def strict_fma : SDNode<"ISD::STRICT_FMA", - SDTFPTernaryOp, [SDNPHasChain, SDNPCommutative]>; + SDTFPTernaryOp, [SDNPHasChain, SDNPCommutative]>; def strict_fsqrt : SDNode<"ISD::STRICT_FSQRT", SDTFPUnaryOp, [SDNPHasChain]>; def strict_fsin : SDNode<"ISD::STRICT_FSIN", @@ -567,8 +567,8 @@ def strict_sint_to_fp : SDNode<"ISD::STRICT_SINT_TO_FP", SDTIntToFPOp, [SDNPHasChain]>; def strict_uint_to_fp : SDNode<"ISD::STRICT_UINT_TO_FP", SDTIntToFPOp, [SDNPHasChain]>; -def strict_fsetcc : SDNode<"ISD::STRICT_FSETCC", SDTSetCC, [SDNPHasChain]>; -def strict_fsetccs : SDNode<"ISD::STRICT_FSETCCS", SDTSetCC, [SDNPHasChain]>; +def strict_fsetcc : SDNode<"ISD::STRICT_FSETCC", SDTSetCC, [SDNPHasChain]>; +def strict_fsetccs : SDNode<"ISD::STRICT_FSETCCS", SDTSetCC, [SDNPHasChain]>; def setcc : SDNode<"ISD::SETCC" , SDTSetCC>; def select : SDNode<"ISD::SELECT" , SDTSelect>; @@ -587,8 +587,8 @@ def trap : SDNode<"ISD::TRAP" , SDTNone, [SDNPHasChain, SDNPSideEffect]>; def debugtrap : SDNode<"ISD::DEBUGTRAP" , SDTNone, [SDNPHasChain, SDNPSideEffect]>; -def ubsantrap : SDNode<"ISD::UBSANTRAP" , SDTUBSANTrap, - [SDNPHasChain, SDNPSideEffect]>; +def ubsantrap : SDNode<"ISD::UBSANTRAP" , SDTUBSANTrap, + [SDNPHasChain, SDNPSideEffect]>; def prefetch : SDNode<"ISD::PREFETCH" , SDTPrefetch, [SDNPHasChain, SDNPMayLoad, SDNPMayStore, @@ -652,7 +652,7 @@ def ist : SDNode<"ISD::STORE" , SDTIStore, def vector_shuffle : SDNode<"ISD::VECTOR_SHUFFLE", SDTVecShuffle, []>; def build_vector : SDNode<"ISD::BUILD_VECTOR", SDTypeProfile<1, -1, []>, []>; -def splat_vector : SDNode<"ISD::SPLAT_VECTOR", SDTypeProfile<1, 1, []>, []>; +def splat_vector : SDNode<"ISD::SPLAT_VECTOR", SDTypeProfile<1, 1, []>, []>; def scalar_to_vector : SDNode<"ISD::SCALAR_TO_VECTOR", SDTypeProfile<1, 1, []>, []>; @@ -768,7 +768,7 @@ class PatFrags<dag ops, list<dag> frags, code pred = [{}], // This is useful when Fragments involves 
associative / commutative // operators: a single piece of code can easily refer to all operands even // when re-associated / commuted variants of the fragment are matched. - bit PredicateCodeUsesOperands = false; + bit PredicateCodeUsesOperands = false; // Define a few pre-packaged predicates. This helps GlobalISel import // existing rules from SelectionDAG for many common cases. @@ -867,13 +867,13 @@ class ImmLeaf<ValueType vt, code pred, SDNodeXForm xform = NOOP_SDNodeXForm, SDNode ImmNode = imm> : PatFrag<(ops), (vt ImmNode), [{}], xform> { let ImmediateCode = pred; - bit FastIselShouldIgnore = false; + bit FastIselShouldIgnore = false; // Is the data type of the immediate an APInt? - bit IsAPInt = false; + bit IsAPInt = false; // Is the data type of the immediate an APFloat? - bit IsAPFloat = false; + bit IsAPFloat = false; } // Convenience wrapper for ImmLeaf to use timm/TargetConstant instead @@ -890,8 +890,8 @@ class TImmLeaf<ValueType vt, code pred, SDNodeXForm xform = NOOP_SDNodeXForm, // IntImmLeaf will allow GlobalISel to import the rule. class IntImmLeaf<ValueType vt, code pred, SDNodeXForm xform = NOOP_SDNodeXForm> : ImmLeaf<vt, pred, xform> { - let IsAPInt = true; - let FastIselShouldIgnore = true; + let IsAPInt = true; + let FastIselShouldIgnore = true; } // An ImmLeaf except that Imm is an APFloat. @@ -900,8 +900,8 @@ class IntImmLeaf<ValueType vt, code pred, SDNodeXForm xform = NOOP_SDNodeXForm> // generate code for rules that make use of it. class FPImmLeaf<ValueType vt, code pred, SDNodeXForm xform = NOOP_SDNodeXForm> : ImmLeaf<vt, pred, xform, fpimm> { - let IsAPFloat = true; - let FastIselShouldIgnore = true; + let IsAPFloat = true; + let FastIselShouldIgnore = true; } // Leaf fragments. @@ -909,23 +909,23 @@ class FPImmLeaf<ValueType vt, code pred, SDNodeXForm xform = NOOP_SDNodeXForm> def vtInt : PatLeaf<(vt), [{ return N->getVT().isInteger(); }]>; def vtFP : PatLeaf<(vt), [{ return N->getVT().isFloatingPoint(); }]>; -// Use ISD::isConstantSplatVectorAllOnes or ISD::isConstantSplatVectorAllZeros -// to look for the corresponding build_vector or splat_vector. Will look through -// bitcasts and check for either opcode, except when used as a pattern root. -// When used as a pattern root, only fixed-length build_vector and scalable -// splat_vector are supported. -def immAllOnesV; // ISD::isConstantSplatVectorAllOnes -def immAllZerosV; // ISD::isConstantSplatVectorAllZeros +// Use ISD::isConstantSplatVectorAllOnes or ISD::isConstantSplatVectorAllZeros +// to look for the corresponding build_vector or splat_vector. Will look through +// bitcasts and check for either opcode, except when used as a pattern root. +// When used as a pattern root, only fixed-length build_vector and scalable +// splat_vector are supported. +def immAllOnesV; // ISD::isConstantSplatVectorAllOnes +def immAllZerosV; // ISD::isConstantSplatVectorAllZeros // Other helper fragments. def not : PatFrag<(ops node:$in), (xor node:$in, -1)>; def vnot : PatFrag<(ops node:$in), (xor node:$in, immAllOnesV)>; def ineg : PatFrag<(ops node:$in), (sub 0, node:$in)>; -def zanyext : PatFrags<(ops node:$op), - [(zext node:$op), - (anyext node:$op)]>; - +def zanyext : PatFrags<(ops node:$op), + [(zext node:$op), + (anyext node:$op)]>; + // null_frag - The null pattern operator is used in multiclass instantiations // which accept an SDPatternOperator for use in matching patterns for internal // definitions. 
When expanding a pattern, if the null fragment is referenced
@@ -935,222 +935,222 @@ def null_frag : SDPatternOperator;
// load fragments.
def unindexedload : PatFrag<(ops node:$ptr), (ld node:$ptr)> {
-  let IsLoad = true;
-  let IsUnindexed = true;
+  let IsLoad = true;
+  let IsUnindexed = true;
}
def load : PatFrag<(ops node:$ptr), (unindexedload node:$ptr)> {
-  let IsLoad = true;
-  let IsNonExtLoad = true;
+  let IsLoad = true;
+  let IsNonExtLoad = true;
}
// extending load fragments.
def extload : PatFrag<(ops node:$ptr), (unindexedload node:$ptr)> {
-  let IsLoad = true;
-  let IsAnyExtLoad = true;
+  let IsLoad = true;
+  let IsAnyExtLoad = true;
}
def sextload : PatFrag<(ops node:$ptr), (unindexedload node:$ptr)> {
-  let IsLoad = true;
-  let IsSignExtLoad = true;
+  let IsLoad = true;
+  let IsSignExtLoad = true;
}
def zextload : PatFrag<(ops node:$ptr), (unindexedload node:$ptr)> {
-  let IsLoad = true;
-  let IsZeroExtLoad = true;
+  let IsLoad = true;
+  let IsZeroExtLoad = true;
}
def extloadi1 : PatFrag<(ops node:$ptr), (extload node:$ptr)> {
-  let IsLoad = true;
+  let IsLoad = true;
  let MemoryVT = i1;
}
def extloadi8 : PatFrag<(ops node:$ptr), (extload node:$ptr)> {
-  let IsLoad = true;
+  let IsLoad = true;
  let MemoryVT = i8;
}
def extloadi16 : PatFrag<(ops node:$ptr), (extload node:$ptr)> {
-  let IsLoad = true;
+  let IsLoad = true;
  let MemoryVT = i16;
}
def extloadi32 : PatFrag<(ops node:$ptr), (extload node:$ptr)> {
-  let IsLoad = true;
+  let IsLoad = true;
  let MemoryVT = i32;
}
def extloadf16 : PatFrag<(ops node:$ptr), (extload node:$ptr)> {
-  let IsLoad = true;
+  let IsLoad = true;
  let MemoryVT = f16;
}
def extloadf32 : PatFrag<(ops node:$ptr), (extload node:$ptr)> {
-  let IsLoad = true;
+  let IsLoad = true;
  let MemoryVT = f32;
}
def extloadf64 : PatFrag<(ops node:$ptr), (extload node:$ptr)> {
-  let IsLoad = true;
+  let IsLoad = true;
  let MemoryVT = f64;
}
def sextloadi1 : PatFrag<(ops node:$ptr), (sextload node:$ptr)> {
-  let IsLoad = true;
+  let IsLoad = true;
  let MemoryVT = i1;
}
def sextloadi8 : PatFrag<(ops node:$ptr), (sextload node:$ptr)> {
-  let IsLoad = true;
+  let IsLoad = true;
  let MemoryVT = i8;
}
def sextloadi16 : PatFrag<(ops node:$ptr), (sextload node:$ptr)> {
-  let IsLoad = true;
+  let IsLoad = true;
  let MemoryVT = i16;
}
def sextloadi32 : PatFrag<(ops node:$ptr), (sextload node:$ptr)> {
-  let IsLoad = true;
+  let IsLoad = true;
  let MemoryVT = i32;
}
def zextloadi1 : PatFrag<(ops node:$ptr), (zextload node:$ptr)> {
-  let IsLoad = true;
+  let IsLoad = true;
  let MemoryVT = i1;
}
def zextloadi8 : PatFrag<(ops node:$ptr), (zextload node:$ptr)> {
-  let IsLoad = true;
+  let IsLoad = true;
  let MemoryVT = i8;
}
def zextloadi16 : PatFrag<(ops node:$ptr), (zextload node:$ptr)> {
-  let IsLoad = true;
+  let IsLoad = true;
  let MemoryVT = i16;
}
def zextloadi32 : PatFrag<(ops node:$ptr), (zextload node:$ptr)> {
-  let IsLoad = true;
+  let IsLoad = true;
  let MemoryVT = i32;
}
def extloadvi1 : PatFrag<(ops node:$ptr), (extload node:$ptr)> {
-  let IsLoad = true;
+  let IsLoad = true;
  let ScalarMemoryVT = i1;
}
def extloadvi8 : PatFrag<(ops node:$ptr), (extload node:$ptr)> {
-  let IsLoad = true;
+  let IsLoad = true;
  let ScalarMemoryVT = i8;
}
def extloadvi16 : PatFrag<(ops node:$ptr), (extload node:$ptr)> {
-  let IsLoad = true;
+  let IsLoad = true;
  let ScalarMemoryVT = i16;
}
def extloadvi32 : PatFrag<(ops node:$ptr), (extload node:$ptr)> {
-  let IsLoad = true;
+  let IsLoad = true;
  let ScalarMemoryVT = i32;
}
def extloadvf32 : PatFrag<(ops node:$ptr), (extload node:$ptr)> {
-  let IsLoad = true;
+  let IsLoad = true;
  let ScalarMemoryVT = f32;
}
def extloadvf64 : PatFrag<(ops node:$ptr), (extload node:$ptr)> {
-  let IsLoad = true;
+  let IsLoad = true;
  let ScalarMemoryVT = f64;
}
def sextloadvi1 : PatFrag<(ops node:$ptr), (sextload node:$ptr)> {
-  let IsLoad = true;
+  let IsLoad = true;
  let ScalarMemoryVT = i1;
}
def sextloadvi8 : PatFrag<(ops node:$ptr), (sextload node:$ptr)> {
-  let IsLoad = true;
+  let IsLoad = true;
  let ScalarMemoryVT = i8;
}
def sextloadvi16 : PatFrag<(ops node:$ptr), (sextload node:$ptr)> {
-  let IsLoad = true;
+  let IsLoad = true;
  let ScalarMemoryVT = i16;
}
def sextloadvi32 : PatFrag<(ops node:$ptr), (sextload node:$ptr)> {
-  let IsLoad = true;
+  let IsLoad = true;
  let ScalarMemoryVT = i32;
}
def zextloadvi1 : PatFrag<(ops node:$ptr), (zextload node:$ptr)> {
-  let IsLoad = true;
+  let IsLoad = true;
  let ScalarMemoryVT = i1;
}
def zextloadvi8 : PatFrag<(ops node:$ptr), (zextload node:$ptr)> {
-  let IsLoad = true;
+  let IsLoad = true;
  let ScalarMemoryVT = i8;
}
def zextloadvi16 : PatFrag<(ops node:$ptr), (zextload node:$ptr)> {
-  let IsLoad = true;
+  let IsLoad = true;
  let ScalarMemoryVT = i16;
}
def zextloadvi32 : PatFrag<(ops node:$ptr), (zextload node:$ptr)> {
-  let IsLoad = true;
+  let IsLoad = true;
  let ScalarMemoryVT = i32;
}
// store fragments.
def unindexedstore : PatFrag<(ops node:$val, node:$ptr), (st node:$val, node:$ptr)> {
-  let IsStore = true;
-  let IsUnindexed = true;
+  let IsStore = true;
+  let IsUnindexed = true;
}
def store : PatFrag<(ops node:$val, node:$ptr), (unindexedstore node:$val, node:$ptr)> {
-  let IsStore = true;
-  let IsTruncStore = false;
+  let IsStore = true;
+  let IsTruncStore = false;
}
// truncstore fragments.
def truncstore : PatFrag<(ops node:$val, node:$ptr), (unindexedstore node:$val, node:$ptr)> {
-  let IsStore = true;
-  let IsTruncStore = true;
+  let IsStore = true;
+  let IsTruncStore = true;
}
def truncstorei8 : PatFrag<(ops node:$val, node:$ptr), (truncstore node:$val, node:$ptr)> {
-  let IsStore = true;
+  let IsStore = true;
  let MemoryVT = i8;
}
def truncstorei16 : PatFrag<(ops node:$val, node:$ptr), (truncstore node:$val, node:$ptr)> {
-  let IsStore = true;
+  let IsStore = true;
  let MemoryVT = i16;
}
def truncstorei32 : PatFrag<(ops node:$val, node:$ptr), (truncstore node:$val, node:$ptr)> {
-  let IsStore = true;
+  let IsStore = true;
  let MemoryVT = i32;
}
def truncstoref16 : PatFrag<(ops node:$val, node:$ptr), (truncstore node:$val, node:$ptr)> {
-  let IsStore = true;
+  let IsStore = true;
  let MemoryVT = f16;
}
def truncstoref32 : PatFrag<(ops node:$val, node:$ptr), (truncstore node:$val, node:$ptr)> {
-  let IsStore = true;
+  let IsStore = true;
  let MemoryVT = f32;
}
def truncstoref64 : PatFrag<(ops node:$val, node:$ptr), (truncstore node:$val, node:$ptr)> {
-  let IsStore = true;
+  let IsStore = true;
  let MemoryVT = f64;
}
def truncstorevi8 : PatFrag<(ops node:$val, node:$ptr), (truncstore node:$val, node:$ptr)> {
-  let IsStore = true;
+  let IsStore = true;
  let ScalarMemoryVT = i8;
}
def truncstorevi16 : PatFrag<(ops node:$val, node:$ptr), (truncstore node:$val, node:$ptr)> {
-  let IsStore = true;
+  let IsStore = true;
  let ScalarMemoryVT = i16;
}
def truncstorevi32 : PatFrag<(ops node:$val, node:$ptr), (truncstore node:$val, node:$ptr)> {
-  let IsStore = true;
+  let IsStore = true;
  let ScalarMemoryVT = i32;
}
// indexed store fragments.
def istore : PatFrag<(ops node:$val, node:$base, node:$offset), (ist node:$val, node:$base, node:$offset)> {
-  let IsStore = true;
-  let IsTruncStore = false;
+  let IsStore = true;
+  let IsTruncStore = false;
}
def pre_store : PatFrag<(ops node:$val, node:$base, node:$offset),
@@ -1161,8 +1161,8 @@ def pre_store : PatFrag<(ops node:$val, node:$base, node:$offset),
def itruncstore : PatFrag<(ops node:$val, node:$base, node:$offset), (ist node:$val, node:$base, node:$offset)> {
-  let IsStore = true;
-  let IsTruncStore = true;
+  let IsStore = true;
+  let IsTruncStore = true;
}
def pre_truncst : PatFrag<(ops node:$val, node:$base, node:$offset), (itruncstore node:$val, node:$base, node:$offset), [{
@@ -1171,37 +1171,37 @@ def pre_truncst : PatFrag<(ops node:$val, node:$base, node:$offset),
}]>;
def pre_truncsti1 : PatFrag<(ops node:$val, node:$base, node:$offset), (pre_truncst node:$val, node:$base, node:$offset)> {
-  let IsStore = true;
+  let IsStore = true;
  let MemoryVT = i1;
}
def pre_truncsti8 : PatFrag<(ops node:$val, node:$base, node:$offset), (pre_truncst node:$val, node:$base, node:$offset)> {
-  let IsStore = true;
+  let IsStore = true;
  let MemoryVT = i8;
}
def pre_truncsti16 : PatFrag<(ops node:$val, node:$base, node:$offset), (pre_truncst node:$val, node:$base, node:$offset)> {
-  let IsStore = true;
+  let IsStore = true;
  let MemoryVT = i16;
}
def pre_truncsti32 : PatFrag<(ops node:$val, node:$base, node:$offset), (pre_truncst node:$val, node:$base, node:$offset)> {
-  let IsStore = true;
+  let IsStore = true;
  let MemoryVT = i32;
}
def pre_truncstf32 : PatFrag<(ops node:$val, node:$base, node:$offset), (pre_truncst node:$val, node:$base, node:$offset)> {
-  let IsStore = true;
+  let IsStore = true;
  let MemoryVT = f32;
}
def pre_truncstvi8 : PatFrag<(ops node:$val, node:$base, node:$offset), (pre_truncst node:$val, node:$base, node:$offset)> {
-  let IsStore = true;
+  let IsStore = true;
  let ScalarMemoryVT = i8;
}
def pre_truncstvi16 : PatFrag<(ops node:$val, node:$base, node:$offset), (pre_truncst node:$val, node:$base, node:$offset)> {
-  let IsStore = true;
+  let IsStore = true;
  let ScalarMemoryVT = i16;
}
@@ -1218,37 +1218,37 @@ def post_truncst : PatFrag<(ops node:$val, node:$base, node:$offset),
}]>;
def post_truncsti1 : PatFrag<(ops node:$val, node:$base, node:$offset), (post_truncst node:$val, node:$base, node:$offset)> {
-  let IsStore = true;
+  let IsStore = true;
  let MemoryVT = i1;
}
def post_truncsti8 : PatFrag<(ops node:$val, node:$base, node:$offset), (post_truncst node:$val, node:$base, node:$offset)> {
-  let IsStore = true;
+  let IsStore = true;
  let MemoryVT = i8;
}
def post_truncsti16 : PatFrag<(ops node:$val, node:$base, node:$offset), (post_truncst node:$val, node:$base, node:$offset)> {
-  let IsStore = true;
+  let IsStore = true;
  let MemoryVT = i16;
}
def post_truncsti32 : PatFrag<(ops node:$val, node:$base, node:$offset), (post_truncst node:$val, node:$base, node:$offset)> {
-  let IsStore = true;
+  let IsStore = true;
  let MemoryVT = i32;
}
def post_truncstf32 : PatFrag<(ops node:$val, node:$base, node:$offset), (post_truncst node:$val, node:$base, node:$offset)> {
-  let IsStore = true;
+  let IsStore = true;
  let MemoryVT = f32;
}
def post_truncstvi8 : PatFrag<(ops node:$val, node:$base, node:$offset), (post_truncst node:$val, node:$base, node:$offset)> {
-  let IsStore = true;
+  let IsStore = true;
  let ScalarMemoryVT = i8;
}
def post_truncstvi16 : PatFrag<(ops node:$val, node:$base, node:$offset), (post_truncst node:$val, node:$base, node:$offset)> {
-  let IsStore = true;
+  let IsStore = true;
  let ScalarMemoryVT = i16;
}
@@ -1445,88 +1445,88 @@ def any_sint_to_fp : PatFrags<(ops node:$src),
def any_uint_to_fp : PatFrags<(ops node:$src),
                              [(strict_uint_to_fp node:$src),
                              (uint_to_fp node:$src)]>;
-def any_fsetcc : PatFrags<(ops node:$lhs, node:$rhs, node:$pred),
-                          [(strict_fsetcc node:$lhs, node:$rhs, node:$pred),
-                          (setcc node:$lhs, node:$rhs, node:$pred)]>;
-def any_fsetccs : PatFrags<(ops node:$lhs, node:$rhs, node:$pred),
-                           [(strict_fsetccs node:$lhs, node:$rhs, node:$pred),
-                           (setcc node:$lhs, node:$rhs, node:$pred)]>;
+def any_fsetcc : PatFrags<(ops node:$lhs, node:$rhs, node:$pred),
+                          [(strict_fsetcc node:$lhs, node:$rhs, node:$pred),
+                          (setcc node:$lhs, node:$rhs, node:$pred)]>;
+def any_fsetccs : PatFrags<(ops node:$lhs, node:$rhs, node:$pred),
+                           [(strict_fsetccs node:$lhs, node:$rhs, node:$pred),
+                           (setcc node:$lhs, node:$rhs, node:$pred)]>;
multiclass binary_atomic_op_ord<SDNode atomic_op> {
  def NAME#_monotonic : PatFrag<(ops node:$ptr, node:$val), (!cast<SDPatternOperator>(NAME) node:$ptr, node:$val)> {
-    let IsAtomic = true;
-    let IsAtomicOrderingMonotonic = true;
+    let IsAtomic = true;
+    let IsAtomicOrderingMonotonic = true;
  }
  def NAME#_acquire : PatFrag<(ops node:$ptr, node:$val), (!cast<SDPatternOperator>(NAME) node:$ptr, node:$val)> {
-    let IsAtomic = true;
-    let IsAtomicOrderingAcquire = true;
+    let IsAtomic = true;
+    let IsAtomicOrderingAcquire = true;
  }
  def NAME#_release : PatFrag<(ops node:$ptr, node:$val), (!cast<SDPatternOperator>(NAME) node:$ptr, node:$val)> {
-    let IsAtomic = true;
-    let IsAtomicOrderingRelease = true;
+    let IsAtomic = true;
+    let IsAtomicOrderingRelease = true;
  }
  def NAME#_acq_rel : PatFrag<(ops node:$ptr, node:$val), (!cast<SDPatternOperator>(NAME) node:$ptr, node:$val)> {
-    let IsAtomic = true;
-    let IsAtomicOrderingAcquireRelease = true;
+    let IsAtomic = true;
+    let IsAtomicOrderingAcquireRelease = true;
  }
  def NAME#_seq_cst : PatFrag<(ops node:$ptr, node:$val), (!cast<SDPatternOperator>(NAME) node:$ptr, node:$val)> {
-    let IsAtomic = true;
-    let IsAtomicOrderingSequentiallyConsistent = true;
+    let IsAtomic = true;
+    let IsAtomicOrderingSequentiallyConsistent = true;
  }
}
multiclass ternary_atomic_op_ord<SDNode atomic_op> {
  def NAME#_monotonic : PatFrag<(ops node:$ptr, node:$cmp, node:$val), (!cast<SDPatternOperator>(NAME) node:$ptr, node:$cmp, node:$val)> {
-    let IsAtomic = true;
-    let IsAtomicOrderingMonotonic = true;
+    let IsAtomic = true;
+    let IsAtomicOrderingMonotonic = true;
  }
  def NAME#_acquire : PatFrag<(ops node:$ptr, node:$cmp, node:$val), (!cast<SDPatternOperator>(NAME) node:$ptr, node:$cmp, node:$val)> {
-    let IsAtomic = true;
-    let IsAtomicOrderingAcquire = true;
+    let IsAtomic = true;
+    let IsAtomicOrderingAcquire = true;
  }
  def NAME#_release : PatFrag<(ops node:$ptr, node:$cmp, node:$val), (!cast<SDPatternOperator>(NAME) node:$ptr, node:$cmp, node:$val)> {
-    let IsAtomic = true;
-    let IsAtomicOrderingRelease = true;
+    let IsAtomic = true;
+    let IsAtomicOrderingRelease = true;
  }
  def NAME#_acq_rel : PatFrag<(ops node:$ptr, node:$cmp, node:$val), (!cast<SDPatternOperator>(NAME) node:$ptr, node:$cmp, node:$val)> {
-    let IsAtomic = true;
-    let IsAtomicOrderingAcquireRelease = true;
+    let IsAtomic = true;
+    let IsAtomicOrderingAcquireRelease = true;
  }
  def NAME#_seq_cst : PatFrag<(ops node:$ptr, node:$cmp, node:$val), (!cast<SDPatternOperator>(NAME) node:$ptr, node:$cmp, node:$val)> {
-    let IsAtomic = true;
-    let IsAtomicOrderingSequentiallyConsistent = true;
+    let IsAtomic = true;
+    let IsAtomicOrderingSequentiallyConsistent = true;
  }
}
multiclass binary_atomic_op<SDNode atomic_op, bit IsInt = 1> {
  def _8 : PatFrag<(ops node:$ptr, node:$val), (atomic_op node:$ptr, node:$val)> {
-    let IsAtomic = true;
+    let IsAtomic = true;
    let MemoryVT = !if(IsInt, i8, ?);
  }
  def _16 : PatFrag<(ops node:$ptr, node:$val), (atomic_op node:$ptr, node:$val)> {
-    let IsAtomic = true;
+    let IsAtomic = true;
    let MemoryVT = !if(IsInt, i16, f16);
  }
  def _32 : PatFrag<(ops node:$ptr, node:$val), (atomic_op node:$ptr, node:$val)> {
-    let IsAtomic = true;
+    let IsAtomic = true;
    let MemoryVT = !if(IsInt, i32, f32);
  }
  def _64 : PatFrag<(ops node:$ptr, node:$val), (atomic_op node:$ptr, node:$val)> {
-    let IsAtomic = true;
+    let IsAtomic = true;
    let MemoryVT = !if(IsInt, i64, f64);
  }
@@ -1539,22 +1539,22 @@ multiclass binary_atomic_op<SDNode atomic_op, bit IsInt = 1> {
multiclass ternary_atomic_op<SDNode atomic_op> {
  def _8 : PatFrag<(ops node:$ptr, node:$cmp, node:$val), (atomic_op node:$ptr, node:$cmp, node:$val)> {
-    let IsAtomic = true;
+    let IsAtomic = true;
    let MemoryVT = i8;
  }
  def _16 : PatFrag<(ops node:$ptr, node:$cmp, node:$val), (atomic_op node:$ptr, node:$cmp, node:$val)> {
-    let IsAtomic = true;
+    let IsAtomic = true;
    let MemoryVT = i16;
  }
  def _32 : PatFrag<(ops node:$ptr, node:$cmp, node:$val), (atomic_op node:$ptr, node:$cmp, node:$val)> {
-    let IsAtomic = true;
+    let IsAtomic = true;
    let MemoryVT = i32;
  }
  def _64 : PatFrag<(ops node:$ptr, node:$cmp, node:$val), (atomic_op node:$ptr, node:$cmp, node:$val)> {
-    let IsAtomic = true;
+    let IsAtomic = true;
    let MemoryVT = i64;
  }
@@ -1582,25 +1582,25 @@ defm atomic_cmp_swap : ternary_atomic_op<atomic_cmp_swap>;
def atomic_load_8 : PatFrag<(ops node:$ptr), (atomic_load node:$ptr)> {
-  let IsAtomic = true;
+  let IsAtomic = true;
  let MemoryVT = i8;
}
def atomic_load_16 : PatFrag<(ops node:$ptr), (atomic_load node:$ptr)> {
-  let IsAtomic = true;
+  let IsAtomic = true;
  let MemoryVT = i16;
}
def atomic_load_32 : PatFrag<(ops node:$ptr), (atomic_load node:$ptr)> {
-  let IsAtomic = true;
+  let IsAtomic = true;
  let MemoryVT = i32;
}
def atomic_load_64 : PatFrag<(ops node:$ptr), (atomic_load node:$ptr)> {
-  let IsAtomic = true;
+  let IsAtomic = true;
  let MemoryVT = i64;
}
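As context for how the fragments diffed above are consumed, the following minimal TableGen sketch shows how a backend .td file might select a few of them in Pat<> rules; TableGen turns the IsLoad/IsStore/IsAtomic, MemoryVT, and ScalarMemoryVT fields into matcher predicates. The sketch is illustrative only and not part of this commit: GPR32, GPR64, LDRBzx, STRHtrunc, and LDARW are hypothetical register classes and instructions, not names from this tree.

// Illustrative sketch only (hypothetical target, not part of the diff above).
// Select a zero-extending i8 load: zextloadi8 matches an unindexed load that
// zero-extends and whose memory type is i8.
def : Pat<(i32 (zextloadi8 GPR64:$addr)), (LDRBzx GPR64:$addr)>;
// Select a truncating i16 store: truncstorei16 matches a store whose value is
// truncated to i16 in memory (operand order is value, then pointer).
def : Pat<(truncstorei16 GPR32:$val, GPR64:$addr), (STRHtrunc GPR32:$val, GPR64:$addr)>;
// Select a 32-bit atomic load: atomic_load_32 matches an atomic load with MemoryVT i32.
def : Pat<(i32 (atomic_load_32 GPR64:$addr)), (LDARW GPR64:$addr)>;

These predicate fields are the declarative replacement for hand-written C++ predicate code in PatFrag definitions, which is why nearly every fragment in this hunk sets them.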