author    shadchin <shadchin@yandex-team.ru> 2022-02-10 16:44:30 +0300
committer Daniil Cherednik <dcherednik@yandex-team.ru> 2022-02-10 16:44:30 +0300
commit    2598ef1d0aee359b4b6d5fdd1758916d5907d04f (patch)
tree      012bb94d777798f1f56ac1cec429509766d05181 /contrib/libs/llvm12/include/llvm/CodeGen
parent    6751af0b0c1b952fede40b19b71da8025b5d8bcf (diff)
download  ydb-2598ef1d0aee359b4b6d5fdd1758916d5907d04f.tar.gz
Restoring authorship annotation for <shadchin@yandex-team.ru>. Commit 1 of 2.
Diffstat (limited to 'contrib/libs/llvm12/include/llvm/CodeGen')
-rw-r--r--  contrib/libs/llvm12/include/llvm/CodeGen/AsmPrinter.h | 206
-rw-r--r--  contrib/libs/llvm12/include/llvm/CodeGen/AsmPrinterHandler.h | 12
-rw-r--r--  contrib/libs/llvm12/include/llvm/CodeGen/BasicBlockSectionUtils.h | 82
-rw-r--r--  contrib/libs/llvm12/include/llvm/CodeGen/BasicTTIImpl.h | 704
-rw-r--r--  contrib/libs/llvm12/include/llvm/CodeGen/CalcSpillWeights.h | 68
-rw-r--r--  contrib/libs/llvm12/include/llvm/CodeGen/CallingConvLower.h | 20
-rw-r--r--  contrib/libs/llvm12/include/llvm/CodeGen/CodeGenPassBuilder.h | 2310
-rw-r--r--  contrib/libs/llvm12/include/llvm/CodeGen/CommandFlags.h | 56
-rw-r--r--  contrib/libs/llvm12/include/llvm/CodeGen/DIE.h | 18
-rw-r--r--  contrib/libs/llvm12/include/llvm/CodeGen/DbgEntityHistoryCalculator.h | 48
-rw-r--r--  contrib/libs/llvm12/include/llvm/CodeGen/DebugHandlerBase.h | 22
-rw-r--r--  contrib/libs/llvm12/include/llvm/CodeGen/DwarfStringPoolEntry.h | 4
-rw-r--r--  contrib/libs/llvm12/include/llvm/CodeGen/FastISel.h | 12
-rw-r--r--  contrib/libs/llvm12/include/llvm/CodeGen/FunctionLoweringInfo.h | 56
-rw-r--r--  contrib/libs/llvm12/include/llvm/CodeGen/GlobalISel/CSEInfo.h | 8
-rw-r--r--  contrib/libs/llvm12/include/llvm/CodeGen/GlobalISel/CallLowering.h | 260
-rw-r--r--  contrib/libs/llvm12/include/llvm/CodeGen/GlobalISel/CombinerHelper.h | 526
-rw-r--r--  contrib/libs/llvm12/include/llvm/CodeGen/GlobalISel/GISelChangeObserver.h | 2
-rw-r--r--  contrib/libs/llvm12/include/llvm/CodeGen/GlobalISel/GISelKnownBits.h | 16
-rw-r--r--  contrib/libs/llvm12/include/llvm/CodeGen/GlobalISel/IRTranslator.h | 160
-rw-r--r--  contrib/libs/llvm12/include/llvm/CodeGen/GlobalISel/InstructionSelector.h | 68
-rw-r--r--  contrib/libs/llvm12/include/llvm/CodeGen/GlobalISel/InstructionSelectorImpl.h | 132
-rw-r--r--  contrib/libs/llvm12/include/llvm/CodeGen/GlobalISel/LegalizationArtifactCombiner.h | 210
-rw-r--r--  contrib/libs/llvm12/include/llvm/CodeGen/GlobalISel/LegalizerHelper.h | 136
-rw-r--r--  contrib/libs/llvm12/include/llvm/CodeGen/GlobalISel/LegalizerInfo.h | 140
-rw-r--r--  contrib/libs/llvm12/include/llvm/CodeGen/GlobalISel/Localizer.h | 10
-rw-r--r--  contrib/libs/llvm12/include/llvm/CodeGen/GlobalISel/MIPatternMatch.h | 202
-rw-r--r--  contrib/libs/llvm12/include/llvm/CodeGen/GlobalISel/MachineIRBuilder.h | 320
-rw-r--r--  contrib/libs/llvm12/include/llvm/CodeGen/GlobalISel/RegisterBankInfo.h | 24
-rw-r--r--  contrib/libs/llvm12/include/llvm/CodeGen/GlobalISel/Utils.h | 208
-rw-r--r--  contrib/libs/llvm12/include/llvm/CodeGen/ISDOpcodes.h | 202
-rw-r--r--  contrib/libs/llvm12/include/llvm/CodeGen/LiveInterval.h | 30
-rw-r--r--  contrib/libs/llvm12/include/llvm/CodeGen/LiveIntervalUnion.h | 6
-rw-r--r--  contrib/libs/llvm12/include/llvm/CodeGen/LiveIntervals.h | 24
-rw-r--r--  contrib/libs/llvm12/include/llvm/CodeGen/LiveRangeEdit.h | 8
-rw-r--r--  contrib/libs/llvm12/include/llvm/CodeGen/LiveRegMatrix.h | 20
-rw-r--r--  contrib/libs/llvm12/include/llvm/CodeGen/LiveRegUnits.h | 2
-rw-r--r--  contrib/libs/llvm12/include/llvm/CodeGen/LiveVariables.h | 54
-rw-r--r--  contrib/libs/llvm12/include/llvm/CodeGen/LowLevelType.h | 8
-rw-r--r--  contrib/libs/llvm12/include/llvm/CodeGen/MBFIWrapper.h | 4
-rw-r--r--  contrib/libs/llvm12/include/llvm/CodeGen/MIRFormatter.h | 10
-rw-r--r--  contrib/libs/llvm12/include/llvm/CodeGen/MIRYamlMapping.h | 100
-rw-r--r--  contrib/libs/llvm12/include/llvm/CodeGen/MachineBasicBlock.h | 58
-rw-r--r--  contrib/libs/llvm12/include/llvm/CodeGen/MachineBlockFrequencyInfo.h | 40
-rw-r--r--  contrib/libs/llvm12/include/llvm/CodeGen/MachineCombinerPattern.h | 10
-rw-r--r--  contrib/libs/llvm12/include/llvm/CodeGen/MachineConstantPool.h | 6
-rw-r--r--  contrib/libs/llvm12/include/llvm/CodeGen/MachineFrameInfo.h | 8
-rw-r--r--  contrib/libs/llvm12/include/llvm/CodeGen/MachineFunction.h | 106
-rw-r--r--  contrib/libs/llvm12/include/llvm/CodeGen/MachineInstr.h | 74
-rw-r--r--  contrib/libs/llvm12/include/llvm/CodeGen/MachineInstrBuilder.h | 54
-rw-r--r--  contrib/libs/llvm12/include/llvm/CodeGen/MachineJumpTableInfo.h | 6
-rw-r--r--  contrib/libs/llvm12/include/llvm/CodeGen/MachineLoopInfo.h | 12
-rw-r--r--  contrib/libs/llvm12/include/llvm/CodeGen/MachineModuleInfo.h | 46
-rw-r--r--  contrib/libs/llvm12/include/llvm/CodeGen/MachineOperand.h | 18
-rw-r--r--  contrib/libs/llvm12/include/llvm/CodeGen/MachineOutliner.h | 4
-rw-r--r--  contrib/libs/llvm12/include/llvm/CodeGen/MachinePassManager.h | 534
-rw-r--r--  contrib/libs/llvm12/include/llvm/CodeGen/MachinePassRegistry.def | 394
-rw-r--r--  contrib/libs/llvm12/include/llvm/CodeGen/MachinePipeliner.h | 8
-rw-r--r--  contrib/libs/llvm12/include/llvm/CodeGen/MachineRegisterInfo.h | 48
-rw-r--r--  contrib/libs/llvm12/include/llvm/CodeGen/MachineSSAUpdater.h | 2
-rw-r--r--  contrib/libs/llvm12/include/llvm/CodeGen/MachineStableHash.h | 82
-rw-r--r--  contrib/libs/llvm12/include/llvm/CodeGen/MachineTraceMetrics.h | 4
-rw-r--r--  contrib/libs/llvm12/include/llvm/CodeGen/MultiHazardRecognizer.h | 116
-rw-r--r--  contrib/libs/llvm12/include/llvm/CodeGen/NonRelocatableStringpool.h | 4
-rw-r--r--  contrib/libs/llvm12/include/llvm/CodeGen/Passes.h | 40
-rw-r--r--  contrib/libs/llvm12/include/llvm/CodeGen/RDFLiveness.h | 64
-rw-r--r--  contrib/libs/llvm12/include/llvm/CodeGen/RDFRegisters.h | 82
-rw-r--r--  contrib/libs/llvm12/include/llvm/CodeGen/ReachingDefAnalysis.h | 54
-rw-r--r--  contrib/libs/llvm12/include/llvm/CodeGen/RegAllocPBQP.h | 30
-rw-r--r--  contrib/libs/llvm12/include/llvm/CodeGen/Register.h | 40
-rw-r--r--  contrib/libs/llvm12/include/llvm/CodeGen/RegisterPressure.h | 34
-rw-r--r--  contrib/libs/llvm12/include/llvm/CodeGen/RegisterScavenging.h | 4
-rw-r--r--  contrib/libs/llvm12/include/llvm/CodeGen/RuntimeLibcalls.h | 10
-rw-r--r--  contrib/libs/llvm12/include/llvm/CodeGen/ScheduleDAGInstrs.h | 12
-rw-r--r--  contrib/libs/llvm12/include/llvm/CodeGen/ScheduleHazardRecognizer.h | 18
-rw-r--r--  contrib/libs/llvm12/include/llvm/CodeGen/SelectionDAG.h | 206
-rw-r--r--  contrib/libs/llvm12/include/llvm/CodeGen/SelectionDAGNodes.h | 288
-rw-r--r--  contrib/libs/llvm12/include/llvm/CodeGen/SelectionDAGTargetInfo.h | 2
-rw-r--r--  contrib/libs/llvm12/include/llvm/CodeGen/SlotIndexes.h | 34
-rw-r--r--  contrib/libs/llvm12/include/llvm/CodeGen/StableHashing.h | 246
-rw-r--r--  contrib/libs/llvm12/include/llvm/CodeGen/StackMaps.h | 102
-rw-r--r--  contrib/libs/llvm12/include/llvm/CodeGen/SwitchLoweringUtils.h | 16
-rw-r--r--  contrib/libs/llvm12/include/llvm/CodeGen/TargetCallingConv.h | 96
-rw-r--r--  contrib/libs/llvm12/include/llvm/CodeGen/TargetFrameLowering.h | 20
-rw-r--r--  contrib/libs/llvm12/include/llvm/CodeGen/TargetInstrInfo.h | 220
-rw-r--r--  contrib/libs/llvm12/include/llvm/CodeGen/TargetLowering.h | 190
-rw-r--r--  contrib/libs/llvm12/include/llvm/CodeGen/TargetLoweringObjectFileImpl.h | 38
-rw-r--r--  contrib/libs/llvm12/include/llvm/CodeGen/TargetPassConfig.h | 30
-rw-r--r--  contrib/libs/llvm12/include/llvm/CodeGen/TargetRegisterInfo.h | 152
-rw-r--r--  contrib/libs/llvm12/include/llvm/CodeGen/TargetSubtargetInfo.h | 4
-rw-r--r--  contrib/libs/llvm12/include/llvm/CodeGen/TileShapeInfo.h | 216
-rw-r--r--  contrib/libs/llvm12/include/llvm/CodeGen/ValueTypes.h | 134
-rw-r--r--  contrib/libs/llvm12/include/llvm/CodeGen/ValueTypes.td | 222
-rw-r--r--  contrib/libs/llvm12/include/llvm/CodeGen/VirtRegMap.h | 58
-rw-r--r--  contrib/libs/llvm12/include/llvm/CodeGen/WasmEHFuncInfo.h | 4
95 files changed, 5404 insertions(+), 5404 deletions(-)
diff --git a/contrib/libs/llvm12/include/llvm/CodeGen/AsmPrinter.h b/contrib/libs/llvm12/include/llvm/CodeGen/AsmPrinter.h
index f95cbeb0e5..8686c464fc 100644
--- a/contrib/libs/llvm12/include/llvm/CodeGen/AsmPrinter.h
+++ b/contrib/libs/llvm12/include/llvm/CodeGen/AsmPrinter.h
@@ -73,7 +73,7 @@ class MCSymbol;
class MCTargetOptions;
class MDNode;
class Module;
-class PseudoProbeHandler;
+class PseudoProbeHandler;
class raw_ostream;
class StackMaps;
class StringRef;
@@ -147,31 +147,31 @@ public:
using GOTEquivUsePair = std::pair<const GlobalVariable *, unsigned>;
MapVector<const MCSymbol *, GOTEquivUsePair> GlobalGOTEquivs;
- /// struct HandlerInfo and Handlers permit users or target extended
- /// AsmPrinter to add their own handlers.
- struct HandlerInfo {
- std::unique_ptr<AsmPrinterHandler> Handler;
- const char *TimerName;
- const char *TimerDescription;
- const char *TimerGroupName;
- const char *TimerGroupDescription;
-
- HandlerInfo(std::unique_ptr<AsmPrinterHandler> Handler,
- const char *TimerName, const char *TimerDescription,
- const char *TimerGroupName, const char *TimerGroupDescription)
- : Handler(std::move(Handler)), TimerName(TimerName),
- TimerDescription(TimerDescription), TimerGroupName(TimerGroupName),
- TimerGroupDescription(TimerGroupDescription) {}
- };
-
+ /// struct HandlerInfo and Handlers permit users or target extended
+ /// AsmPrinter to add their own handlers.
+ struct HandlerInfo {
+ std::unique_ptr<AsmPrinterHandler> Handler;
+ const char *TimerName;
+ const char *TimerDescription;
+ const char *TimerGroupName;
+ const char *TimerGroupDescription;
+
+ HandlerInfo(std::unique_ptr<AsmPrinterHandler> Handler,
+ const char *TimerName, const char *TimerDescription,
+ const char *TimerGroupName, const char *TimerGroupDescription)
+ : Handler(std::move(Handler)), TimerName(TimerName),
+ TimerDescription(TimerDescription), TimerGroupName(TimerGroupName),
+ TimerGroupDescription(TimerGroupDescription) {}
+ };
+
private:
MCSymbol *CurrentFnEnd = nullptr;
- /// Map a basic block section ID to the exception symbol associated with that
- /// section. Map entries are assigned and looked up via
- /// AsmPrinter::getMBBExceptionSym.
- DenseMap<unsigned, MCSymbol *> MBBSectionExceptionSyms;
-
+ /// Map a basic block section ID to the exception symbol associated with that
+ /// section. Map entries are assigned and looked up via
+ /// AsmPrinter::getMBBExceptionSym.
+ DenseMap<unsigned, MCSymbol *> MBBSectionExceptionSyms;
+
// The symbol used to represent the start of the current BB section of the
// function. This is used to calculate the size of the BB section.
MCSymbol *CurrentSectionBeginSym = nullptr;
@@ -189,8 +189,8 @@ protected:
/// A vector of all debug/EH info emitters we should use. This vector
/// maintains ownership of the emitters.
- std::vector<HandlerInfo> Handlers;
- size_t NumUserHandlers = 0;
+ std::vector<HandlerInfo> Handlers;
+ size_t NumUserHandlers = 0;
public:
struct SrcMgrDiagInfo {
@@ -214,10 +214,10 @@ private:
/// If the target supports dwarf debug info, this pointer is non-null.
DwarfDebug *DD = nullptr;
- /// A handler that supports pseudo probe emission with embedded inline
- /// context.
- PseudoProbeHandler *PP = nullptr;
-
+ /// A handler that supports pseudo probe emission with embedded inline
+ /// context.
+ PseudoProbeHandler *PP = nullptr;
+
/// If the current module uses dwarf CFI annotations strictly for debugging.
bool isCFIMoveForDebugging = false;
@@ -233,14 +233,14 @@ public:
uint16_t getDwarfVersion() const;
void setDwarfVersion(uint16_t Version);
- bool isDwarf64() const;
-
- /// Returns 4 for DWARF32 and 8 for DWARF64.
- unsigned int getDwarfOffsetByteSize() const;
-
- /// Returns 4 for DWARF32 and 12 for DWARF64.
- unsigned int getUnitLengthFieldByteSize() const;
-
+ bool isDwarf64() const;
+
+ /// Returns 4 for DWARF32 and 8 for DWARF64.
+ unsigned int getDwarfOffsetByteSize() const;
+
+ /// Returns 4 for DWARF32 and 12 for DWARF64.
+ unsigned int getUnitLengthFieldByteSize() const;
+
bool isPositionIndependent() const;
/// Return true if assembly output should contain comments.
@@ -256,10 +256,10 @@ public:
MCSymbol *getFunctionBegin() const { return CurrentFnBegin; }
MCSymbol *getFunctionEnd() const { return CurrentFnEnd; }
- // Return the exception symbol associated with the MBB section containing a
- // given basic block.
- MCSymbol *getMBBExceptionSym(const MachineBasicBlock &MBB);
-
+ // Return the exception symbol associated with the MBB section containing a
+ // given basic block.
+ MCSymbol *getMBBExceptionSym(const MachineBasicBlock &MBB);
+
/// Return information about object file lowering.
const TargetLoweringObjectFile &getObjFileLowering() const;
@@ -370,10 +370,10 @@ public:
void emitStackSizeSection(const MachineFunction &MF);
- void emitBBAddrMapSection(const MachineFunction &MF);
-
- void emitPseudoProbe(const MachineInstr &MI);
-
+ void emitBBAddrMapSection(const MachineFunction &MF);
+
+ void emitPseudoProbe(const MachineInstr &MI);
+
void emitRemarksSection(remarks::RemarkStreamer &RS);
enum CFIMoveType { CFI_M_None, CFI_M_EH, CFI_M_Debug };
@@ -401,32 +401,32 @@ public:
/// so, emit it and return true, otherwise do nothing and return false.
bool emitSpecialLLVMGlobal(const GlobalVariable *GV);
- /// `llvm.global_ctors` and `llvm.global_dtors` are arrays of Structor
- /// structs.
- ///
- /// Priority - init priority
- /// Func - global initialization or global clean-up function
- /// ComdatKey - associated data
- struct Structor {
- int Priority = 0;
- Constant *Func = nullptr;
- GlobalValue *ComdatKey = nullptr;
-
- Structor() = default;
- };
-
- /// This method gathers an array of Structors and then sorts them out by
- /// Priority.
- /// @param List The initializer of `llvm.global_ctors` or `llvm.global_dtors`
- /// array.
- /// @param[out] Structors Sorted Structor structs by Priority.
- void preprocessXXStructorList(const DataLayout &DL, const Constant *List,
- SmallVector<Structor, 8> &Structors);
-
- /// This method emits `llvm.global_ctors` or `llvm.global_dtors` list.
- virtual void emitXXStructorList(const DataLayout &DL, const Constant *List,
- bool IsCtor);
-
+ /// `llvm.global_ctors` and `llvm.global_dtors` are arrays of Structor
+ /// structs.
+ ///
+ /// Priority - init priority
+ /// Func - global initialization or global clean-up function
+ /// ComdatKey - associated data
+ struct Structor {
+ int Priority = 0;
+ Constant *Func = nullptr;
+ GlobalValue *ComdatKey = nullptr;
+
+ Structor() = default;
+ };
+
+ /// This method gathers an array of Structors and then sorts them out by
+ /// Priority.
+ /// @param List The initializer of `llvm.global_ctors` or `llvm.global_dtors`
+ /// array.
+ /// @param[out] Structors Sorted Structor structs by Priority.
+ void preprocessXXStructorList(const DataLayout &DL, const Constant *List,
+ SmallVector<Structor, 8> &Structors);
+
+ /// This method emits `llvm.global_ctors` or `llvm.global_dtors` list.
+ virtual void emitXXStructorList(const DataLayout &DL, const Constant *List,
+ bool IsCtor);
+
/// Emit an alignment directive to the specified power of two boundary. If a
/// global value is specified, and if that global has an explicit alignment
/// requested, it will override the alignment request if required for
@@ -461,11 +461,11 @@ public:
// Overridable Hooks
//===------------------------------------------------------------------===//
- void addAsmPrinterHandler(HandlerInfo Handler) {
- Handlers.insert(Handlers.begin(), std::move(Handler));
- NumUserHandlers++;
- }
-
+ void addAsmPrinterHandler(HandlerInfo Handler) {
+ Handlers.insert(Handlers.begin(), std::move(Handler));
+ NumUserHandlers++;
+ }
+
// Targets can, or in the case of EmitInstruction, must implement these to
// customize output.
@@ -617,7 +617,7 @@ public:
unsigned GetSizeOfEncodedValue(unsigned Encoding) const;
/// Emit reference to a ttype global with a specified encoding.
- virtual void emitTTypeReference(const GlobalValue *GV, unsigned Encoding);
+ virtual void emitTTypeReference(const GlobalValue *GV, unsigned Encoding);
/// Emit a reference to a symbol for use in dwarf. Different object formats
/// represent this in different ways. Some use a relocation others encode
@@ -625,39 +625,39 @@ public:
void emitDwarfSymbolReference(const MCSymbol *Label,
bool ForceOffset = false) const;
- /// Emit the 4- or 8-byte offset of a string from the start of its section.
+ /// Emit the 4- or 8-byte offset of a string from the start of its section.
///
/// When possible, emit a DwarfStringPool section offset without any
/// relocations, and without using the symbol. Otherwise, defers to \a
/// emitDwarfSymbolReference().
- ///
- /// The length of the emitted value depends on the DWARF format.
+ ///
+ /// The length of the emitted value depends on the DWARF format.
void emitDwarfStringOffset(DwarfStringPoolEntry S) const;
- /// Emit the 4-or 8-byte offset of a string from the start of its section.
+ /// Emit the 4-or 8-byte offset of a string from the start of its section.
void emitDwarfStringOffset(DwarfStringPoolEntryRef S) const {
emitDwarfStringOffset(S.getEntry());
}
- /// Emit something like ".long Label + Offset" or ".quad Label + Offset"
- /// depending on the DWARF format.
- void emitDwarfOffset(const MCSymbol *Label, uint64_t Offset) const;
-
- /// Emit 32- or 64-bit value depending on the DWARF format.
- void emitDwarfLengthOrOffset(uint64_t Value) const;
-
- /// Emit a special value of 0xffffffff if producing 64-bit debugging info.
- void maybeEmitDwarf64Mark() const;
-
- /// Emit a unit length field. The actual format, DWARF32 or DWARF64, is chosen
- /// according to the settings.
- void emitDwarfUnitLength(uint64_t Length, const Twine &Comment) const;
-
- /// Emit a unit length field. The actual format, DWARF32 or DWARF64, is chosen
- /// according to the settings.
- void emitDwarfUnitLength(const MCSymbol *Hi, const MCSymbol *Lo,
- const Twine &Comment) const;
-
+ /// Emit something like ".long Label + Offset" or ".quad Label + Offset"
+ /// depending on the DWARF format.
+ void emitDwarfOffset(const MCSymbol *Label, uint64_t Offset) const;
+
+ /// Emit 32- or 64-bit value depending on the DWARF format.
+ void emitDwarfLengthOrOffset(uint64_t Value) const;
+
+ /// Emit a special value of 0xffffffff if producing 64-bit debugging info.
+ void maybeEmitDwarf64Mark() const;
+
+ /// Emit a unit length field. The actual format, DWARF32 or DWARF64, is chosen
+ /// according to the settings.
+ void emitDwarfUnitLength(uint64_t Length, const Twine &Comment) const;
+
+ /// Emit a unit length field. The actual format, DWARF32 or DWARF64, is chosen
+ /// according to the settings.
+ void emitDwarfUnitLength(const MCSymbol *Hi, const MCSymbol *Lo,
+ const Twine &Comment) const;
+
/// Emit reference to a call site with a specified encoding
void emitCallSiteOffset(const MCSymbol *Hi, const MCSymbol *Lo,
unsigned Encoding) const;
@@ -798,9 +798,9 @@ private:
GCMetadataPrinter *GetOrCreateGCPrinter(GCStrategy &S);
/// Emit GlobalAlias or GlobalIFunc.
void emitGlobalIndirectSymbol(Module &M, const GlobalIndirectSymbol &GIS);
-
- /// This method decides whether the specified basic block requires a label.
- bool shouldEmitLabelForBasicBlock(const MachineBasicBlock &MBB) const;
+
+ /// This method decides whether the specified basic block requires a label.
+ bool shouldEmitLabelForBasicBlock(const MachineBasicBlock &MBB) const;
};
} // end namespace llvm
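
Note: the HandlerInfo struct and addAsmPrinterHandler() restored in the hunk above are the public hook for attaching extra emission handlers to an AsmPrinter. A minimal usage sketch, assuming a caller that already owns an AsmPrinterHandler subclass; the function name and timer strings are invented for illustration:

#include "llvm/CodeGen/AsmPrinter.h"
#include "llvm/CodeGen/AsmPrinterHandler.h"
#include <memory>

// Register a user handler; per the header above, it is inserted before the
// built-in debug/EH handlers and counted via NumUserHandlers.
void registerMyHandler(llvm::AsmPrinter &AP,
                       std::unique_ptr<llvm::AsmPrinterHandler> MyHandler) {
  AP.addAsmPrinterHandler(llvm::AsmPrinter::HandlerInfo(
      std::move(MyHandler),
      /*TimerName=*/"my-handler", /*TimerDescription=*/"My Handler Emission",
      /*TimerGroupName=*/"my-handlers",
      /*TimerGroupDescription=*/"User AsmPrinter Handlers"));
}

The timer strings feed the -time-passes style accounting that HandlerInfo bundles alongside the handler itself.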
diff --git a/contrib/libs/llvm12/include/llvm/CodeGen/AsmPrinterHandler.h b/contrib/libs/llvm12/include/llvm/CodeGen/AsmPrinterHandler.h
index 6b5bda2d8d..8346f98dd9 100644
--- a/contrib/libs/llvm12/include/llvm/CodeGen/AsmPrinterHandler.h
+++ b/contrib/libs/llvm12/include/llvm/CodeGen/AsmPrinterHandler.h
@@ -30,10 +30,10 @@ class MachineBasicBlock;
class MachineFunction;
class MachineInstr;
class MCSymbol;
-class Module;
+class Module;
-typedef MCSymbol *ExceptionSymbolProvider(AsmPrinter *Asm,
- const MachineBasicBlock *MBB);
+typedef MCSymbol *ExceptionSymbolProvider(AsmPrinter *Asm,
+ const MachineBasicBlock *MBB);
/// Collects and handles AsmPrinter objects required to build debug
/// or EH information.
@@ -45,8 +45,8 @@ public:
/// this tracks that size.
virtual void setSymbolSize(const MCSymbol *Sym, uint64_t Size) = 0;
- virtual void beginModule(Module *M) {}
-
+ virtual void beginModule(Module *M) {}
+
/// Emit all sections that should come after the content.
virtual void endModule() = 0;
@@ -85,7 +85,7 @@ public:
/// Process end of a basic block during basic block sections.
virtual void endBasicBlock(const MachineBasicBlock &MBB) {}
};
-
+
} // End of namespace llvm
#endif
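
Note: the hunk shows only part of the AsmPrinterHandler interface. A no-op subclass sketch follows; the beginFunction/endFunction/beginInstruction/endInstruction overrides are assumed from the full LLVM 12 header, since those declarations are not visible in this excerpt:

#include "llvm/CodeGen/AsmPrinterHandler.h"
#include <cstdint>

namespace {
// Hypothetical do-nothing handler, for illustration only.
class NullHandler : public llvm::AsmPrinterHandler {
public:
  void setSymbolSize(const llvm::MCSymbol *Sym, uint64_t Size) override {}
  void beginModule(llvm::Module *M) override {} // hook restored above
  void endModule() override {}
  // Assumed pure virtuals from the full LLVM 12 header (not shown here):
  void beginFunction(const llvm::MachineFunction *MF) override {}
  void endFunction(const llvm::MachineFunction *MF) override {}
  void beginInstruction(const llvm::MachineInstr *MI) override {}
  void endInstruction() override {}
};
} // end anonymous namespace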
diff --git a/contrib/libs/llvm12/include/llvm/CodeGen/BasicBlockSectionUtils.h b/contrib/libs/llvm12/include/llvm/CodeGen/BasicBlockSectionUtils.h
index 8018d0ff62..b9783cbc79 100644
--- a/contrib/libs/llvm12/include/llvm/CodeGen/BasicBlockSectionUtils.h
+++ b/contrib/libs/llvm12/include/llvm/CodeGen/BasicBlockSectionUtils.h
@@ -1,41 +1,41 @@
-#pragma once
-
-#ifdef __GNUC__
-#pragma GCC diagnostic push
-#pragma GCC diagnostic ignored "-Wunused-parameter"
-#endif
-
-//===- BasicBlockSectionUtils.h - Utilities for basic block sections --===//
-//
-// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
-// See https://llvm.org/LICENSE.txt for license information.
-// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
-//
-//===----------------------------------------------------------------------===//
-
-#ifndef LLVM_CODEGEN_BASICBLOCKSECTIONUTILS_H
-#define LLVM_CODEGEN_BASICBLOCKSECTIONUTILS_H
-
-#include "llvm/ADT/STLExtras.h"
-#include "llvm/Support/CommandLine.h"
-
-namespace llvm {
-
-extern cl::opt<std::string> BBSectionsColdTextPrefix;
-
-class MachineFunction;
-class MachineBasicBlock;
-
-using MachineBasicBlockComparator =
- function_ref<bool(const MachineBasicBlock &, const MachineBasicBlock &)>;
-
-void sortBasicBlocksAndUpdateBranches(MachineFunction &MF,
- MachineBasicBlockComparator MBBCmp);
-
-} // end namespace llvm
-
-#endif // LLVM_CODEGEN_BASICBLOCKSECTIONUTILS_H
-
-#ifdef __GNUC__
-#pragma GCC diagnostic pop
-#endif
+#pragma once
+
+#ifdef __GNUC__
+#pragma GCC diagnostic push
+#pragma GCC diagnostic ignored "-Wunused-parameter"
+#endif
+
+//===- BasicBlockSectionUtils.h - Utilities for basic block sections --===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_CODEGEN_BASICBLOCKSECTIONUTILS_H
+#define LLVM_CODEGEN_BASICBLOCKSECTIONUTILS_H
+
+#include "llvm/ADT/STLExtras.h"
+#include "llvm/Support/CommandLine.h"
+
+namespace llvm {
+
+extern cl::opt<std::string> BBSectionsColdTextPrefix;
+
+class MachineFunction;
+class MachineBasicBlock;
+
+using MachineBasicBlockComparator =
+ function_ref<bool(const MachineBasicBlock &, const MachineBasicBlock &)>;
+
+void sortBasicBlocksAndUpdateBranches(MachineFunction &MF,
+ MachineBasicBlockComparator MBBCmp);
+
+} // end namespace llvm
+
+#endif // LLVM_CODEGEN_BASICBLOCKSECTIONUTILS_H
+
+#ifdef __GNUC__
+#pragma GCC diagnostic pop
+#endif
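
Note: BasicBlockSectionUtils.h declares a single entry point, sortBasicBlocksAndUpdateBranches, taking a MachineBasicBlockComparator (a strict-weak-ordering predicate over blocks, per the using-declaration above). An illustrative call using block numbers as an arbitrary ordering key; the surrounding pass context is assumed:

#include "llvm/CodeGen/BasicBlockSectionUtils.h"
#include "llvm/CodeGen/MachineBasicBlock.h"
#include "llvm/CodeGen/MachineFunction.h"

// Reorder the blocks of MF by block number and let the utility repair
// fall-through branches afterwards.
void sortByBlockNumber(llvm::MachineFunction &MF) {
  llvm::sortBasicBlocksAndUpdateBranches(
      MF, [](const llvm::MachineBasicBlock &A,
             const llvm::MachineBasicBlock &B) {
        return A.getNumber() < B.getNumber();
      });
}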
diff --git a/contrib/libs/llvm12/include/llvm/CodeGen/BasicTTIImpl.h b/contrib/libs/llvm12/include/llvm/CodeGen/BasicTTIImpl.h
index 85ca4b303c..9d2c9571a5 100644
--- a/contrib/libs/llvm12/include/llvm/CodeGen/BasicTTIImpl.h
+++ b/contrib/libs/llvm12/include/llvm/CodeGen/BasicTTIImpl.h
@@ -121,14 +121,14 @@ private:
/// Estimate a cost of subvector extraction as a sequence of extract and
/// insert operations.
- unsigned getExtractSubvectorOverhead(VectorType *VTy, int Index,
+ unsigned getExtractSubvectorOverhead(VectorType *VTy, int Index,
FixedVectorType *SubVTy) {
assert(VTy && SubVTy &&
"Can only extract subvectors from vectors");
int NumSubElts = SubVTy->getNumElements();
- assert((!isa<FixedVectorType>(VTy) ||
- (Index + NumSubElts) <=
- (int)cast<FixedVectorType>(VTy)->getNumElements()) &&
+ assert((!isa<FixedVectorType>(VTy) ||
+ (Index + NumSubElts) <=
+ (int)cast<FixedVectorType>(VTy)->getNumElements()) &&
"SK_ExtractSubvector index out of range");
unsigned Cost = 0;
@@ -146,14 +146,14 @@ private:
/// Estimate a cost of subvector insertion as a sequence of extract and
/// insert operations.
- unsigned getInsertSubvectorOverhead(VectorType *VTy, int Index,
+ unsigned getInsertSubvectorOverhead(VectorType *VTy, int Index,
FixedVectorType *SubVTy) {
assert(VTy && SubVTy &&
"Can only insert subvectors into vectors");
int NumSubElts = SubVTy->getNumElements();
- assert((!isa<FixedVectorType>(VTy) ||
- (Index + NumSubElts) <=
- (int)cast<FixedVectorType>(VTy)->getNumElements()) &&
+ assert((!isa<FixedVectorType>(VTy) ||
+ (Index + NumSubElts) <=
+ (int)cast<FixedVectorType>(VTy)->getNumElements()) &&
"SK_InsertSubvector index out of range");
unsigned Cost = 0;
@@ -232,13 +232,13 @@ public:
}
bool isNoopAddrSpaceCast(unsigned FromAS, unsigned ToAS) const {
- return getTLI()->getTargetMachine().isNoopAddrSpaceCast(FromAS, ToAS);
- }
-
- unsigned getAssumedAddrSpace(const Value *V) const {
- return getTLI()->getTargetMachine().getAssumedAddrSpace(V);
+ return getTLI()->getTargetMachine().isNoopAddrSpaceCast(FromAS, ToAS);
}
+ unsigned getAssumedAddrSpace(const Value *V) const {
+ return getTLI()->getTargetMachine().getAssumedAddrSpace(V);
+ }
+
Value *rewriteIntrinsicWithAddressSpace(IntrinsicInst *II, Value *OldV,
Value *NewV) const {
return nullptr;
@@ -279,10 +279,10 @@ public:
return TargetTransformInfoImplBase::isLSRCostLess(C1, C2);
}
- bool isNumRegsMajorCostOfLSR() {
- return TargetTransformInfoImplBase::isNumRegsMajorCostOfLSR();
- }
-
+ bool isNumRegsMajorCostOfLSR() {
+ return TargetTransformInfoImplBase::isNumRegsMajorCostOfLSR();
+ }
+
bool isProfitableLSRChainElement(Instruction *I) {
return TargetTransformInfoImplBase::isProfitableLSRChainElement(I);
}
@@ -312,10 +312,10 @@ public:
return getTLI()->isTypeLegal(VT);
}
- unsigned getRegUsageForType(Type *Ty) {
- return getTLI()->getTypeLegalizationCost(DL, Ty).first;
- }
-
+ unsigned getRegUsageForType(Type *Ty) {
+ return getTLI()->getTypeLegalizationCost(DL, Ty).first;
+ }
+
int getGEPCost(Type *PointeeType, const Value *Ptr,
ArrayRef<const Value *> Operands) {
return BaseT::getGEPCost(PointeeType, Ptr, Operands);
@@ -408,7 +408,7 @@ public:
}
unsigned getInliningThresholdMultiplier() { return 1; }
- unsigned adjustInliningThreshold(const CallBase *CB) { return 0; }
+ unsigned adjustInliningThreshold(const CallBase *CB) { return 0; }
int getInlinerVectorBonusPercent() { return 150; }
@@ -500,30 +500,30 @@ public:
return BaseT::emitGetActiveLaneMask();
}
- Optional<Instruction *> instCombineIntrinsic(InstCombiner &IC,
- IntrinsicInst &II) {
- return BaseT::instCombineIntrinsic(IC, II);
- }
-
- Optional<Value *> simplifyDemandedUseBitsIntrinsic(InstCombiner &IC,
- IntrinsicInst &II,
- APInt DemandedMask,
- KnownBits &Known,
- bool &KnownBitsComputed) {
- return BaseT::simplifyDemandedUseBitsIntrinsic(IC, II, DemandedMask, Known,
- KnownBitsComputed);
- }
-
- Optional<Value *> simplifyDemandedVectorEltsIntrinsic(
- InstCombiner &IC, IntrinsicInst &II, APInt DemandedElts, APInt &UndefElts,
- APInt &UndefElts2, APInt &UndefElts3,
- std::function<void(Instruction *, unsigned, APInt, APInt &)>
- SimplifyAndSetOp) {
- return BaseT::simplifyDemandedVectorEltsIntrinsic(
- IC, II, DemandedElts, UndefElts, UndefElts2, UndefElts3,
- SimplifyAndSetOp);
- }
-
+ Optional<Instruction *> instCombineIntrinsic(InstCombiner &IC,
+ IntrinsicInst &II) {
+ return BaseT::instCombineIntrinsic(IC, II);
+ }
+
+ Optional<Value *> simplifyDemandedUseBitsIntrinsic(InstCombiner &IC,
+ IntrinsicInst &II,
+ APInt DemandedMask,
+ KnownBits &Known,
+ bool &KnownBitsComputed) {
+ return BaseT::simplifyDemandedUseBitsIntrinsic(IC, II, DemandedMask, Known,
+ KnownBitsComputed);
+ }
+
+ Optional<Value *> simplifyDemandedVectorEltsIntrinsic(
+ InstCombiner &IC, IntrinsicInst &II, APInt DemandedElts, APInt &UndefElts,
+ APInt &UndefElts2, APInt &UndefElts3,
+ std::function<void(Instruction *, unsigned, APInt, APInt &)>
+ SimplifyAndSetOp) {
+ return BaseT::simplifyDemandedVectorEltsIntrinsic(
+ IC, II, DemandedElts, UndefElts, UndefElts2, UndefElts3,
+ SimplifyAndSetOp);
+ }
+
int getInstructionLatency(const Instruction *I) {
if (isa<LoadInst>(I))
return getST()->getSchedModel().DefaultLoadLatency;
@@ -579,8 +579,8 @@ public:
unsigned getRegisterBitWidth(bool Vector) const { return 32; }
- Optional<unsigned> getMaxVScale() const { return None; }
-
+ Optional<unsigned> getMaxVScale() const { return None; }
+
/// Estimate the overhead of scalarizing an instruction. Insert and Extract
/// are set if the demanded result elements need to be inserted and/or
/// extracted from vectors.
@@ -616,7 +616,7 @@ public:
return thisT()->getScalarizationOverhead(Ty, DemandedElts, Insert, Extract);
}
- /// Estimate the overhead of scalarizing an instruction's unique
+ /// Estimate the overhead of scalarizing an instruction's unique
/// non-constant operands. The types of the arguments are ordinarily
/// scalar, in which case the costs are multiplied with VF.
unsigned getOperandsScalarizationOverhead(ArrayRef<const Value *> Args,
@@ -624,14 +624,14 @@ public:
unsigned Cost = 0;
SmallPtrSet<const Value*, 4> UniqueOperands;
for (const Value *A : Args) {
- // Disregard things like metadata arguments.
- Type *Ty = A->getType();
- if (!Ty->isIntOrIntVectorTy() && !Ty->isFPOrFPVectorTy() &&
- !Ty->isPtrOrPtrVectorTy())
- continue;
-
+ // Disregard things like metadata arguments.
+ Type *Ty = A->getType();
+ if (!Ty->isIntOrIntVectorTy() && !Ty->isFPOrFPVectorTy() &&
+ !Ty->isPtrOrPtrVectorTy())
+ continue;
+
if (!isa<Constant>(A) && UniqueOperands.insert(A).second) {
- auto *VecTy = dyn_cast<VectorType>(Ty);
+ auto *VecTy = dyn_cast<VectorType>(Ty);
if (VecTy) {
// If A is a vector operand, VF should be 1 or correspond to A.
assert((VF == 1 ||
@@ -639,7 +639,7 @@ public:
"Vector argument does not match VF");
}
else
- VecTy = FixedVectorType::get(Ty, VF);
+ VecTy = FixedVectorType::get(Ty, VF);
Cost += getScalarizationOverhead(VecTy, false, true);
}
@@ -713,8 +713,8 @@ public:
if (auto *VTy = dyn_cast<VectorType>(Ty)) {
unsigned Num = cast<FixedVectorType>(VTy)->getNumElements();
unsigned Cost = thisT()->getArithmeticInstrCost(
- Opcode, VTy->getScalarType(), CostKind, Opd1Info, Opd2Info,
- Opd1PropInfo, Opd2PropInfo, Args, CxtI);
+ Opcode, VTy->getScalarType(), CostKind, Opd1Info, Opd2Info,
+ Opd1PropInfo, Opd2PropInfo, Args, CxtI);
// Return the cost of multiple scalar invocation plus the cost of
// inserting and extracting the values.
return getScalarizationOverhead(VTy, Args) + Num * Cost;
@@ -737,20 +737,20 @@ public:
case TTI::SK_PermuteTwoSrc:
return getPermuteShuffleOverhead(cast<FixedVectorType>(Tp));
case TTI::SK_ExtractSubvector:
- return getExtractSubvectorOverhead(Tp, Index,
+ return getExtractSubvectorOverhead(Tp, Index,
cast<FixedVectorType>(SubTp));
case TTI::SK_InsertSubvector:
- return getInsertSubvectorOverhead(Tp, Index,
+ return getInsertSubvectorOverhead(Tp, Index,
cast<FixedVectorType>(SubTp));
}
llvm_unreachable("Unknown TTI::ShuffleKind");
}
unsigned getCastInstrCost(unsigned Opcode, Type *Dst, Type *Src,
- TTI::CastContextHint CCH,
+ TTI::CastContextHint CCH,
TTI::TargetCostKind CostKind,
const Instruction *I = nullptr) {
- if (BaseT::getCastInstrCost(Opcode, Dst, Src, CCH, CostKind, I) == 0)
+ if (BaseT::getCastInstrCost(Opcode, Dst, Src, CCH, CostKind, I) == 0)
return 0;
const TargetLoweringBase *TLI = getTLI();
@@ -788,12 +788,12 @@ public:
return 0;
LLVM_FALLTHROUGH;
case Instruction::SExt:
- if (I && getTLI()->isExtFree(I))
+ if (I && getTLI()->isExtFree(I))
return 0;
// If this is a zext/sext of a load, return 0 if the corresponding
// extending load exists on target.
- if (CCH == TTI::CastContextHint::Normal) {
+ if (CCH == TTI::CastContextHint::Normal) {
EVT ExtVT = EVT::getEVT(Dst);
EVT LoadVT = EVT::getEVT(Src);
unsigned LType =
@@ -868,7 +868,7 @@ public:
unsigned SplitCost =
(!SplitSrc || !SplitDst) ? TTI->getVectorSplitCost() : 0;
return SplitCost +
- (2 * TTI->getCastInstrCost(Opcode, SplitDstTy, SplitSrcTy, CCH,
+ (2 * TTI->getCastInstrCost(Opcode, SplitDstTy, SplitSrcTy, CCH,
CostKind, I));
}
@@ -876,7 +876,7 @@ public:
// the operation will get scalarized.
unsigned Num = cast<FixedVectorType>(DstVTy)->getNumElements();
unsigned Cost = thisT()->getCastInstrCost(
- Opcode, Dst->getScalarType(), Src->getScalarType(), CCH, CostKind, I);
+ Opcode, Dst->getScalarType(), Src->getScalarType(), CCH, CostKind, I);
// Return the cost of multiple scalar invocation plus the cost of
// inserting and extracting the values.
@@ -901,7 +901,7 @@ public:
return thisT()->getVectorInstrCost(Instruction::ExtractElement, VecTy,
Index) +
thisT()->getCastInstrCost(Opcode, Dst, VecTy->getElementType(),
- TTI::CastContextHint::None, TTI::TCK_RecipThroughput);
+ TTI::CastContextHint::None, TTI::TCK_RecipThroughput);
}
unsigned getCFInstrCost(unsigned Opcode, TTI::TargetCostKind CostKind) {
@@ -909,7 +909,7 @@ public:
}
unsigned getCmpSelInstrCost(unsigned Opcode, Type *ValTy, Type *CondTy,
- CmpInst::Predicate VecPred,
+ CmpInst::Predicate VecPred,
TTI::TargetCostKind CostKind,
const Instruction *I = nullptr) {
const TargetLoweringBase *TLI = getTLI();
@@ -918,8 +918,8 @@ public:
// TODO: Handle other cost kinds.
if (CostKind != TTI::TCK_RecipThroughput)
- return BaseT::getCmpSelInstrCost(Opcode, ValTy, CondTy, VecPred, CostKind,
- I);
+ return BaseT::getCmpSelInstrCost(Opcode, ValTy, CondTy, VecPred, CostKind,
+ I);
// Selects on vectors are actually vector selects.
if (ISD == ISD::SELECT) {
@@ -944,7 +944,7 @@ public:
if (CondTy)
CondTy = CondTy->getScalarType();
unsigned Cost = thisT()->getCmpSelInstrCost(
- Opcode, ValVTy->getScalarType(), CondTy, VecPred, CostKind, I);
+ Opcode, ValVTy->getScalarType(), CondTy, VecPred, CostKind, I);
// Return the cost of multiple scalar invocation plus the cost of
// inserting and extracting the values.
@@ -978,11 +978,11 @@ public:
return Cost;
if (Src->isVectorTy() &&
- // In practice it's not currently possible to have a change in lane
- // length for extending loads or truncating stores so both types should
- // have the same scalable property.
- TypeSize::isKnownLT(Src->getPrimitiveSizeInBits(),
- LT.second.getSizeInBits())) {
+ // In practice it's not currently possible to have a change in lane
+ // length for extending loads or truncating stores so both types should
+ // have the same scalable property.
+ TypeSize::isKnownLT(Src->getPrimitiveSizeInBits(),
+ LT.second.getSizeInBits())) {
// This is a vector load that legalizes to a larger type than the vector
// itself. Unless the corresponding extending load or truncating store is
// legal, then this will scalarize.
@@ -1005,51 +1005,51 @@ public:
return Cost;
}
- unsigned getGatherScatterOpCost(unsigned Opcode, Type *DataTy,
- const Value *Ptr, bool VariableMask,
- Align Alignment, TTI::TargetCostKind CostKind,
- const Instruction *I = nullptr) {
- auto *VT = cast<FixedVectorType>(DataTy);
- // Assume the target does not have support for gather/scatter operations
- // and provide a rough estimate.
- //
- // First, compute the cost of extracting the individual addresses and the
- // individual memory operations.
- int LoadCost =
- VT->getNumElements() *
- (getVectorInstrCost(
- Instruction::ExtractElement,
- FixedVectorType::get(PointerType::get(VT->getElementType(), 0),
- VT->getNumElements()),
- -1) +
- getMemoryOpCost(Opcode, VT->getElementType(), Alignment, 0, CostKind));
-
- // Next, compute the cost of packing the result in a vector.
- int PackingCost = getScalarizationOverhead(VT, Opcode != Instruction::Store,
- Opcode == Instruction::Store);
-
- int ConditionalCost = 0;
- if (VariableMask) {
- // Compute the cost of conditionally executing the memory operations with
- // variable masks. This includes extracting the individual conditions, a
- // branches and PHIs to combine the results.
- // NOTE: Estimating the cost of conditionally executing the memory
- // operations accurately is quite difficult and the current solution
- // provides a very rough estimate only.
- ConditionalCost =
- VT->getNumElements() *
- (getVectorInstrCost(
- Instruction::ExtractElement,
- FixedVectorType::get(Type::getInt1Ty(DataTy->getContext()),
- VT->getNumElements()),
- -1) +
- getCFInstrCost(Instruction::Br, CostKind) +
- getCFInstrCost(Instruction::PHI, CostKind));
- }
-
- return LoadCost + PackingCost + ConditionalCost;
- }
-
+ unsigned getGatherScatterOpCost(unsigned Opcode, Type *DataTy,
+ const Value *Ptr, bool VariableMask,
+ Align Alignment, TTI::TargetCostKind CostKind,
+ const Instruction *I = nullptr) {
+ auto *VT = cast<FixedVectorType>(DataTy);
+ // Assume the target does not have support for gather/scatter operations
+ // and provide a rough estimate.
+ //
+ // First, compute the cost of extracting the individual addresses and the
+ // individual memory operations.
+ int LoadCost =
+ VT->getNumElements() *
+ (getVectorInstrCost(
+ Instruction::ExtractElement,
+ FixedVectorType::get(PointerType::get(VT->getElementType(), 0),
+ VT->getNumElements()),
+ -1) +
+ getMemoryOpCost(Opcode, VT->getElementType(), Alignment, 0, CostKind));
+
+ // Next, compute the cost of packing the result in a vector.
+ int PackingCost = getScalarizationOverhead(VT, Opcode != Instruction::Store,
+ Opcode == Instruction::Store);
+
+ int ConditionalCost = 0;
+ if (VariableMask) {
+ // Compute the cost of conditionally executing the memory operations with
+ // variable masks. This includes extracting the individual conditions, a
+ // branches and PHIs to combine the results.
+ // NOTE: Estimating the cost of conditionally executing the memory
+ // operations accurately is quite difficult and the current solution
+ // provides a very rough estimate only.
+ ConditionalCost =
+ VT->getNumElements() *
+ (getVectorInstrCost(
+ Instruction::ExtractElement,
+ FixedVectorType::get(Type::getInt1Ty(DataTy->getContext()),
+ VT->getNumElements()),
+ -1) +
+ getCFInstrCost(Instruction::Br, CostKind) +
+ getCFInstrCost(Instruction::PHI, CostKind));
+ }
+
+ return LoadCost + PackingCost + ConditionalCost;
+ }
+
unsigned getInterleavedMemoryOpCost(
unsigned Opcode, Type *VecTy, unsigned Factor, ArrayRef<unsigned> Indices,
Align Alignment, unsigned AddressSpace, TTI::TargetCostKind CostKind,
@@ -1204,52 +1204,52 @@ public:
/// Get intrinsic cost based on arguments.
unsigned getIntrinsicInstrCost(const IntrinsicCostAttributes &ICA,
TTI::TargetCostKind CostKind) {
- // Check for generically free intrinsics.
+ // Check for generically free intrinsics.
if (BaseT::getIntrinsicInstrCost(ICA, CostKind) == 0)
return 0;
- // Assume that target intrinsics are cheap.
- Intrinsic::ID IID = ICA.getID();
- if (Function::isTargetIntrinsic(IID))
- return TargetTransformInfo::TCC_Basic;
-
+ // Assume that target intrinsics are cheap.
+ Intrinsic::ID IID = ICA.getID();
+ if (Function::isTargetIntrinsic(IID))
+ return TargetTransformInfo::TCC_Basic;
+
if (ICA.isTypeBasedOnly())
return getTypeBasedIntrinsicInstrCost(ICA, CostKind);
Type *RetTy = ICA.getReturnType();
-
- ElementCount VF = ICA.getVectorFactor();
- ElementCount RetVF =
- (RetTy->isVectorTy() ? cast<VectorType>(RetTy)->getElementCount()
- : ElementCount::getFixed(1));
- assert((RetVF.isScalar() || VF.isScalar()) &&
- "VF > 1 and RetVF is a vector type");
+
+ ElementCount VF = ICA.getVectorFactor();
+ ElementCount RetVF =
+ (RetTy->isVectorTy() ? cast<VectorType>(RetTy)->getElementCount()
+ : ElementCount::getFixed(1));
+ assert((RetVF.isScalar() || VF.isScalar()) &&
+ "VF > 1 and RetVF is a vector type");
const IntrinsicInst *I = ICA.getInst();
const SmallVectorImpl<const Value *> &Args = ICA.getArgs();
FastMathFlags FMF = ICA.getFlags();
switch (IID) {
- default:
- break;
-
- case Intrinsic::cttz:
- // FIXME: If necessary, this should go in target-specific overrides.
- if (VF.isScalar() && RetVF.isScalar() &&
- getTLI()->isCheapToSpeculateCttz())
- return TargetTransformInfo::TCC_Basic;
- break;
-
- case Intrinsic::ctlz:
- // FIXME: If necessary, this should go in target-specific overrides.
- if (VF.isScalar() && RetVF.isScalar() &&
- getTLI()->isCheapToSpeculateCtlz())
- return TargetTransformInfo::TCC_Basic;
- break;
-
- case Intrinsic::memcpy:
- return thisT()->getMemcpyCost(ICA.getInst());
-
+ default:
+ break;
+
+ case Intrinsic::cttz:
+ // FIXME: If necessary, this should go in target-specific overrides.
+ if (VF.isScalar() && RetVF.isScalar() &&
+ getTLI()->isCheapToSpeculateCttz())
+ return TargetTransformInfo::TCC_Basic;
+ break;
+
+ case Intrinsic::ctlz:
+ // FIXME: If necessary, this should go in target-specific overrides.
+ if (VF.isScalar() && RetVF.isScalar() &&
+ getTLI()->isCheapToSpeculateCtlz())
+ return TargetTransformInfo::TCC_Basic;
+ break;
+
+ case Intrinsic::memcpy:
+ return thisT()->getMemcpyCost(ICA.getInst());
+
case Intrinsic::masked_scatter: {
- assert(VF.isScalar() && "Can't vectorize types here.");
+ assert(VF.isScalar() && "Can't vectorize types here.");
const Value *Mask = Args[3];
bool VarMask = !isa<Constant>(Mask);
Align Alignment = cast<ConstantInt>(Args[2])->getAlignValue();
@@ -1258,57 +1258,57 @@ public:
VarMask, Alignment, CostKind, I);
}
case Intrinsic::masked_gather: {
- assert(VF.isScalar() && "Can't vectorize types here.");
+ assert(VF.isScalar() && "Can't vectorize types here.");
const Value *Mask = Args[2];
bool VarMask = !isa<Constant>(Mask);
Align Alignment = cast<ConstantInt>(Args[1])->getAlignValue();
return thisT()->getGatherScatterOpCost(Instruction::Load, RetTy, Args[0],
VarMask, Alignment, CostKind, I);
}
- case Intrinsic::experimental_vector_extract: {
- // FIXME: Handle case where a scalable vector is extracted from a scalable
- // vector
- if (isa<ScalableVectorType>(RetTy))
- return BaseT::getIntrinsicInstrCost(ICA, CostKind);
- unsigned Index = cast<ConstantInt>(Args[1])->getZExtValue();
- return thisT()->getShuffleCost(TTI::SK_ExtractSubvector,
- cast<VectorType>(Args[0]->getType()),
- Index, cast<VectorType>(RetTy));
- }
- case Intrinsic::experimental_vector_insert: {
- // FIXME: Handle case where a scalable vector is inserted into a scalable
- // vector
- if (isa<ScalableVectorType>(Args[1]->getType()))
- return BaseT::getIntrinsicInstrCost(ICA, CostKind);
- unsigned Index = cast<ConstantInt>(Args[2])->getZExtValue();
- return thisT()->getShuffleCost(
- TTI::SK_InsertSubvector, cast<VectorType>(Args[0]->getType()), Index,
- cast<VectorType>(Args[1]->getType()));
- }
- case Intrinsic::vector_reduce_add:
- case Intrinsic::vector_reduce_mul:
- case Intrinsic::vector_reduce_and:
- case Intrinsic::vector_reduce_or:
- case Intrinsic::vector_reduce_xor:
- case Intrinsic::vector_reduce_smax:
- case Intrinsic::vector_reduce_smin:
- case Intrinsic::vector_reduce_fmax:
- case Intrinsic::vector_reduce_fmin:
- case Intrinsic::vector_reduce_umax:
- case Intrinsic::vector_reduce_umin: {
+ case Intrinsic::experimental_vector_extract: {
+ // FIXME: Handle case where a scalable vector is extracted from a scalable
+ // vector
+ if (isa<ScalableVectorType>(RetTy))
+ return BaseT::getIntrinsicInstrCost(ICA, CostKind);
+ unsigned Index = cast<ConstantInt>(Args[1])->getZExtValue();
+ return thisT()->getShuffleCost(TTI::SK_ExtractSubvector,
+ cast<VectorType>(Args[0]->getType()),
+ Index, cast<VectorType>(RetTy));
+ }
+ case Intrinsic::experimental_vector_insert: {
+ // FIXME: Handle case where a scalable vector is inserted into a scalable
+ // vector
+ if (isa<ScalableVectorType>(Args[1]->getType()))
+ return BaseT::getIntrinsicInstrCost(ICA, CostKind);
+ unsigned Index = cast<ConstantInt>(Args[2])->getZExtValue();
+ return thisT()->getShuffleCost(
+ TTI::SK_InsertSubvector, cast<VectorType>(Args[0]->getType()), Index,
+ cast<VectorType>(Args[1]->getType()));
+ }
+ case Intrinsic::vector_reduce_add:
+ case Intrinsic::vector_reduce_mul:
+ case Intrinsic::vector_reduce_and:
+ case Intrinsic::vector_reduce_or:
+ case Intrinsic::vector_reduce_xor:
+ case Intrinsic::vector_reduce_smax:
+ case Intrinsic::vector_reduce_smin:
+ case Intrinsic::vector_reduce_fmax:
+ case Intrinsic::vector_reduce_fmin:
+ case Intrinsic::vector_reduce_umax:
+ case Intrinsic::vector_reduce_umin: {
IntrinsicCostAttributes Attrs(IID, RetTy, Args[0]->getType(), FMF, 1, I);
- return getTypeBasedIntrinsicInstrCost(Attrs, CostKind);
- }
- case Intrinsic::vector_reduce_fadd:
- case Intrinsic::vector_reduce_fmul: {
- IntrinsicCostAttributes Attrs(
- IID, RetTy, {Args[0]->getType(), Args[1]->getType()}, FMF, 1, I);
- return getTypeBasedIntrinsicInstrCost(Attrs, CostKind);
+ return getTypeBasedIntrinsicInstrCost(Attrs, CostKind);
}
+ case Intrinsic::vector_reduce_fadd:
+ case Intrinsic::vector_reduce_fmul: {
+ IntrinsicCostAttributes Attrs(
+ IID, RetTy, {Args[0]->getType(), Args[1]->getType()}, FMF, 1, I);
+ return getTypeBasedIntrinsicInstrCost(Attrs, CostKind);
+ }
case Intrinsic::fshl:
case Intrinsic::fshr: {
- if (isa<ScalableVectorType>(RetTy))
- return BaseT::getIntrinsicInstrCost(ICA, CostKind);
+ if (isa<ScalableVectorType>(RetTy))
+ return BaseT::getIntrinsicInstrCost(ICA, CostKind);
const Value *X = Args[0];
const Value *Y = Args[1];
const Value *Z = Args[2];
@@ -1339,48 +1339,48 @@ public:
// For non-rotates (X != Y) we must add shift-by-zero handling costs.
if (X != Y) {
Type *CondTy = RetTy->getWithNewBitWidth(1);
- Cost +=
- thisT()->getCmpSelInstrCost(BinaryOperator::ICmp, RetTy, CondTy,
- CmpInst::BAD_ICMP_PREDICATE, CostKind);
- Cost +=
- thisT()->getCmpSelInstrCost(BinaryOperator::Select, RetTy, CondTy,
- CmpInst::BAD_ICMP_PREDICATE, CostKind);
+ Cost +=
+ thisT()->getCmpSelInstrCost(BinaryOperator::ICmp, RetTy, CondTy,
+ CmpInst::BAD_ICMP_PREDICATE, CostKind);
+ Cost +=
+ thisT()->getCmpSelInstrCost(BinaryOperator::Select, RetTy, CondTy,
+ CmpInst::BAD_ICMP_PREDICATE, CostKind);
}
return Cost;
}
}
- // TODO: Handle the remaining intrinsic with scalable vector type
- if (isa<ScalableVectorType>(RetTy))
- return BaseT::getIntrinsicInstrCost(ICA, CostKind);
-
- // Assume that we need to scalarize this intrinsic.
- SmallVector<Type *, 4> Types;
- for (const Value *Op : Args) {
- Type *OpTy = Op->getType();
- assert(VF.isScalar() || !OpTy->isVectorTy());
- Types.push_back(VF.isScalar()
- ? OpTy
- : FixedVectorType::get(OpTy, VF.getKnownMinValue()));
- }
-
- if (VF.isVector() && !RetTy->isVoidTy())
- RetTy = FixedVectorType::get(RetTy, VF.getKnownMinValue());
-
- // Compute the scalarization overhead based on Args for a vector
- // intrinsic. A vectorizer will pass a scalar RetTy and VF > 1, while
- // CostModel will pass a vector RetTy and VF is 1.
- unsigned ScalarizationCost = std::numeric_limits<unsigned>::max();
- if (RetVF.isVector() || VF.isVector()) {
- ScalarizationCost = 0;
- if (!RetTy->isVoidTy())
- ScalarizationCost +=
- getScalarizationOverhead(cast<VectorType>(RetTy), true, false);
- ScalarizationCost +=
- getOperandsScalarizationOverhead(Args, VF.getKnownMinValue());
- }
-
- IntrinsicCostAttributes Attrs(IID, RetTy, Types, FMF, ScalarizationCost, I);
- return thisT()->getTypeBasedIntrinsicInstrCost(Attrs, CostKind);
+ // TODO: Handle the remaining intrinsic with scalable vector type
+ if (isa<ScalableVectorType>(RetTy))
+ return BaseT::getIntrinsicInstrCost(ICA, CostKind);
+
+ // Assume that we need to scalarize this intrinsic.
+ SmallVector<Type *, 4> Types;
+ for (const Value *Op : Args) {
+ Type *OpTy = Op->getType();
+ assert(VF.isScalar() || !OpTy->isVectorTy());
+ Types.push_back(VF.isScalar()
+ ? OpTy
+ : FixedVectorType::get(OpTy, VF.getKnownMinValue()));
+ }
+
+ if (VF.isVector() && !RetTy->isVoidTy())
+ RetTy = FixedVectorType::get(RetTy, VF.getKnownMinValue());
+
+ // Compute the scalarization overhead based on Args for a vector
+ // intrinsic. A vectorizer will pass a scalar RetTy and VF > 1, while
+ // CostModel will pass a vector RetTy and VF is 1.
+ unsigned ScalarizationCost = std::numeric_limits<unsigned>::max();
+ if (RetVF.isVector() || VF.isVector()) {
+ ScalarizationCost = 0;
+ if (!RetTy->isVoidTy())
+ ScalarizationCost +=
+ getScalarizationOverhead(cast<VectorType>(RetTy), true, false);
+ ScalarizationCost +=
+ getOperandsScalarizationOverhead(Args, VF.getKnownMinValue());
+ }
+
+ IntrinsicCostAttributes Attrs(IID, RetTy, Types, FMF, ScalarizationCost, I);
+ return thisT()->getTypeBasedIntrinsicInstrCost(Attrs, CostKind);
}
/// Get intrinsic cost based on argument types.
@@ -1396,20 +1396,20 @@ public:
unsigned ScalarizationCostPassed = ICA.getScalarizationCost();
bool SkipScalarizationCost = ICA.skipScalarizationCost();
- VectorType *VecOpTy = nullptr;
- if (!Tys.empty()) {
- // The vector reduction operand is operand 0 except for fadd/fmul.
- // Their operand 0 is a scalar start value, so the vector op is operand 1.
- unsigned VecTyIndex = 0;
- if (IID == Intrinsic::vector_reduce_fadd ||
- IID == Intrinsic::vector_reduce_fmul)
- VecTyIndex = 1;
- assert(Tys.size() > VecTyIndex && "Unexpected IntrinsicCostAttributes");
- VecOpTy = dyn_cast<VectorType>(Tys[VecTyIndex]);
- }
-
- // Library call cost - other than size, make it expensive.
- unsigned SingleCallCost = CostKind == TTI::TCK_CodeSize ? 1 : 10;
+ VectorType *VecOpTy = nullptr;
+ if (!Tys.empty()) {
+ // The vector reduction operand is operand 0 except for fadd/fmul.
+ // Their operand 0 is a scalar start value, so the vector op is operand 1.
+ unsigned VecTyIndex = 0;
+ if (IID == Intrinsic::vector_reduce_fadd ||
+ IID == Intrinsic::vector_reduce_fmul)
+ VecTyIndex = 1;
+ assert(Tys.size() > VecTyIndex && "Unexpected IntrinsicCostAttributes");
+ VecOpTy = dyn_cast<VectorType>(Tys[VecTyIndex]);
+ }
+
+ // Library call cost - other than size, make it expensive.
+ unsigned SingleCallCost = CostKind == TTI::TCK_CodeSize ? 1 : 10;
SmallVector<unsigned, 2> ISDs;
switch (IID) {
default: {
@@ -1483,12 +1483,12 @@ public:
case Intrinsic::maxnum:
ISDs.push_back(ISD::FMAXNUM);
break;
- case Intrinsic::minimum:
- ISDs.push_back(ISD::FMINIMUM);
- break;
- case Intrinsic::maximum:
- ISDs.push_back(ISD::FMAXIMUM);
- break;
+ case Intrinsic::minimum:
+ ISDs.push_back(ISD::FMINIMUM);
+ break;
+ case Intrinsic::maximum:
+ ISDs.push_back(ISD::FMAXIMUM);
+ break;
case Intrinsic::copysign:
ISDs.push_back(ISD::FCOPYSIGN);
break;
@@ -1529,7 +1529,7 @@ public:
case Intrinsic::lifetime_start:
case Intrinsic::lifetime_end:
case Intrinsic::sideeffect:
- case Intrinsic::pseudoprobe:
+ case Intrinsic::pseudoprobe:
return 0;
case Intrinsic::masked_store: {
Type *Ty = Tys[0];
@@ -1543,72 +1543,72 @@ public:
return thisT()->getMaskedMemoryOpCost(Instruction::Load, Ty, TyAlign, 0,
CostKind);
}
- case Intrinsic::vector_reduce_add:
+ case Intrinsic::vector_reduce_add:
return thisT()->getArithmeticReductionCost(Instruction::Add, VecOpTy,
/*IsPairwiseForm=*/false,
CostKind);
- case Intrinsic::vector_reduce_mul:
+ case Intrinsic::vector_reduce_mul:
return thisT()->getArithmeticReductionCost(Instruction::Mul, VecOpTy,
/*IsPairwiseForm=*/false,
CostKind);
- case Intrinsic::vector_reduce_and:
+ case Intrinsic::vector_reduce_and:
return thisT()->getArithmeticReductionCost(Instruction::And, VecOpTy,
/*IsPairwiseForm=*/false,
CostKind);
- case Intrinsic::vector_reduce_or:
+ case Intrinsic::vector_reduce_or:
return thisT()->getArithmeticReductionCost(Instruction::Or, VecOpTy,
/*IsPairwiseForm=*/false,
CostKind);
- case Intrinsic::vector_reduce_xor:
+ case Intrinsic::vector_reduce_xor:
return thisT()->getArithmeticReductionCost(Instruction::Xor, VecOpTy,
/*IsPairwiseForm=*/false,
CostKind);
- case Intrinsic::vector_reduce_fadd:
+ case Intrinsic::vector_reduce_fadd:
// FIXME: Add new flag for cost of strict reductions.
return thisT()->getArithmeticReductionCost(Instruction::FAdd, VecOpTy,
/*IsPairwiseForm=*/false,
CostKind);
- case Intrinsic::vector_reduce_fmul:
+ case Intrinsic::vector_reduce_fmul:
// FIXME: Add new flag for cost of strict reductions.
return thisT()->getArithmeticReductionCost(Instruction::FMul, VecOpTy,
/*IsPairwiseForm=*/false,
CostKind);
- case Intrinsic::vector_reduce_smax:
- case Intrinsic::vector_reduce_smin:
- case Intrinsic::vector_reduce_fmax:
- case Intrinsic::vector_reduce_fmin:
+ case Intrinsic::vector_reduce_smax:
+ case Intrinsic::vector_reduce_smin:
+ case Intrinsic::vector_reduce_fmax:
+ case Intrinsic::vector_reduce_fmin:
return thisT()->getMinMaxReductionCost(
VecOpTy, cast<VectorType>(CmpInst::makeCmpResultType(VecOpTy)),
/*IsPairwiseForm=*/false,
/*IsUnsigned=*/false, CostKind);
- case Intrinsic::vector_reduce_umax:
- case Intrinsic::vector_reduce_umin:
+ case Intrinsic::vector_reduce_umax:
+ case Intrinsic::vector_reduce_umin:
return thisT()->getMinMaxReductionCost(
VecOpTy, cast<VectorType>(CmpInst::makeCmpResultType(VecOpTy)),
/*IsPairwiseForm=*/false,
/*IsUnsigned=*/true, CostKind);
- case Intrinsic::abs:
- case Intrinsic::smax:
- case Intrinsic::smin:
- case Intrinsic::umax:
- case Intrinsic::umin: {
- // abs(X) = select(icmp(X,0),X,sub(0,X))
- // minmax(X,Y) = select(icmp(X,Y),X,Y)
- Type *CondTy = RetTy->getWithNewBitWidth(1);
- unsigned Cost = 0;
- // TODO: Ideally getCmpSelInstrCost would accept an icmp condition code.
- Cost +=
- thisT()->getCmpSelInstrCost(BinaryOperator::ICmp, RetTy, CondTy,
- CmpInst::BAD_ICMP_PREDICATE, CostKind);
- Cost +=
- thisT()->getCmpSelInstrCost(BinaryOperator::Select, RetTy, CondTy,
- CmpInst::BAD_ICMP_PREDICATE, CostKind);
- // TODO: Should we add an OperandValueProperties::OP_Zero property?
- if (IID == Intrinsic::abs)
- Cost += thisT()->getArithmeticInstrCost(
- BinaryOperator::Sub, RetTy, CostKind, TTI::OK_UniformConstantValue);
- return Cost;
- }
+ case Intrinsic::abs:
+ case Intrinsic::smax:
+ case Intrinsic::smin:
+ case Intrinsic::umax:
+ case Intrinsic::umin: {
+ // abs(X) = select(icmp(X,0),X,sub(0,X))
+ // minmax(X,Y) = select(icmp(X,Y),X,Y)
+ Type *CondTy = RetTy->getWithNewBitWidth(1);
+ unsigned Cost = 0;
+ // TODO: Ideally getCmpSelInstrCost would accept an icmp condition code.
+ Cost +=
+ thisT()->getCmpSelInstrCost(BinaryOperator::ICmp, RetTy, CondTy,
+ CmpInst::BAD_ICMP_PREDICATE, CostKind);
+ Cost +=
+ thisT()->getCmpSelInstrCost(BinaryOperator::Select, RetTy, CondTy,
+ CmpInst::BAD_ICMP_PREDICATE, CostKind);
+ // TODO: Should we add an OperandValueProperties::OP_Zero property?
+ if (IID == Intrinsic::abs)
+ Cost += thisT()->getArithmeticInstrCost(
+ BinaryOperator::Sub, RetTy, CostKind, TTI::OK_UniformConstantValue);
+ return Cost;
+ }
case Intrinsic::sadd_sat:
case Intrinsic::ssub_sat: {
Type *CondTy = RetTy->getWithNewBitWidth(1);
@@ -1624,12 +1624,12 @@ public:
IntrinsicCostAttributes Attrs(OverflowOp, OpTy, {RetTy, RetTy}, FMF,
ScalarizationCostPassed);
Cost += thisT()->getIntrinsicInstrCost(Attrs, CostKind);
- Cost +=
- thisT()->getCmpSelInstrCost(BinaryOperator::ICmp, RetTy, CondTy,
- CmpInst::BAD_ICMP_PREDICATE, CostKind);
- Cost += 2 * thisT()->getCmpSelInstrCost(
- BinaryOperator::Select, RetTy, CondTy,
- CmpInst::BAD_ICMP_PREDICATE, CostKind);
+ Cost +=
+ thisT()->getCmpSelInstrCost(BinaryOperator::ICmp, RetTy, CondTy,
+ CmpInst::BAD_ICMP_PREDICATE, CostKind);
+ Cost += 2 * thisT()->getCmpSelInstrCost(
+ BinaryOperator::Select, RetTy, CondTy,
+ CmpInst::BAD_ICMP_PREDICATE, CostKind);
return Cost;
}
case Intrinsic::uadd_sat:
@@ -1645,9 +1645,9 @@ public:
IntrinsicCostAttributes Attrs(OverflowOp, OpTy, {RetTy, RetTy}, FMF,
ScalarizationCostPassed);
Cost += thisT()->getIntrinsicInstrCost(Attrs, CostKind);
- Cost +=
- thisT()->getCmpSelInstrCost(BinaryOperator::Select, RetTy, CondTy,
- CmpInst::BAD_ICMP_PREDICATE, CostKind);
+ Cost +=
+ thisT()->getCmpSelInstrCost(BinaryOperator::Select, RetTy, CondTy,
+ CmpInst::BAD_ICMP_PREDICATE, CostKind);
return Cost;
}
case Intrinsic::smul_fix:
@@ -1657,14 +1657,14 @@ public:
unsigned ExtOp =
IID == Intrinsic::smul_fix ? Instruction::SExt : Instruction::ZExt;
- TTI::CastContextHint CCH = TTI::CastContextHint::None;
+ TTI::CastContextHint CCH = TTI::CastContextHint::None;
unsigned Cost = 0;
- Cost += 2 * thisT()->getCastInstrCost(ExtOp, ExtTy, RetTy, CCH, CostKind);
+ Cost += 2 * thisT()->getCastInstrCost(ExtOp, ExtTy, RetTy, CCH, CostKind);
Cost +=
thisT()->getArithmeticInstrCost(Instruction::Mul, ExtTy, CostKind);
Cost += 2 * thisT()->getCastInstrCost(Instruction::Trunc, RetTy, ExtTy,
- CCH, CostKind);
+ CCH, CostKind);
Cost += thisT()->getArithmeticInstrCost(Instruction::LShr, RetTy,
CostKind, TTI::OK_AnyValue,
TTI::OK_UniformConstantValue);
@@ -1692,12 +1692,12 @@ public:
// Overflow -> (LHSSign != RHSSign) && (LHSSign != SumSign)
unsigned Cost = 0;
Cost += thisT()->getArithmeticInstrCost(Opcode, SumTy, CostKind);
- Cost += 3 * thisT()->getCmpSelInstrCost(
- Instruction::ICmp, SumTy, OverflowTy,
- CmpInst::BAD_ICMP_PREDICATE, CostKind);
- Cost += 2 * thisT()->getCmpSelInstrCost(
- Instruction::Select, OverflowTy, OverflowTy,
- CmpInst::BAD_ICMP_PREDICATE, CostKind);
+ Cost += 3 * thisT()->getCmpSelInstrCost(
+ Instruction::ICmp, SumTy, OverflowTy,
+ CmpInst::BAD_ICMP_PREDICATE, CostKind);
+ Cost += 2 * thisT()->getCmpSelInstrCost(
+ Instruction::Select, OverflowTy, OverflowTy,
+ CmpInst::BAD_ICMP_PREDICATE, CostKind);
Cost += thisT()->getArithmeticInstrCost(BinaryOperator::And, OverflowTy,
CostKind);
return Cost;
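A minimal sketch of the sign rule in the comment above, for the subtract case, written on fixed-width integers rather than the cost API (the wrapping subtract via unsigned casts assumes two's complement):

#include <cstdint>

bool ssubOverflows(int32_t LHS, int32_t RHS) {
  // Wrapping subtract (two's complement assumed for the narrowing cast).
  int32_t Diff = (int32_t)((uint32_t)LHS - (uint32_t)RHS);
  bool LHSSign = LHS < 0, RHSSign = RHS < 0, DiffSign = Diff < 0;
  // Overflow -> (LHSSign != RHSSign) && (LHSSign != DiffSign)
  return LHSSign != RHSSign && LHSSign != DiffSign;
}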
@@ -1712,9 +1712,9 @@ public:
unsigned Cost = 0;
Cost += thisT()->getArithmeticInstrCost(Opcode, SumTy, CostKind);
- Cost +=
- thisT()->getCmpSelInstrCost(BinaryOperator::ICmp, SumTy, OverflowTy,
- CmpInst::BAD_ICMP_PREDICATE, CostKind);
+ Cost +=
+ thisT()->getCmpSelInstrCost(BinaryOperator::ICmp, SumTy, OverflowTy,
+ CmpInst::BAD_ICMP_PREDICATE, CostKind);
return Cost;
}
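The unsigned companion is cheaper, which is exactly what the single icmp above reflects; a plain C++ sketch:

#include <cstdint>

bool uaddOverflows(uint32_t LHS, uint32_t RHS) {
  uint32_t Sum = LHS + RHS; // wraps modulo 2^32
  return Sum < LHS;         // the one compare the cost accounts for
}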
case Intrinsic::smul_with_overflow:
@@ -1726,14 +1726,14 @@ public:
unsigned ExtOp =
IID == Intrinsic::smul_fix ? Instruction::SExt : Instruction::ZExt;
- TTI::CastContextHint CCH = TTI::CastContextHint::None;
+ TTI::CastContextHint CCH = TTI::CastContextHint::None;
unsigned Cost = 0;
- Cost += 2 * thisT()->getCastInstrCost(ExtOp, ExtTy, MulTy, CCH, CostKind);
+ Cost += 2 * thisT()->getCastInstrCost(ExtOp, ExtTy, MulTy, CCH, CostKind);
Cost +=
thisT()->getArithmeticInstrCost(Instruction::Mul, ExtTy, CostKind);
Cost += 2 * thisT()->getCastInstrCost(Instruction::Trunc, MulTy, ExtTy,
- CCH, CostKind);
+ CCH, CostKind);
Cost += thisT()->getArithmeticInstrCost(Instruction::LShr, MulTy,
CostKind, TTI::OK_AnyValue,
TTI::OK_UniformConstantValue);
@@ -1743,9 +1743,9 @@ public:
CostKind, TTI::OK_AnyValue,
TTI::OK_UniformConstantValue);
- Cost +=
- thisT()->getCmpSelInstrCost(BinaryOperator::ICmp, MulTy, OverflowTy,
- CmpInst::BAD_ICMP_PREDICATE, CostKind);
+ Cost +=
+ thisT()->getCmpSelInstrCost(BinaryOperator::ICmp, MulTy, OverflowTy,
+ CmpInst::BAD_ICMP_PREDICATE, CostKind);
return Cost;
}
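A hedged sketch of the widening-multiply expansion being costed (signed case, scalar C++ rather than the LLVM API): extend, multiply in the wider type, truncate, and compare to detect lost bits.

#include <cstdint>

bool smulOverflows(int32_t LHS, int32_t RHS) {
  int64_t Wide = (int64_t)LHS * (int64_t)RHS; // sext + mul in ExtTy
  int32_t Trunc = (int32_t)Wide;              // trunc back to MulTy
  return (int64_t)Trunc != Wide;              // icmp: were bits lost?
}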
case Intrinsic::ctpop:
@@ -1754,12 +1754,12 @@ public:
// library call but still not a cheap instruction.
SingleCallCost = TargetTransformInfo::TCC_Expensive;
break;
- case Intrinsic::ctlz:
- ISDs.push_back(ISD::CTLZ);
- break;
- case Intrinsic::cttz:
- ISDs.push_back(ISD::CTTZ);
- break;
+ case Intrinsic::ctlz:
+ ISDs.push_back(ISD::CTLZ);
+ break;
+ case Intrinsic::cttz:
+ ISDs.push_back(ISD::CTTZ);
+ break;
case Intrinsic::bswap:
ISDs.push_back(ISD::BSWAP);
break;
@@ -1795,7 +1795,7 @@ public:
}
}
- auto *MinLegalCostI = std::min_element(LegalCost.begin(), LegalCost.end());
+ auto *MinLegalCostI = std::min_element(LegalCost.begin(), LegalCost.end());
if (MinLegalCostI != LegalCost.end())
return *MinLegalCostI;
@@ -1992,10 +1992,10 @@ public:
(IsPairwise + 1) * thisT()->getShuffleCost(TTI::SK_ExtractSubvector,
Ty, NumVecElts, SubTy);
MinMaxCost +=
- thisT()->getCmpSelInstrCost(CmpOpcode, SubTy, CondTy,
- CmpInst::BAD_ICMP_PREDICATE, CostKind) +
+ thisT()->getCmpSelInstrCost(CmpOpcode, SubTy, CondTy,
+ CmpInst::BAD_ICMP_PREDICATE, CostKind) +
thisT()->getCmpSelInstrCost(Instruction::Select, SubTy, CondTy,
- CmpInst::BAD_ICMP_PREDICATE, CostKind);
+ CmpInst::BAD_ICMP_PREDICATE, CostKind);
Ty = SubTy;
++LongVectorCount;
}
@@ -2017,37 +2017,37 @@ public:
thisT()->getShuffleCost(TTI::SK_PermuteSingleSrc, Ty, 0, Ty);
MinMaxCost +=
NumReduxLevels *
- (thisT()->getCmpSelInstrCost(CmpOpcode, Ty, CondTy,
- CmpInst::BAD_ICMP_PREDICATE, CostKind) +
+ (thisT()->getCmpSelInstrCost(CmpOpcode, Ty, CondTy,
+ CmpInst::BAD_ICMP_PREDICATE, CostKind) +
thisT()->getCmpSelInstrCost(Instruction::Select, Ty, CondTy,
- CmpInst::BAD_ICMP_PREDICATE, CostKind));
+ CmpInst::BAD_ICMP_PREDICATE, CostKind));
// The last min/max should be in vector registers and we counted it above.
    // So we just need a single extractelement.
return ShuffleCost + MinMaxCost +
thisT()->getVectorInstrCost(Instruction::ExtractElement, Ty, 0);
}
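As a rough standalone model of the reduction loops above: a vector min/max reduction pays one shuffle, one compare, and one select per halving step (log2(NumElts) levels), plus a final extract. All parameter names here are hypothetical stand-ins for the per-type costs.

#include <cstdint>

uint64_t minMaxReductionCost(unsigned NumElts, uint64_t ShuffleCost,
                             uint64_t CmpCost, uint64_t SelCost,
                             uint64_t ExtractCost) {
  uint64_t Cost = 0;
  for (unsigned Width = NumElts; Width > 1; Width /= 2) // halving levels
    Cost += ShuffleCost + CmpCost + SelCost;
  return Cost + ExtractCost; // single extractelement of the result lane
}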
- InstructionCost getExtendedAddReductionCost(bool IsMLA, bool IsUnsigned,
- Type *ResTy, VectorType *Ty,
- TTI::TargetCostKind CostKind) {
- // Without any native support, this is equivalent to the cost of
- // vecreduce.add(ext) or if IsMLA vecreduce.add(mul(ext, ext))
- VectorType *ExtTy = VectorType::get(ResTy, Ty);
- unsigned RedCost = thisT()->getArithmeticReductionCost(
- Instruction::Add, ExtTy, false, CostKind);
- unsigned MulCost = 0;
- unsigned ExtCost = thisT()->getCastInstrCost(
- IsUnsigned ? Instruction::ZExt : Instruction::SExt, ExtTy, Ty,
- TTI::CastContextHint::None, CostKind);
- if (IsMLA) {
- MulCost =
- thisT()->getArithmeticInstrCost(Instruction::Mul, ExtTy, CostKind);
- ExtCost *= 2;
- }
-
- return RedCost + MulCost + ExtCost;
- }
-
+ InstructionCost getExtendedAddReductionCost(bool IsMLA, bool IsUnsigned,
+ Type *ResTy, VectorType *Ty,
+ TTI::TargetCostKind CostKind) {
+ // Without any native support, this is equivalent to the cost of
+ // vecreduce.add(ext) or if IsMLA vecreduce.add(mul(ext, ext))
+ VectorType *ExtTy = VectorType::get(ResTy, Ty);
+ unsigned RedCost = thisT()->getArithmeticReductionCost(
+ Instruction::Add, ExtTy, false, CostKind);
+ unsigned MulCost = 0;
+ unsigned ExtCost = thisT()->getCastInstrCost(
+ IsUnsigned ? Instruction::ZExt : Instruction::SExt, ExtTy, Ty,
+ TTI::CastContextHint::None, CostKind);
+ if (IsMLA) {
+ MulCost =
+ thisT()->getArithmeticInstrCost(Instruction::Mul, ExtTy, CostKind);
+ ExtCost *= 2;
+ }
+
+ return RedCost + MulCost + ExtCost;
+ }
+
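Restated as plain arithmetic (a sketch, not the API): vecreduce.add(ext(x)) costs the reduction plus one extend, and the MLA form vecreduce.add(mul(ext(a), ext(b))) adds a multiply and a second extend.

#include <cstdint>

uint64_t extendedAddReductionCost(bool IsMLA, uint64_t RedCost,
                                  uint64_t ExtCost, uint64_t MulCost) {
  return IsMLA ? RedCost + MulCost + 2 * ExtCost  // reduce + mul + 2 exts
               : RedCost + ExtCost;               // reduce + 1 ext
}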
unsigned getVectorSplitCost() { return 1; }
/// @}
diff --git a/contrib/libs/llvm12/include/llvm/CodeGen/CalcSpillWeights.h b/contrib/libs/llvm12/include/llvm/CodeGen/CalcSpillWeights.h
index defa1590c1..8b7fe2f307 100644
--- a/contrib/libs/llvm12/include/llvm/CodeGen/CalcSpillWeights.h
+++ b/contrib/libs/llvm12/include/llvm/CodeGen/CalcSpillWeights.h
@@ -53,57 +53,57 @@ class VirtRegMap;
class VirtRegAuxInfo {
MachineFunction &MF;
LiveIntervals &LIS;
- const VirtRegMap &VRM;
+ const VirtRegMap &VRM;
const MachineLoopInfo &Loops;
const MachineBlockFrequencyInfo &MBFI;
public:
- VirtRegAuxInfo(MachineFunction &MF, LiveIntervals &LIS,
- const VirtRegMap &VRM, const MachineLoopInfo &Loops,
- const MachineBlockFrequencyInfo &MBFI)
- : MF(MF), LIS(LIS), VRM(VRM), Loops(Loops), MBFI(MBFI) {}
-
- virtual ~VirtRegAuxInfo() = default;
+ VirtRegAuxInfo(MachineFunction &MF, LiveIntervals &LIS,
+ const VirtRegMap &VRM, const MachineLoopInfo &Loops,
+ const MachineBlockFrequencyInfo &MBFI)
+ : MF(MF), LIS(LIS), VRM(VRM), Loops(Loops), MBFI(MBFI) {}
+ virtual ~VirtRegAuxInfo() = default;
+
   /// (Re)compute LI's spill weight and allocation hint.
- void calculateSpillWeightAndHint(LiveInterval &LI);
+ void calculateSpillWeightAndHint(LiveInterval &LI);
- /// Compute future expected spill weight of a split artifact of LI
+ /// Compute future expected spill weight of a split artifact of LI
/// that will span between start and end slot indexes.
- /// \param LI The live interval to be split.
- /// \param Start The expected beginning of the split artifact. Instructions
+ /// \param LI The live interval to be split.
+ /// \param Start The expected beginning of the split artifact. Instructions
/// before start will not affect the weight.
- /// \param End The expected end of the split artifact. Instructions
+ /// \param End The expected end of the split artifact. Instructions
/// after end will not affect the weight.
/// \return The expected spill weight of the split artifact. Returns
- /// negative weight for unspillable LI.
- float futureWeight(LiveInterval &LI, SlotIndex Start, SlotIndex End);
-
- /// Compute spill weights and allocation hints for all virtual register
- /// live intervals.
- void calculateSpillWeightsAndHints();
-
- protected:
+ /// negative weight for unspillable LI.
+ float futureWeight(LiveInterval &LI, SlotIndex Start, SlotIndex End);
+
+ /// Compute spill weights and allocation hints for all virtual register
+ /// live intervals.
+ void calculateSpillWeightsAndHints();
+
+ protected:
/// Helper function for weight calculations.
-    /// (Re)compute LI's spill weight and allocation hint, or, for non-null
+  /// (Re)compute LI's spill weight and allocation hint, or, for non-null
   /// Start and End, compute the future expected spill weight of a split
- /// artifact of LI that will span between start and end slot indexes.
- /// \param LI The live interval for which to compute the weight.
- /// \param Start The expected beginning of the split artifact. Instructions
+ /// artifact of LI that will span between start and end slot indexes.
+ /// \param LI The live interval for which to compute the weight.
+ /// \param Start The expected beginning of the split artifact. Instructions
/// before start will not affect the weight. Relevant for
/// weight calculation of future split artifact.
- /// \param End The expected end of the split artifact. Instructions
+ /// \param End The expected end of the split artifact. Instructions
/// after end will not affect the weight. Relevant for
/// weight calculation of future split artifact.
- /// \return The spill weight. Returns negative weight for unspillable LI.
- float weightCalcHelper(LiveInterval &LI, SlotIndex *Start = nullptr,
- SlotIndex *End = nullptr);
-
- /// Weight normalization function.
- virtual float normalize(float UseDefFreq, unsigned Size,
- unsigned NumInstr) {
- return normalizeSpillWeight(UseDefFreq, Size, NumInstr);
- }
+ /// \return The spill weight. Returns negative weight for unspillable LI.
+ float weightCalcHelper(LiveInterval &LI, SlotIndex *Start = nullptr,
+ SlotIndex *End = nullptr);
+
+ /// Weight normalization function.
+ virtual float normalize(float UseDefFreq, unsigned Size,
+ unsigned NumInstr) {
+ return normalizeSpillWeight(UseDefFreq, Size, NumInstr);
+ }
};
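Since normalize is the class's only virtual hook, spill-weight policy can be adjusted by subclassing. A hypothetical sketch (the damping policy is invented; assumes this header is included):

class SizeAwareVirtRegAuxInfo : public VirtRegAuxInfo {
public:
  using VirtRegAuxInfo::VirtRegAuxInfo; // inherit the five-argument ctor

protected:
  float normalize(float UseDefFreq, unsigned Size,
                  unsigned NumInstr) override {
    // Start from the default normalization, then damp weights for
    // intervals spanning many instructions (illustrative policy only).
    float Weight = VirtRegAuxInfo::normalize(UseDefFreq, Size, NumInstr);
    return NumInstr > 100 ? Weight * 0.5f : Weight;
  }
};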
} // end namespace llvm
diff --git a/contrib/libs/llvm12/include/llvm/CodeGen/CallingConvLower.h b/contrib/libs/llvm12/include/llvm/CodeGen/CallingConvLower.h
index 8383d11725..85263986db 100644
--- a/contrib/libs/llvm12/include/llvm/CodeGen/CallingConvLower.h
+++ b/contrib/libs/llvm12/include/llvm/CodeGen/CallingConvLower.h
@@ -23,7 +23,7 @@
#include "llvm/ADT/SmallVector.h"
#include "llvm/CodeGen/MachineFrameInfo.h"
-#include "llvm/CodeGen/Register.h"
+#include "llvm/CodeGen/Register.h"
#include "llvm/CodeGen/TargetCallingConv.h"
#include "llvm/IR/CallingConv.h"
#include "llvm/MC/MCRegisterInfo.h"
@@ -32,7 +32,7 @@
namespace llvm {
class CCState;
-class MachineFunction;
+class MachineFunction;
class MVT;
class TargetRegisterInfo;
@@ -347,11 +347,11 @@ public:
return Regs.size();
}
- void DeallocateReg(MCPhysReg Reg) {
- assert(isAllocated(Reg) && "Trying to deallocate an unallocated register");
- MarkUnallocated(Reg);
- }
-
+ void DeallocateReg(MCPhysReg Reg) {
+ assert(isAllocated(Reg) && "Trying to deallocate an unallocated register");
+ MarkUnallocated(Reg);
+ }
+
/// AllocateReg - Attempt to allocate one register. If it is not available,
/// return zero. Otherwise, return the register, marking it and any aliases
/// as allocated.
@@ -445,7 +445,7 @@ public:
return AllocateStack(Size, Align(Alignment));
}
- void ensureMaxAlignment(Align Alignment);
+ void ensureMaxAlignment(Align Alignment);
/// Version of AllocateStack with extra register to be shadowed.
LLVM_ATTRIBUTE_DEPRECATED(unsigned AllocateStack(unsigned Size,
@@ -582,8 +582,8 @@ public:
private:
/// MarkAllocated - Mark a register and all of its aliases as allocated.
void MarkAllocated(MCPhysReg Reg);
-
- void MarkUnallocated(MCPhysReg Reg);
+
+ void MarkUnallocated(MCPhysReg Reg);
};
} // end namespace llvm
diff --git a/contrib/libs/llvm12/include/llvm/CodeGen/CodeGenPassBuilder.h b/contrib/libs/llvm12/include/llvm/CodeGen/CodeGenPassBuilder.h
index cf1633258f..e23ff00b9f 100644
--- a/contrib/libs/llvm12/include/llvm/CodeGen/CodeGenPassBuilder.h
+++ b/contrib/libs/llvm12/include/llvm/CodeGen/CodeGenPassBuilder.h
@@ -1,1155 +1,1155 @@
-#pragma once
-
-#ifdef __GNUC__
-#pragma GCC diagnostic push
-#pragma GCC diagnostic ignored "-Wunused-parameter"
-#endif
-
-//===- Construction of codegen pass pipelines ------------------*- C++ -*--===//
-//
-// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
-// See https://llvm.org/LICENSE.txt for license information.
-// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
-//
-//===----------------------------------------------------------------------===//
-/// \file
-///
-/// Interfaces for registering analysis passes, producing common pass manager
-/// configurations, and parsing of pass pipelines.
-///
-//===----------------------------------------------------------------------===//
-
-#ifndef LLVM_CODEGEN_CODEGENPASSBUILDER_H
-#define LLVM_CODEGEN_CODEGENPASSBUILDER_H
-
-#include "llvm/ADT/FunctionExtras.h"
-#include "llvm/ADT/SmallVector.h"
-#include "llvm/ADT/StringRef.h"
-#include "llvm/Analysis/AliasAnalysis.h"
-#include "llvm/Analysis/BasicAliasAnalysis.h"
-#include "llvm/Analysis/CFLAndersAliasAnalysis.h"
-#include "llvm/Analysis/CFLSteensAliasAnalysis.h"
-#include "llvm/Analysis/ScopedNoAliasAA.h"
-#include "llvm/Analysis/TargetTransformInfo.h"
-#include "llvm/Analysis/TypeBasedAliasAnalysis.h"
-#include "llvm/CodeGen/ExpandReductions.h"
-#include "llvm/CodeGen/MachineModuleInfo.h"
-#include "llvm/CodeGen/MachinePassManager.h"
-#include "llvm/CodeGen/PreISelIntrinsicLowering.h"
-#include "llvm/CodeGen/UnreachableBlockElim.h"
-#include "llvm/IR/IRPrintingPasses.h"
-#include "llvm/IR/PassManager.h"
-#include "llvm/IR/Verifier.h"
-#include "llvm/MC/MCAsmInfo.h"
-#include "llvm/MC/MCStreamer.h"
-#include "llvm/MC/MCTargetOptions.h"
-#include "llvm/Support/CodeGen.h"
-#include "llvm/Support/Debug.h"
-#include "llvm/Support/Error.h"
-#include "llvm/Support/ErrorHandling.h"
-#include "llvm/Target/CGPassBuilderOption.h"
-#include "llvm/Target/TargetMachine.h"
-#include "llvm/Transforms/Scalar.h"
-#include "llvm/Transforms/Scalar/ConstantHoisting.h"
-#include "llvm/Transforms/Scalar/LoopPassManager.h"
-#include "llvm/Transforms/Scalar/LoopStrengthReduce.h"
-#include "llvm/Transforms/Scalar/LowerConstantIntrinsics.h"
-#include "llvm/Transforms/Scalar/MergeICmps.h"
-#include "llvm/Transforms/Scalar/PartiallyInlineLibCalls.h"
-#include "llvm/Transforms/Scalar/ScalarizeMaskedMemIntrin.h"
-#include "llvm/Transforms/Utils.h"
-#include "llvm/Transforms/Utils/EntryExitInstrumenter.h"
-#include "llvm/Transforms/Utils/LowerInvoke.h"
-#include <cassert>
-#include <string>
-#include <type_traits>
-#include <utility>
-
-namespace llvm {
-
-// FIXME: Dummy target-independent pass definitions that have not yet been
-// ported to the new pass manager. Once they are, remove these.
-#define DUMMY_FUNCTION_PASS(NAME, PASS_NAME, CONSTRUCTOR) \
- struct PASS_NAME : public PassInfoMixin<PASS_NAME> { \
- template <typename... Ts> PASS_NAME(Ts &&...) {} \
- PreservedAnalyses run(Function &, FunctionAnalysisManager &) { \
- return PreservedAnalyses::all(); \
- } \
- };
-#define DUMMY_MODULE_PASS(NAME, PASS_NAME, CONSTRUCTOR) \
- struct PASS_NAME : public PassInfoMixin<PASS_NAME> { \
- template <typename... Ts> PASS_NAME(Ts &&...) {} \
- PreservedAnalyses run(Module &, ModuleAnalysisManager &) { \
- return PreservedAnalyses::all(); \
- } \
- };
-#define DUMMY_MACHINE_MODULE_PASS(NAME, PASS_NAME, CONSTRUCTOR) \
- struct PASS_NAME : public PassInfoMixin<PASS_NAME> { \
- template <typename... Ts> PASS_NAME(Ts &&...) {} \
- Error run(Module &, MachineFunctionAnalysisManager &) { \
- return Error::success(); \
- } \
- PreservedAnalyses run(MachineFunction &, \
- MachineFunctionAnalysisManager &) { \
-      llvm_unreachable("this API exists only to satisfy the new PM API");     \
- } \
- static AnalysisKey Key; \
- };
-#define DUMMY_MACHINE_FUNCTION_PASS(NAME, PASS_NAME, CONSTRUCTOR) \
- struct PASS_NAME : public PassInfoMixin<PASS_NAME> { \
- template <typename... Ts> PASS_NAME(Ts &&...) {} \
- PreservedAnalyses run(MachineFunction &, \
- MachineFunctionAnalysisManager &) { \
- return PreservedAnalyses::all(); \
- } \
- static AnalysisKey Key; \
- };
-#include "MachinePassRegistry.def"
-
-/// This class provides access to building LLVM's passes.
-///
-/// Its members provide the baseline state available to passes during their
-/// construction. The \c MachinePassRegistry.def file specifies how to construct
-/// all of the built-in passes, and those may reference these members during
-/// construction.
-template <typename DerivedT> class CodeGenPassBuilder {
-public:
- explicit CodeGenPassBuilder(LLVMTargetMachine &TM, CGPassBuilderOption Opts,
- PassInstrumentationCallbacks *PIC)
- : TM(TM), Opt(Opts), PIC(PIC) {
- // Target could set CGPassBuilderOption::MISchedPostRA to true to achieve
- // substitutePass(&PostRASchedulerID, &PostMachineSchedulerID)
-
- // Target should override TM.Options.EnableIPRA in their target-specific
- // LLVMTM ctor. See TargetMachine::setGlobalISel for example.
- if (Opt.EnableIPRA)
- TM.Options.EnableIPRA = *Opt.EnableIPRA;
-
- if (Opt.EnableGlobalISelAbort)
- TM.Options.GlobalISelAbort = *Opt.EnableGlobalISelAbort;
-
- if (!Opt.OptimizeRegAlloc)
- Opt.OptimizeRegAlloc = getOptLevel() != CodeGenOpt::None;
- }
-
- Error buildPipeline(ModulePassManager &MPM, MachineFunctionPassManager &MFPM,
- raw_pwrite_stream &Out, raw_pwrite_stream *DwoOut,
- CodeGenFileType FileType) const;
-
- void registerModuleAnalyses(ModuleAnalysisManager &) const;
- void registerFunctionAnalyses(FunctionAnalysisManager &) const;
- void registerMachineFunctionAnalyses(MachineFunctionAnalysisManager &) const;
- std::pair<StringRef, bool> getPassNameFromLegacyName(StringRef) const;
-
- void registerAnalyses(MachineFunctionAnalysisManager &MFAM) const {
- registerModuleAnalyses(*MFAM.MAM);
- registerFunctionAnalyses(*MFAM.FAM);
- registerMachineFunctionAnalyses(MFAM);
- }
-
- PassInstrumentationCallbacks *getPassInstrumentationCallbacks() const {
- return PIC;
- }
-
-protected:
- template <typename PassT> using has_key_t = decltype(PassT::Key);
-
- template <typename PassT>
- using is_module_pass_t = decltype(std::declval<PassT &>().run(
- std::declval<Module &>(), std::declval<ModuleAnalysisManager &>()));
-
- template <typename PassT>
- using is_function_pass_t = decltype(std::declval<PassT &>().run(
- std::declval<Function &>(), std::declval<FunctionAnalysisManager &>()));
-
- // Function object to maintain state while adding codegen IR passes.
- class AddIRPass {
- public:
- AddIRPass(ModulePassManager &MPM, bool DebugPM, bool Check = true)
- : MPM(MPM), FPM(DebugPM) {
- if (Check)
- AddingFunctionPasses = false;
- }
- ~AddIRPass() {
- MPM.addPass(createModuleToFunctionPassAdaptor(std::move(FPM)));
- }
-
- // Add Function Pass
- template <typename PassT>
- std::enable_if_t<is_detected<is_function_pass_t, PassT>::value>
- operator()(PassT &&Pass) {
- if (AddingFunctionPasses && !*AddingFunctionPasses)
- AddingFunctionPasses = true;
- FPM.addPass(std::forward<PassT>(Pass));
- }
-
- // Add Module Pass
- template <typename PassT>
- std::enable_if_t<is_detected<is_module_pass_t, PassT>::value &&
- !is_detected<is_function_pass_t, PassT>::value>
- operator()(PassT &&Pass) {
- assert((!AddingFunctionPasses || !*AddingFunctionPasses) &&
- "could not add module pass after adding function pass");
- MPM.addPass(std::forward<PassT>(Pass));
- }
-
- private:
- ModulePassManager &MPM;
- FunctionPassManager FPM;
-    // The codegen IR pipeline consists mostly of function passes, with the
-    // exception of a few loop and module passes. `AddingFunctionPasses` makes
-    // sure that module passes are only added at the beginning of the pipeline.
-    // Once we begin adding function passes, we can no longer add module
-    // passes. This special-casing introduces fewer adaptor passes. If we ever
-    // need to add module passes after function passes, we can change the
-    // implementation to accommodate that.
- Optional<bool> AddingFunctionPasses;
- };
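A hedged usage sketch of the invariant described above: in a derived builder's hook, module passes must all be added before the first function pass. MyPassBuilder is hypothetical; the two passes named come from this header's includes.

class MyPassBuilder : public CodeGenPassBuilder<MyPassBuilder> {
public:
  using CodeGenPassBuilder<MyPassBuilder>::CodeGenPassBuilder;

  void addPreISel(AddIRPass &addPass) const {
    addPass(PreISelIntrinsicLoweringPass()); // module pass: OK while no
                                             // function pass has been added
    addPass(VerifierPass());                 // function pass: from here on,
                                             // adding a module pass asserts
  }
};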
-
- // Function object to maintain state while adding codegen machine passes.
- class AddMachinePass {
- public:
- AddMachinePass(MachineFunctionPassManager &PM) : PM(PM) {}
-
- template <typename PassT> void operator()(PassT &&Pass) {
- static_assert(
- is_detected<has_key_t, PassT>::value,
- "Machine function pass must define a static member variable `Key`.");
- for (auto &C : BeforeCallbacks)
- if (!C(&PassT::Key))
- return;
- PM.addPass(std::forward<PassT>(Pass));
- for (auto &C : AfterCallbacks)
- C(&PassT::Key);
- }
-
- template <typename PassT> void insertPass(AnalysisKey *ID, PassT Pass) {
- AfterCallbacks.emplace_back(
- [this, ID, Pass = std::move(Pass)](AnalysisKey *PassID) {
- if (PassID == ID)
- this->PM.addPass(std::move(Pass));
- });
- }
-
- void disablePass(AnalysisKey *ID) {
- BeforeCallbacks.emplace_back(
- [ID](AnalysisKey *PassID) { return PassID != ID; });
- }
-
- MachineFunctionPassManager releasePM() { return std::move(PM); }
-
- private:
- MachineFunctionPassManager &PM;
- SmallVector<llvm::unique_function<bool(AnalysisKey *)>, 4> BeforeCallbacks;
- SmallVector<llvm::unique_function<void(AnalysisKey *)>, 4> AfterCallbacks;
- };
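Usage sketch for the two customization points above; SomeExistingPass, SomeOtherPass, and MyFixupPass are hypothetical machine-function passes that define the required static `Key`.

void customizeMachinePipeline(AddMachinePass &addPass) {
  // Splice MyFixupPass in right after each insertion of SomeExistingPass.
  addPass.insertPass(&SomeExistingPass::Key, MyFixupPass());
  // Drop SomeOtherPass from the pipeline entirely.
  addPass.disablePass(&SomeOtherPass::Key);
}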
-
- LLVMTargetMachine &TM;
- CGPassBuilderOption Opt;
- PassInstrumentationCallbacks *PIC;
-
-  /// Targets override these hooks to register target-specific analyses.
- void registerTargetAnalysis(ModuleAnalysisManager &) const {}
- void registerTargetAnalysis(FunctionAnalysisManager &) const {}
- void registerTargetAnalysis(MachineFunctionAnalysisManager &) const {}
- std::pair<StringRef, bool> getTargetPassNameFromLegacyName(StringRef) const {
- return {"", false};
- }
-
- template <typename TMC> TMC &getTM() const { return static_cast<TMC &>(TM); }
- CodeGenOpt::Level getOptLevel() const { return TM.getOptLevel(); }
-
- /// Check whether or not GlobalISel should abort on error.
- /// When this is disabled, GlobalISel will fall back on SDISel instead of
- /// erroring out.
- bool isGlobalISelAbortEnabled() const {
- return TM.Options.GlobalISelAbort == GlobalISelAbortMode::Enable;
- }
-
- /// Check whether or not a diagnostic should be emitted when GlobalISel
- /// uses the fallback path. In other words, it will emit a diagnostic
- /// when GlobalISel failed and isGlobalISelAbortEnabled is false.
- bool reportDiagnosticWhenGlobalISelFallback() const {
- return TM.Options.GlobalISelAbort == GlobalISelAbortMode::DisableWithDiag;
- }
-
- /// addInstSelector - This method should install an instruction selector pass,
- /// which converts from LLVM code to machine instructions.
- Error addInstSelector(AddMachinePass &) const {
- return make_error<StringError>("addInstSelector is not overridden",
- inconvertibleErrorCode());
- }
-
- /// Add passes that optimize instruction level parallelism for out-of-order
- /// targets. These passes are run while the machine code is still in SSA
- /// form, so they can use MachineTraceMetrics to control their heuristics.
- ///
- /// All passes added here should preserve the MachineDominatorTree,
- /// MachineLoopInfo, and MachineTraceMetrics analyses.
- void addILPOpts(AddMachinePass &) const {}
-
- /// This method may be implemented by targets that want to run passes
- /// immediately before register allocation.
- void addPreRegAlloc(AddMachinePass &) const {}
-
- /// addPreRewrite - Add passes to the optimized register allocation pipeline
- /// after register allocation is complete, but before virtual registers are
- /// rewritten to physical registers.
- ///
- /// These passes must preserve VirtRegMap and LiveIntervals, and when running
- /// after RABasic or RAGreedy, they should take advantage of LiveRegMatrix.
- /// When these passes run, VirtRegMap contains legal physreg assignments for
- /// all virtual registers.
- ///
-  /// Note that if the target overloads addRegAssignAndRewriteOptimized, this
-  /// may not be honored. This is also not generally used for the fast variant,
-  /// where the allocation and rewriting are done in one pass.
- void addPreRewrite(AddMachinePass &) const {}
-
- /// Add passes to be run immediately after virtual registers are rewritten
- /// to physical registers.
- void addPostRewrite(AddMachinePass &) const {}
-
- /// This method may be implemented by targets that want to run passes after
- /// register allocation pass pipeline but before prolog-epilog insertion.
- void addPostRegAlloc(AddMachinePass &) const {}
-
- /// This method may be implemented by targets that want to run passes after
- /// prolog-epilog insertion and before the second instruction scheduling pass.
- void addPreSched2(AddMachinePass &) const {}
-
- /// This pass may be implemented by targets that want to run passes
- /// immediately before machine code is emitted.
- void addPreEmitPass(AddMachinePass &) const {}
-
- /// Targets may add passes immediately before machine code is emitted in this
- /// callback. This is called even later than `addPreEmitPass`.
- // FIXME: Rename `addPreEmitPass` to something more sensible given its actual
- // position and remove the `2` suffix here as this callback is what
- // `addPreEmitPass` *should* be but in reality isn't.
- void addPreEmitPass2(AddMachinePass &) const {}
-
- /// {{@ For GlobalISel
- ///
-
- /// addPreISel - This method should add any "last minute" LLVM->LLVM
-  /// passes (which are run just before the instruction selector).
- void addPreISel(AddIRPass &) const {
- llvm_unreachable("addPreISel is not overridden");
- }
-
- /// This method should install an IR translator pass, which converts from
- /// LLVM code to machine instructions with possibly generic opcodes.
- Error addIRTranslator(AddMachinePass &) const {
- return make_error<StringError>("addIRTranslator is not overridden",
- inconvertibleErrorCode());
- }
-
- /// This method may be implemented by targets that want to run passes
- /// immediately before legalization.
- void addPreLegalizeMachineIR(AddMachinePass &) const {}
-
- /// This method should install a legalize pass, which converts the instruction
- /// sequence into one that can be selected by the target.
- Error addLegalizeMachineIR(AddMachinePass &) const {
- return make_error<StringError>("addLegalizeMachineIR is not overridden",
- inconvertibleErrorCode());
- }
-
- /// This method may be implemented by targets that want to run passes
- /// immediately before the register bank selection.
- void addPreRegBankSelect(AddMachinePass &) const {}
-
- /// This method should install a register bank selector pass, which
- /// assigns register banks to virtual registers without a register
- /// class or register banks.
- Error addRegBankSelect(AddMachinePass &) const {
- return make_error<StringError>("addRegBankSelect is not overridden",
- inconvertibleErrorCode());
- }
-
- /// This method may be implemented by targets that want to run passes
- /// immediately before the (global) instruction selection.
- void addPreGlobalInstructionSelect(AddMachinePass &) const {}
-
- /// This method should install a (global) instruction selector pass, which
- /// converts possibly generic instructions to fully target-specific
- /// instructions, thereby constraining all generic virtual registers to
- /// register classes.
- Error addGlobalInstructionSelect(AddMachinePass &) const {
- return make_error<StringError>(
- "addGlobalInstructionSelect is not overridden",
- inconvertibleErrorCode());
- }
- /// @}}
-
- /// High level function that adds all passes necessary to go from llvm IR
- /// representation to the MI representation.
- /// Adds IR based lowering and target specific optimization passes and finally
- /// the core instruction selection passes.
- void addISelPasses(AddIRPass &) const;
-
- /// Add the actual instruction selection passes. This does not include
- /// preparation passes on IR.
- Error addCoreISelPasses(AddMachinePass &) const;
-
- /// Add the complete, standard set of LLVM CodeGen passes.
- /// Fully developed targets will not generally override this.
- Error addMachinePasses(AddMachinePass &) const;
-
- /// Add passes to lower exception handling for the code generator.
- void addPassesToHandleExceptions(AddIRPass &) const;
-
- /// Add common target configurable passes that perform LLVM IR to IR
- /// transforms following machine independent optimization.
- void addIRPasses(AddIRPass &) const;
-
- /// Add pass to prepare the LLVM IR for code generation. This should be done
- /// before exception handling preparation passes.
- void addCodeGenPrepare(AddIRPass &) const;
-
- /// Add common passes that perform LLVM IR to IR transforms in preparation for
- /// instruction selection.
- void addISelPrepare(AddIRPass &) const;
-
- /// Methods with trivial inline returns are convenient points in the common
- /// codegen pass pipeline where targets may insert passes. Methods with
- /// out-of-line standard implementations are major CodeGen stages called by
- /// addMachinePasses. Some targets may override major stages when inserting
-  /// passes is insufficient, but maintaining overridden stages is more work.
- ///
-
- /// addMachineSSAOptimization - Add standard passes that optimize machine
- /// instructions in SSA form.
- void addMachineSSAOptimization(AddMachinePass &) const;
-
- /// addFastRegAlloc - Add the minimum set of target-independent passes that
- /// are required for fast register allocation.
- Error addFastRegAlloc(AddMachinePass &) const;
-
- /// addOptimizedRegAlloc - Add passes related to register allocation.
- /// LLVMTargetMachine provides standard regalloc passes for most targets.
- void addOptimizedRegAlloc(AddMachinePass &) const;
-
- /// Add passes that optimize machine instructions after register allocation.
- void addMachineLateOptimization(AddMachinePass &) const;
-
-  /// addGCPasses - Add late codegen passes that analyze code for garbage
-  /// collection.
- void addGCPasses(AddMachinePass &) const {}
-
- /// Add standard basic block placement passes.
- void addBlockPlacement(AddMachinePass &) const;
-
- using CreateMCStreamer =
- std::function<Expected<std::unique_ptr<MCStreamer>>(MCContext &)>;
- void addAsmPrinter(AddMachinePass &, CreateMCStreamer) const {
- llvm_unreachable("addAsmPrinter is not overridden");
- }
-
- /// Utilities for targets to add passes to the pass manager.
- ///
-
-  /// addTargetRegisterAllocator - Create the register allocator pass for
-  /// this target at the current optimization level.
- void addTargetRegisterAllocator(AddMachinePass &, bool Optimized) const;
-
-  /// addMachinePasses helper to create the target-selected or overridden
- /// regalloc pass.
- void addRegAllocPass(AddMachinePass &, bool Optimized) const;
-
-  /// Add core register allocator passes, which do the actual register
-  /// assignment and rewriting. \returns an Error if the assignment pipeline
-  /// could not be set up.
- Error addRegAssignmentFast(AddMachinePass &) const;
- Error addRegAssignmentOptimized(AddMachinePass &) const;
-
-private:
- DerivedT &derived() { return static_cast<DerivedT &>(*this); }
- const DerivedT &derived() const {
- return static_cast<const DerivedT &>(*this);
- }
-};
-
-template <typename Derived>
-Error CodeGenPassBuilder<Derived>::buildPipeline(
- ModulePassManager &MPM, MachineFunctionPassManager &MFPM,
- raw_pwrite_stream &Out, raw_pwrite_stream *DwoOut,
- CodeGenFileType FileType) const {
- AddIRPass addIRPass(MPM, Opt.DebugPM);
- addISelPasses(addIRPass);
-
- AddMachinePass addPass(MFPM);
- if (auto Err = addCoreISelPasses(addPass))
- return std::move(Err);
-
- if (auto Err = derived().addMachinePasses(addPass))
- return std::move(Err);
-
- derived().addAsmPrinter(
- addPass, [this, &Out, DwoOut, FileType](MCContext &Ctx) {
- return this->TM.createMCStreamer(Out, DwoOut, FileType, Ctx);
- });
-
- addPass(FreeMachineFunctionPass());
- return Error::success();
-}
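A hedged driver sketch showing how a target's derived builder would be used end to end. X86CodeGenPassBuilder is hypothetical and error handling is abbreviated; the MFAM.MAM dereference mirrors registerAnalyses above.

Error emitWithNewPM(LLVMTargetMachine &TM, CGPassBuilderOption Opt,
                    PassInstrumentationCallbacks *PIC, Module &M,
                    MachineFunctionAnalysisManager &MFAM,
                    raw_pwrite_stream &Out) {
  X86CodeGenPassBuilder Builder(TM, Opt, PIC); // hypothetical derived type
  Builder.registerAnalyses(MFAM);

  ModulePassManager MPM;
  MachineFunctionPassManager MFPM;
  if (Error Err = Builder.buildPipeline(MPM, MFPM, Out, /*DwoOut=*/nullptr,
                                        CGFT_ObjectFile))
    return Err;

  MPM.run(M, *MFAM.MAM);    // IR pipeline first (PreservedAnalyses ignored)
  return MFPM.run(M, MFAM); // then the machine pipeline
}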
-
-static inline AAManager registerAAAnalyses(CFLAAType UseCFLAA) {
- AAManager AA;
-
- // The order in which these are registered determines their priority when
- // being queried.
-
- switch (UseCFLAA) {
- case CFLAAType::Steensgaard:
- AA.registerFunctionAnalysis<CFLSteensAA>();
- break;
- case CFLAAType::Andersen:
- AA.registerFunctionAnalysis<CFLAndersAA>();
- break;
- case CFLAAType::Both:
- AA.registerFunctionAnalysis<CFLAndersAA>();
- AA.registerFunctionAnalysis<CFLSteensAA>();
- break;
- default:
- break;
- }
-
- // Basic AliasAnalysis support.
- // Add TypeBasedAliasAnalysis before BasicAliasAnalysis so that
- // BasicAliasAnalysis wins if they disagree. This is intended to help
- // support "obvious" type-punning idioms.
- AA.registerFunctionAnalysis<TypeBasedAA>();
- AA.registerFunctionAnalysis<ScopedNoAliasAA>();
- AA.registerFunctionAnalysis<BasicAA>();
-
- return AA;
-}
-
-template <typename Derived>
-void CodeGenPassBuilder<Derived>::registerModuleAnalyses(
- ModuleAnalysisManager &MAM) const {
-#define MODULE_ANALYSIS(NAME, PASS_NAME, CONSTRUCTOR) \
- MAM.registerPass([&] { return PASS_NAME CONSTRUCTOR; });
-#include "MachinePassRegistry.def"
- derived().registerTargetAnalysis(MAM);
-}
-
-template <typename Derived>
-void CodeGenPassBuilder<Derived>::registerFunctionAnalyses(
- FunctionAnalysisManager &FAM) const {
- FAM.registerPass([this] { return registerAAAnalyses(this->Opt.UseCFLAA); });
-
-#define FUNCTION_ANALYSIS(NAME, PASS_NAME, CONSTRUCTOR) \
- FAM.registerPass([&] { return PASS_NAME CONSTRUCTOR; });
-#include "MachinePassRegistry.def"
- derived().registerTargetAnalysis(FAM);
-}
-
-template <typename Derived>
-void CodeGenPassBuilder<Derived>::registerMachineFunctionAnalyses(
- MachineFunctionAnalysisManager &MFAM) const {
-#define MACHINE_FUNCTION_ANALYSIS(NAME, PASS_NAME, CONSTRUCTOR) \
- MFAM.registerPass([&] { return PASS_NAME CONSTRUCTOR; });
-#include "MachinePassRegistry.def"
- derived().registerTargetAnalysis(MFAM);
-}
-
-// FIXME: For the new PM, using the pass name directly on the command line
-// seems good. Translate a stringified pass name to its old command-line name.
-// Returns the matching legacy name and a boolean value indicating whether the
-// pass is a machine pass.
-template <typename Derived>
-std::pair<StringRef, bool>
-CodeGenPassBuilder<Derived>::getPassNameFromLegacyName(StringRef Name) const {
- std::pair<StringRef, bool> Ret;
- if (Name.empty())
- return Ret;
-
-#define FUNCTION_PASS(NAME, PASS_NAME, CONSTRUCTOR) \
- if (Name == NAME) \
- Ret = {#PASS_NAME, false};
-#define DUMMY_FUNCTION_PASS(NAME, PASS_NAME, CONSTRUCTOR) \
- if (Name == NAME) \
- Ret = {#PASS_NAME, false};
-#define MODULE_PASS(NAME, PASS_NAME, CONSTRUCTOR) \
- if (Name == NAME) \
- Ret = {#PASS_NAME, false};
-#define DUMMY_MODULE_PASS(NAME, PASS_NAME, CONSTRUCTOR) \
- if (Name == NAME) \
- Ret = {#PASS_NAME, false};
-#define MACHINE_MODULE_PASS(NAME, PASS_NAME, CONSTRUCTOR) \
- if (Name == NAME) \
- Ret = {#PASS_NAME, true};
-#define DUMMY_MACHINE_MODULE_PASS(NAME, PASS_NAME, CONSTRUCTOR) \
- if (Name == NAME) \
- Ret = {#PASS_NAME, true};
-#define MACHINE_FUNCTION_PASS(NAME, PASS_NAME, CONSTRUCTOR) \
- if (Name == NAME) \
- Ret = {#PASS_NAME, true};
-#define DUMMY_MACHINE_FUNCTION_PASS(NAME, PASS_NAME, CONSTRUCTOR) \
- if (Name == NAME) \
- Ret = {#PASS_NAME, true};
-#include "llvm/CodeGen/MachinePassRegistry.def"
-
- if (Ret.first.empty())
- Ret = derived().getTargetPassNameFromLegacyName(Name);
-
- if (Ret.first.empty())
- report_fatal_error(Twine('\"') + Twine(Name) +
- Twine("\" pass could not be found."));
-
- return Ret;
-}
-
-template <typename Derived>
-void CodeGenPassBuilder<Derived>::addISelPasses(AddIRPass &addPass) const {
- if (TM.useEmulatedTLS())
- addPass(LowerEmuTLSPass());
-
- addPass(PreISelIntrinsicLoweringPass());
-
- derived().addIRPasses(addPass);
- derived().addCodeGenPrepare(addPass);
- addPassesToHandleExceptions(addPass);
- derived().addISelPrepare(addPass);
-}
-
-/// Add common target configurable passes that perform LLVM IR to IR transforms
-/// following machine independent optimization.
-template <typename Derived>
-void CodeGenPassBuilder<Derived>::addIRPasses(AddIRPass &addPass) const {
- // Before running any passes, run the verifier to determine if the input
- // coming from the front-end and/or optimizer is valid.
- if (!Opt.DisableVerify)
- addPass(VerifierPass());
-
- // Run loop strength reduction before anything else.
- if (getOptLevel() != CodeGenOpt::None && !Opt.DisableLSR) {
- addPass(createFunctionToLoopPassAdaptor(
- LoopStrengthReducePass(), /*UseMemorySSA*/ true, Opt.DebugPM));
- // FIXME: use -stop-after so we could remove PrintLSR
- if (Opt.PrintLSR)
- addPass(PrintFunctionPass(dbgs(), "\n\n*** Code after LSR ***\n"));
- }
-
- if (getOptLevel() != CodeGenOpt::None) {
- // The MergeICmpsPass tries to create memcmp calls by grouping sequences of
- // loads and compares. ExpandMemCmpPass then tries to expand those calls
- // into optimally-sized loads and compares. The transforms are enabled by a
- // target lowering hook.
- if (!Opt.DisableMergeICmps)
- addPass(MergeICmpsPass());
- addPass(ExpandMemCmpPass());
- }
-
- // Run GC lowering passes for builtin collectors
- // TODO: add a pass insertion point here
- addPass(GCLoweringPass());
- addPass(ShadowStackGCLoweringPass());
- addPass(LowerConstantIntrinsicsPass());
-
- // Make sure that no unreachable blocks are instruction selected.
- addPass(UnreachableBlockElimPass());
-
- // Prepare expensive constants for SelectionDAG.
- if (getOptLevel() != CodeGenOpt::None && !Opt.DisableConstantHoisting)
- addPass(ConstantHoistingPass());
-
- if (getOptLevel() != CodeGenOpt::None && !Opt.DisablePartialLibcallInlining)
- addPass(PartiallyInlineLibCallsPass());
-
- // Instrument function entry and exit, e.g. with calls to mcount().
- addPass(EntryExitInstrumenterPass(/*PostInlining=*/true));
-
-  // Add the pass that scalarizes the target's unsupported masked memory
-  // intrinsics. Each unsupported intrinsic is replaced with a chain of basic
-  // blocks that stores/loads the elements one by one where the corresponding
-  // mask bit is set.
- addPass(ScalarizeMaskedMemIntrinPass());
-
- // Expand reduction intrinsics into shuffle sequences if the target wants to.
- addPass(ExpandReductionsPass());
-}
-
-/// Turn exception handling constructs into something the code generators can
-/// handle.
-template <typename Derived>
-void CodeGenPassBuilder<Derived>::addPassesToHandleExceptions(
- AddIRPass &addPass) const {
- const MCAsmInfo *MCAI = TM.getMCAsmInfo();
- assert(MCAI && "No MCAsmInfo");
- switch (MCAI->getExceptionHandlingType()) {
- case ExceptionHandling::SjLj:
-    // SjLj piggy-backs on dwarf for this bit; the cleanups done apply to both.
-    // Dwarf EH prepare needs to be run after SjLj prepare. Otherwise,
- // catch info can get misplaced when a selector ends up more than one block
- // removed from the parent invoke(s). This could happen when a landing
- // pad is shared by multiple invokes and is also a target of a normal
- // edge from elsewhere.
- addPass(SjLjEHPreparePass());
- LLVM_FALLTHROUGH;
- case ExceptionHandling::DwarfCFI:
- case ExceptionHandling::ARM:
- case ExceptionHandling::AIX:
- addPass(DwarfEHPass(getOptLevel()));
- break;
- case ExceptionHandling::WinEH:
- // We support using both GCC-style and MSVC-style exceptions on Windows, so
- // add both preparation passes. Each pass will only actually run if it
- // recognizes the personality function.
- addPass(WinEHPass());
- addPass(DwarfEHPass(getOptLevel()));
- break;
- case ExceptionHandling::Wasm:
- // Wasm EH uses Windows EH instructions, but it does not need to demote PHIs
- // on catchpads and cleanuppads because it does not outline them into
- // funclets. Catchswitch blocks are not lowered in SelectionDAG, so we
- // should remove PHIs there.
- addPass(WinEHPass(/*DemoteCatchSwitchPHIOnly=*/false));
- addPass(WasmEHPass());
- break;
- case ExceptionHandling::None:
- addPass(LowerInvokePass());
-
- // The lower invoke pass may create unreachable code. Remove it.
- addPass(UnreachableBlockElimPass());
- break;
- }
-}
-
-/// Add pass to prepare the LLVM IR for code generation. This should be done
-/// before exception handling preparation passes.
-template <typename Derived>
-void CodeGenPassBuilder<Derived>::addCodeGenPrepare(AddIRPass &addPass) const {
- if (getOptLevel() != CodeGenOpt::None && !Opt.DisableCGP)
- addPass(CodeGenPreparePass());
- // TODO: Default ctor'd RewriteSymbolPass is no-op.
- // addPass(RewriteSymbolPass());
-}
-
-/// Add common passes that perform LLVM IR to IR transforms in preparation for
-/// instruction selection.
-template <typename Derived>
-void CodeGenPassBuilder<Derived>::addISelPrepare(AddIRPass &addPass) const {
- derived().addPreISel(addPass);
-
- // Add both the safe stack and the stack protection passes: each of them will
- // only protect functions that have corresponding attributes.
- addPass(SafeStackPass());
- addPass(StackProtectorPass());
-
- if (Opt.PrintISelInput)
- addPass(PrintFunctionPass(dbgs(),
- "\n\n*** Final LLVM Code input to ISel ***\n"));
-
- // All passes which modify the LLVM IR are now complete; run the verifier
- // to ensure that the IR is valid.
- if (!Opt.DisableVerify)
- addPass(VerifierPass());
-}
-
-template <typename Derived>
-Error CodeGenPassBuilder<Derived>::addCoreISelPasses(
- AddMachinePass &addPass) const {
- // Enable FastISel with -fast-isel, but allow that to be overridden.
- TM.setO0WantsFastISel(Opt.EnableFastISelOption.getValueOr(true));
-
- // Determine an instruction selector.
- enum class SelectorType { SelectionDAG, FastISel, GlobalISel };
- SelectorType Selector;
-
- if (Opt.EnableFastISelOption && *Opt.EnableFastISelOption == true)
- Selector = SelectorType::FastISel;
- else if ((Opt.EnableGlobalISelOption &&
- *Opt.EnableGlobalISelOption == true) ||
- (TM.Options.EnableGlobalISel &&
- (!Opt.EnableGlobalISelOption ||
- *Opt.EnableGlobalISelOption == false)))
- Selector = SelectorType::GlobalISel;
- else if (TM.getOptLevel() == CodeGenOpt::None && TM.getO0WantsFastISel())
- Selector = SelectorType::FastISel;
- else
- Selector = SelectorType::SelectionDAG;
-
-  // Consistently set TM.Options.EnableFastISel and EnableGlobalISel.
- if (Selector == SelectorType::FastISel) {
- TM.setFastISel(true);
- TM.setGlobalISel(false);
- } else if (Selector == SelectorType::GlobalISel) {
- TM.setFastISel(false);
- TM.setGlobalISel(true);
- }
-
- // Add instruction selector passes.
- if (Selector == SelectorType::GlobalISel) {
- if (auto Err = derived().addIRTranslator(addPass))
- return std::move(Err);
-
- derived().addPreLegalizeMachineIR(addPass);
-
- if (auto Err = derived().addLegalizeMachineIR(addPass))
- return std::move(Err);
-
- // Before running the register bank selector, ask the target if it
- // wants to run some passes.
- derived().addPreRegBankSelect(addPass);
-
- if (auto Err = derived().addRegBankSelect(addPass))
- return std::move(Err);
-
- derived().addPreGlobalInstructionSelect(addPass);
-
- if (auto Err = derived().addGlobalInstructionSelect(addPass))
- return std::move(Err);
-
- // Pass to reset the MachineFunction if the ISel failed.
- addPass(ResetMachineFunctionPass(reportDiagnosticWhenGlobalISelFallback(),
- isGlobalISelAbortEnabled()));
-
- // Provide a fallback path when we do not want to abort on
- // not-yet-supported input.
- if (!isGlobalISelAbortEnabled())
- if (auto Err = derived().addInstSelector(addPass))
- return std::move(Err);
-
- } else if (auto Err = derived().addInstSelector(addPass))
- return std::move(Err);
-
- // Expand pseudo-instructions emitted by ISel. Don't run the verifier before
- // FinalizeISel.
- addPass(FinalizeISelPass());
-
- // // Print the instruction selected machine code...
- // printAndVerify("After Instruction Selection");
-
- return Error::success();
-}
-
-/// Add the complete set of target-independent postISel code generator passes.
-///
-/// This can be read as the standard order of major LLVM CodeGen stages. Stages
-/// with nontrivial configuration or multiple passes are broken out below in
-/// add%Stage routines.
-///
-/// Any CodeGenPassBuilder<Derived>::addXX routine may be overridden by the
-/// Target. The addPre/Post methods with empty header implementations allow
-/// injecting target-specific fixups just before or after major stages.
-/// Additionally, targets have the flexibility to change pass order within a
-/// stage by overriding default implementation of add%Stage routines below. Each
-/// technique has maintainability tradeoffs because alternate pass orders are
-/// not well supported. addPre/Post works better if the target pass is easily
-/// tied to a common pass. But if it has subtle dependencies on multiple passes,
-/// the target should override the stage instead.
-template <typename Derived>
-Error CodeGenPassBuilder<Derived>::addMachinePasses(
- AddMachinePass &addPass) const {
- // Add passes that optimize machine instructions in SSA form.
- if (getOptLevel() != CodeGenOpt::None) {
- derived().addMachineSSAOptimization(addPass);
- } else {
- // If the target requests it, assign local variables to stack slots relative
- // to one another and simplify frame index references where possible.
- addPass(LocalStackSlotPass());
- }
-
- if (TM.Options.EnableIPRA)
- addPass(RegUsageInfoPropagationPass());
-
- // Run pre-ra passes.
- derived().addPreRegAlloc(addPass);
-
- // Run register allocation and passes that are tightly coupled with it,
- // including phi elimination and scheduling.
- if (*Opt.OptimizeRegAlloc) {
- derived().addOptimizedRegAlloc(addPass);
- } else {
- if (auto Err = derived().addFastRegAlloc(addPass))
- return Err;
- }
-
- // Run post-ra passes.
- derived().addPostRegAlloc(addPass);
-
- // Insert prolog/epilog code. Eliminate abstract frame index references...
- if (getOptLevel() != CodeGenOpt::None) {
- addPass(PostRAMachineSinkingPass());
- addPass(ShrinkWrapPass());
- }
-
- addPass(PrologEpilogInserterPass());
-
- /// Add passes that optimize machine instructions after register allocation.
- if (getOptLevel() != CodeGenOpt::None)
- derived().addMachineLateOptimization(addPass);
-
- // Expand pseudo instructions before second scheduling pass.
- addPass(ExpandPostRAPseudosPass());
-
- // Run pre-sched2 passes.
- derived().addPreSched2(addPass);
-
- if (Opt.EnableImplicitNullChecks)
- addPass(ImplicitNullChecksPass());
-
- // Second pass scheduler.
- // Let Target optionally insert this pass by itself at some other
- // point.
- if (getOptLevel() != CodeGenOpt::None &&
- !TM.targetSchedulesPostRAScheduling()) {
- if (Opt.MISchedPostRA)
- addPass(PostMachineSchedulerPass());
- else
- addPass(PostRASchedulerPass());
- }
-
- // GC
- derived().addGCPasses(addPass);
-
- // Basic block placement.
- if (getOptLevel() != CodeGenOpt::None)
- derived().addBlockPlacement(addPass);
-
- // Insert before XRay Instrumentation.
- addPass(FEntryInserterPass());
-
- addPass(XRayInstrumentationPass());
- addPass(PatchableFunctionPass());
-
- derived().addPreEmitPass(addPass);
-
- if (TM.Options.EnableIPRA)
- // Collect register usage information and produce a register mask of
- // clobbered registers, to be used to optimize call sites.
- addPass(RegUsageInfoCollectorPass());
-
- addPass(FuncletLayoutPass());
-
- addPass(StackMapLivenessPass());
- addPass(LiveDebugValuesPass());
-
- if (TM.Options.EnableMachineOutliner && getOptLevel() != CodeGenOpt::None &&
- Opt.EnableMachineOutliner != RunOutliner::NeverOutline) {
- bool RunOnAllFunctions =
- (Opt.EnableMachineOutliner == RunOutliner::AlwaysOutline);
- bool AddOutliner = RunOnAllFunctions || TM.Options.SupportsDefaultOutlining;
- if (AddOutliner)
- addPass(MachineOutlinerPass(RunOnAllFunctions));
- }
-
- // Add passes that directly emit MI after all other MI passes.
- derived().addPreEmitPass2(addPass);
-
- return Error::success();
-}
-
-/// Add passes that optimize machine instructions in SSA form.
-template <typename Derived>
-void CodeGenPassBuilder<Derived>::addMachineSSAOptimization(
- AddMachinePass &addPass) const {
- // Pre-ra tail duplication.
- addPass(EarlyTailDuplicatePass());
-
- // Optimize PHIs before DCE: removing dead PHI cycles may make more
- // instructions dead.
- addPass(OptimizePHIsPass());
-
- // This pass merges large allocas. StackSlotColoring is a different pass
- // which merges spill slots.
- addPass(StackColoringPass());
-
- // If the target requests it, assign local variables to stack slots relative
- // to one another and simplify frame index references where possible.
- addPass(LocalStackSlotPass());
-
- // With optimization, dead code should already be eliminated. However
- // there is one known exception: lowered code for arguments that are only
- // used by tail calls, where the tail calls reuse the incoming stack
- // arguments directly (see t11 in test/CodeGen/X86/sibcall.ll).
- addPass(DeadMachineInstructionElimPass());
-
- // Allow targets to insert passes that improve instruction level parallelism,
- // like if-conversion. Such passes will typically need dominator trees and
- // loop info, just like LICM and CSE below.
- derived().addILPOpts(addPass);
-
- addPass(EarlyMachineLICMPass());
- addPass(MachineCSEPass());
-
- addPass(MachineSinkingPass());
-
- addPass(PeepholeOptimizerPass());
- // Clean-up the dead code that may have been generated by peephole
- // rewriting.
- addPass(DeadMachineInstructionElimPass());
-}
-
-//===---------------------------------------------------------------------===//
-/// Register Allocation Pass Configuration
-//===---------------------------------------------------------------------===//
-
-/// Instantiate the default register allocator pass for this target for either
-/// the optimized or unoptimized allocation path. This will be added to the pass
-/// manager by addFastRegAlloc in the unoptimized case or addOptimizedRegAlloc
-/// in the optimized case.
-///
-/// A target that uses the standard regalloc pass order for fast or optimized
-/// allocation may still override this for per-target regalloc
-/// selection. But -regalloc=... always takes precedence.
-template <typename Derived>
-void CodeGenPassBuilder<Derived>::addTargetRegisterAllocator(
- AddMachinePass &addPass, bool Optimized) const {
- if (Optimized)
- addPass(RAGreedyPass());
- else
- addPass(RAFastPass());
-}
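For example (a sketch; MyTargetRAPass is invented), a target preferring its own allocator for optimized builds overrides the hook above, while -regalloc= still takes precedence via addRegAllocPass below:

class MyTargetPassBuilder : public CodeGenPassBuilder<MyTargetPassBuilder> {
public:
  using CodeGenPassBuilder<MyTargetPassBuilder>::CodeGenPassBuilder;

  void addTargetRegisterAllocator(AddMachinePass &addPass,
                                  bool Optimized) const {
    if (Optimized)
      addPass(MyTargetRAPass()); // hypothetical target-specific allocator
    else
      addPass(RAFastPass());
  }
};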
-
-/// Find and instantiate the register allocation pass requested by this target
-/// at the current optimization level. Different register allocators are
-/// defined as separate passes because they may require different analysis.
-template <typename Derived>
-void CodeGenPassBuilder<Derived>::addRegAllocPass(AddMachinePass &addPass,
- bool Optimized) const {
- if (Opt.RegAlloc == RegAllocType::Default)
- // With no -regalloc= override, ask the target for a regalloc pass.
- derived().addTargetRegisterAllocator(addPass, Optimized);
- else if (Opt.RegAlloc == RegAllocType::Basic)
- addPass(RABasicPass());
- else if (Opt.RegAlloc == RegAllocType::Fast)
- addPass(RAFastPass());
- else if (Opt.RegAlloc == RegAllocType::Greedy)
- addPass(RAGreedyPass());
- else if (Opt.RegAlloc == RegAllocType::PBQP)
- addPass(RAPBQPPass());
- else
- llvm_unreachable("unknonwn register allocator type");
-}
-
-template <typename Derived>
-Error CodeGenPassBuilder<Derived>::addRegAssignmentFast(
- AddMachinePass &addPass) const {
- if (Opt.RegAlloc != RegAllocType::Default &&
- Opt.RegAlloc != RegAllocType::Fast)
- return make_error<StringError>(
- "Must use fast (default) register allocator for unoptimized regalloc.",
- inconvertibleErrorCode());
-
- addRegAllocPass(addPass, false);
- return Error::success();
-}
-
-template <typename Derived>
-Error CodeGenPassBuilder<Derived>::addRegAssignmentOptimized(
- AddMachinePass &addPass) const {
- // Add the selected register allocation pass.
- addRegAllocPass(addPass, true);
-
- // Allow targets to change the register assignments before rewriting.
- derived().addPreRewrite(addPass);
-
- // Finally rewrite virtual registers.
- addPass(VirtRegRewriterPass());
- // Perform stack slot coloring and post-ra machine LICM.
- //
- // FIXME: Re-enable coloring with register when it's capable of adding
- // kill markers.
- addPass(StackSlotColoringPass());
-
- return Error::success();
-}
-
-/// Add the minimum set of target-independent passes that are required for
-/// register allocation. No coalescing or scheduling.
-template <typename Derived>
-Error CodeGenPassBuilder<Derived>::addFastRegAlloc(
- AddMachinePass &addPass) const {
- addPass(PHIEliminationPass());
- addPass(TwoAddressInstructionPass());
- return derived().addRegAssignmentFast(addPass);
-}
-
-/// Add standard target-independent passes that are tightly coupled with
-/// optimized register allocation, including coalescing, machine instruction
-/// scheduling, and register allocation itself.
-template <typename Derived>
-void CodeGenPassBuilder<Derived>::addOptimizedRegAlloc(
- AddMachinePass &addPass) const {
- addPass(DetectDeadLanesPass());
-
- addPass(ProcessImplicitDefsPass());
-
- // Edge splitting is smarter with machine loop info.
- addPass(PHIEliminationPass());
-
- // Eventually, we want to run LiveIntervals before PHI elimination.
- if (Opt.EarlyLiveIntervals)
- addPass(LiveIntervalsPass());
-
- addPass(TwoAddressInstructionPass());
- addPass(RegisterCoalescerPass());
-
-  // The machine scheduler may accidentally create disconnected components
-  // when moving subregister definitions around; avoid this by splitting them
-  // into separate vregs beforehand. Splitting can also improve register
-  // allocation quality.
- addPass(RenameIndependentSubregsPass());
-
- // PreRA instruction scheduling.
- addPass(MachineSchedulerPass());
-
- if (derived().addRegAssignmentOptimized(addPass)) {
- // Allow targets to expand pseudo instructions depending on the choice of
- // registers before MachineCopyPropagation.
- derived().addPostRewrite(addPass);
-
- // Copy propagate to forward register uses and try to eliminate COPYs that
- // were not coalesced.
- addPass(MachineCopyPropagationPass());
-
- // Run post-ra machine LICM to hoist reloads / remats.
- //
- // FIXME: can this move into MachineLateOptimization?
- addPass(MachineLICMPass());
- }
-}
-
-//===---------------------------------------------------------------------===//
-/// Post RegAlloc Pass Configuration
-//===---------------------------------------------------------------------===//
-
-/// Add passes that optimize machine instructions after register allocation.
-template <typename Derived>
-void CodeGenPassBuilder<Derived>::addMachineLateOptimization(
- AddMachinePass &addPass) const {
- // Branch folding must be run after regalloc and prolog/epilog insertion.
- addPass(BranchFolderPass());
-
- // Tail duplication.
-  // Note that duplicating tails just increases code size and degrades
-  // performance for targets that require structured control flow. In
-  // addition, it can also make the CFG irreducible. Thus we disable it for
-  // such targets.
- if (!TM.requiresStructuredCFG())
- addPass(TailDuplicatePass());
-
- // Copy propagation.
- addPass(MachineCopyPropagationPass());
-}
-
-/// Add standard basic block placement passes.
-template <typename Derived>
-void CodeGenPassBuilder<Derived>::addBlockPlacement(
- AddMachinePass &addPass) const {
- addPass(MachineBlockPlacementPass());
- // Run a separate pass to collect block placement statistics.
- if (Opt.EnableBlockPlacementStats)
- addPass(MachineBlockPlacementStatsPass());
-}
-
-} // namespace llvm
-
-#endif // LLVM_CODEGEN_CODEGENPASSBUILDER_H
-
-#ifdef __GNUC__
-#pragma GCC diagnostic pop
-#endif
+#pragma once
+
+#ifdef __GNUC__
+#pragma GCC diagnostic push
+#pragma GCC diagnostic ignored "-Wunused-parameter"
+#endif
+
+//===- Construction of codegen pass pipelines ------------------*- C++ -*--===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+/// \file
+///
+/// Interfaces for registering analysis passes, producing common pass manager
+/// configurations, and parsing of pass pipelines.
+///
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_CODEGEN_CODEGENPASSBUILDER_H
+#define LLVM_CODEGEN_CODEGENPASSBUILDER_H
+
+#include "llvm/ADT/FunctionExtras.h"
+#include "llvm/ADT/SmallVector.h"
+#include "llvm/ADT/StringRef.h"
+#include "llvm/Analysis/AliasAnalysis.h"
+#include "llvm/Analysis/BasicAliasAnalysis.h"
+#include "llvm/Analysis/CFLAndersAliasAnalysis.h"
+#include "llvm/Analysis/CFLSteensAliasAnalysis.h"
+#include "llvm/Analysis/ScopedNoAliasAA.h"
+#include "llvm/Analysis/TargetTransformInfo.h"
+#include "llvm/Analysis/TypeBasedAliasAnalysis.h"
+#include "llvm/CodeGen/ExpandReductions.h"
+#include "llvm/CodeGen/MachineModuleInfo.h"
+#include "llvm/CodeGen/MachinePassManager.h"
+#include "llvm/CodeGen/PreISelIntrinsicLowering.h"
+#include "llvm/CodeGen/UnreachableBlockElim.h"
+#include "llvm/IR/IRPrintingPasses.h"
+#include "llvm/IR/PassManager.h"
+#include "llvm/IR/Verifier.h"
+#include "llvm/MC/MCAsmInfo.h"
+#include "llvm/MC/MCStreamer.h"
+#include "llvm/MC/MCTargetOptions.h"
+#include "llvm/Support/CodeGen.h"
+#include "llvm/Support/Debug.h"
+#include "llvm/Support/Error.h"
+#include "llvm/Support/ErrorHandling.h"
+#include "llvm/Target/CGPassBuilderOption.h"
+#include "llvm/Target/TargetMachine.h"
+#include "llvm/Transforms/Scalar.h"
+#include "llvm/Transforms/Scalar/ConstantHoisting.h"
+#include "llvm/Transforms/Scalar/LoopPassManager.h"
+#include "llvm/Transforms/Scalar/LoopStrengthReduce.h"
+#include "llvm/Transforms/Scalar/LowerConstantIntrinsics.h"
+#include "llvm/Transforms/Scalar/MergeICmps.h"
+#include "llvm/Transforms/Scalar/PartiallyInlineLibCalls.h"
+#include "llvm/Transforms/Scalar/ScalarizeMaskedMemIntrin.h"
+#include "llvm/Transforms/Utils.h"
+#include "llvm/Transforms/Utils/EntryExitInstrumenter.h"
+#include "llvm/Transforms/Utils/LowerInvoke.h"
+#include <cassert>
+#include <string>
+#include <type_traits>
+#include <utility>
+
+namespace llvm {
+
+// FIXME: Dummy definitions for target-independent passes that have not yet
+// been ported to the new pass manager. Once they are ported, remove these.
+#define DUMMY_FUNCTION_PASS(NAME, PASS_NAME, CONSTRUCTOR) \
+ struct PASS_NAME : public PassInfoMixin<PASS_NAME> { \
+ template <typename... Ts> PASS_NAME(Ts &&...) {} \
+ PreservedAnalyses run(Function &, FunctionAnalysisManager &) { \
+ return PreservedAnalyses::all(); \
+ } \
+ };
+#define DUMMY_MODULE_PASS(NAME, PASS_NAME, CONSTRUCTOR) \
+ struct PASS_NAME : public PassInfoMixin<PASS_NAME> { \
+ template <typename... Ts> PASS_NAME(Ts &&...) {} \
+ PreservedAnalyses run(Module &, ModuleAnalysisManager &) { \
+ return PreservedAnalyses::all(); \
+ } \
+ };
+#define DUMMY_MACHINE_MODULE_PASS(NAME, PASS_NAME, CONSTRUCTOR) \
+ struct PASS_NAME : public PassInfoMixin<PASS_NAME> { \
+ template <typename... Ts> PASS_NAME(Ts &&...) {} \
+ Error run(Module &, MachineFunctionAnalysisManager &) { \
+ return Error::success(); \
+ } \
+ PreservedAnalyses run(MachineFunction &, \
+ MachineFunctionAnalysisManager &) { \
+ llvm_unreachable("this api is to make new PM api happy"); \
+ } \
+ static AnalysisKey Key; \
+ };
+#define DUMMY_MACHINE_FUNCTION_PASS(NAME, PASS_NAME, CONSTRUCTOR) \
+ struct PASS_NAME : public PassInfoMixin<PASS_NAME> { \
+ template <typename... Ts> PASS_NAME(Ts &&...) {} \
+ PreservedAnalyses run(MachineFunction &, \
+ MachineFunctionAnalysisManager &) { \
+ return PreservedAnalyses::all(); \
+ } \
+ static AnalysisKey Key; \
+ };
+#include "MachinePassRegistry.def"
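+
+// For illustration only (not part of the original header): given the macro
+// above, a hypothetical registry entry
+//
+//   DUMMY_MACHINE_FUNCTION_PASS("my-dummy", MyDummyPass, ())
+//
+// would expand to roughly:
+//
+//   struct MyDummyPass : public PassInfoMixin<MyDummyPass> {
+//     template <typename... Ts> MyDummyPass(Ts &&...) {}
+//     PreservedAnalyses run(MachineFunction &,
+//                           MachineFunctionAnalysisManager &) {
+//       return PreservedAnalyses::all();
+//     }
+//     static AnalysisKey Key;
+//   };
+//
+// i.e. a no-op placeholder that accepts any constructor arguments, so pipeline
+// code can already be written against the eventual pass interface.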
+
+/// This class provides access to building LLVM's passes.
+///
+/// Its members provide the baseline state available to passes during their
+/// construction. The \c MachinePassRegistry.def file specifies how to construct
+/// all of the built-in passes, and those may reference these members during
+/// construction.
+template <typename DerivedT> class CodeGenPassBuilder {
+public:
+ explicit CodeGenPassBuilder(LLVMTargetMachine &TM, CGPassBuilderOption Opts,
+ PassInstrumentationCallbacks *PIC)
+ : TM(TM), Opt(Opts), PIC(PIC) {
+    // A target can set CGPassBuilderOption::MISchedPostRA to true to achieve
+    // the effect of substitutePass(&PostRASchedulerID, &PostMachineSchedulerID).
+
+    // Targets should override TM.Options.EnableIPRA in their target-specific
+    // LLVMTM ctor. See TargetMachine::setGlobalISel for an example.
+ if (Opt.EnableIPRA)
+ TM.Options.EnableIPRA = *Opt.EnableIPRA;
+
+ if (Opt.EnableGlobalISelAbort)
+ TM.Options.GlobalISelAbort = *Opt.EnableGlobalISelAbort;
+
+ if (!Opt.OptimizeRegAlloc)
+ Opt.OptimizeRegAlloc = getOptLevel() != CodeGenOpt::None;
+ }
+
+ Error buildPipeline(ModulePassManager &MPM, MachineFunctionPassManager &MFPM,
+ raw_pwrite_stream &Out, raw_pwrite_stream *DwoOut,
+ CodeGenFileType FileType) const;
+
+ void registerModuleAnalyses(ModuleAnalysisManager &) const;
+ void registerFunctionAnalyses(FunctionAnalysisManager &) const;
+ void registerMachineFunctionAnalyses(MachineFunctionAnalysisManager &) const;
+ std::pair<StringRef, bool> getPassNameFromLegacyName(StringRef) const;
+
+ void registerAnalyses(MachineFunctionAnalysisManager &MFAM) const {
+ registerModuleAnalyses(*MFAM.MAM);
+ registerFunctionAnalyses(*MFAM.FAM);
+ registerMachineFunctionAnalyses(MFAM);
+ }
+
+ PassInstrumentationCallbacks *getPassInstrumentationCallbacks() const {
+ return PIC;
+ }
+
+protected:
+ template <typename PassT> using has_key_t = decltype(PassT::Key);
+
+ template <typename PassT>
+ using is_module_pass_t = decltype(std::declval<PassT &>().run(
+ std::declval<Module &>(), std::declval<ModuleAnalysisManager &>()));
+
+ template <typename PassT>
+ using is_function_pass_t = decltype(std::declval<PassT &>().run(
+ std::declval<Function &>(), std::declval<FunctionAnalysisManager &>()));
+
+ // Function object to maintain state while adding codegen IR passes.
+ class AddIRPass {
+ public:
+ AddIRPass(ModulePassManager &MPM, bool DebugPM, bool Check = true)
+ : MPM(MPM), FPM(DebugPM) {
+ if (Check)
+ AddingFunctionPasses = false;
+ }
+ ~AddIRPass() {
+ MPM.addPass(createModuleToFunctionPassAdaptor(std::move(FPM)));
+ }
+
+ // Add Function Pass
+ template <typename PassT>
+ std::enable_if_t<is_detected<is_function_pass_t, PassT>::value>
+ operator()(PassT &&Pass) {
+ if (AddingFunctionPasses && !*AddingFunctionPasses)
+ AddingFunctionPasses = true;
+ FPM.addPass(std::forward<PassT>(Pass));
+ }
+
+ // Add Module Pass
+ template <typename PassT>
+ std::enable_if_t<is_detected<is_module_pass_t, PassT>::value &&
+ !is_detected<is_function_pass_t, PassT>::value>
+ operator()(PassT &&Pass) {
+      assert((!AddingFunctionPasses || !*AddingFunctionPasses) &&
+             "cannot add a module pass after adding a function pass");
+ MPM.addPass(std::forward<PassT>(Pass));
+ }
+
+ private:
+ ModulePassManager &MPM;
+ FunctionPassManager FPM;
+    // The codegen IR pipeline is mostly function passes, with a few loop and
+    // module passes as exceptions. `AddingFunctionPasses` makes sure that
+    // module passes can only be added at the beginning of the pipeline. Once
+    // we begin adding function passes, we can no longer add module passes.
+    // This special-casing introduces fewer adaptor passes. If we ever need to
+    // add module passes after function passes, the implementation can be
+    // changed to accommodate that.
+ Optional<bool> AddingFunctionPasses;
+ };
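+
+  // Usage sketch (illustrative only): with the ordering rule above, module
+  // passes must all be added before the first function pass, e.g.
+  //
+  //   AddIRPass addIRPass(MPM, /*DebugPM=*/false);
+  //   addIRPass(PreISelIntrinsicLoweringPass()); // module pass, OK up front
+  //   addIRPass(VerifierPass());                 // function pass
+  //   // Adding another module pass here would trip the assert above.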
+
+ // Function object to maintain state while adding codegen machine passes.
+ class AddMachinePass {
+ public:
+ AddMachinePass(MachineFunctionPassManager &PM) : PM(PM) {}
+
+ template <typename PassT> void operator()(PassT &&Pass) {
+ static_assert(
+ is_detected<has_key_t, PassT>::value,
+ "Machine function pass must define a static member variable `Key`.");
+ for (auto &C : BeforeCallbacks)
+ if (!C(&PassT::Key))
+ return;
+ PM.addPass(std::forward<PassT>(Pass));
+ for (auto &C : AfterCallbacks)
+ C(&PassT::Key);
+ }
+
+ template <typename PassT> void insertPass(AnalysisKey *ID, PassT Pass) {
+ AfterCallbacks.emplace_back(
+ [this, ID, Pass = std::move(Pass)](AnalysisKey *PassID) {
+ if (PassID == ID)
+ this->PM.addPass(std::move(Pass));
+ });
+ }
+
+ void disablePass(AnalysisKey *ID) {
+ BeforeCallbacks.emplace_back(
+ [ID](AnalysisKey *PassID) { return PassID != ID; });
+ }
+
+ MachineFunctionPassManager releasePM() { return std::move(PM); }
+
+ private:
+ MachineFunctionPassManager &PM;
+ SmallVector<llvm::unique_function<bool(AnalysisKey *)>, 4> BeforeCallbacks;
+ SmallVector<llvm::unique_function<void(AnalysisKey *)>, 4> AfterCallbacks;
+ };
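+
+  // Illustrative use from a derived builder (MyTargetPeepholePass is
+  // hypothetical): run an extra pass right after the machine scheduler, and
+  // drop a standard pass entirely:
+  //
+  //   addPass.insertPass(&MachineSchedulerPass::Key, MyTargetPeepholePass());
+  //   addPass.disablePass(&EarlyTailDuplicatePass::Key);
+  //
+  // Both only affect passes added after the callback is registered, so such
+  // calls should be made before the standard pipeline is populated.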
+
+ LLVMTargetMachine &TM;
+ CGPassBuilderOption Opt;
+ PassInstrumentationCallbacks *PIC;
+
+  /// Targets may override these hooks to register target-specific analyses.
+ void registerTargetAnalysis(ModuleAnalysisManager &) const {}
+ void registerTargetAnalysis(FunctionAnalysisManager &) const {}
+ void registerTargetAnalysis(MachineFunctionAnalysisManager &) const {}
+ std::pair<StringRef, bool> getTargetPassNameFromLegacyName(StringRef) const {
+ return {"", false};
+ }
+
+ template <typename TMC> TMC &getTM() const { return static_cast<TMC &>(TM); }
+ CodeGenOpt::Level getOptLevel() const { return TM.getOptLevel(); }
+
+ /// Check whether or not GlobalISel should abort on error.
+ /// When this is disabled, GlobalISel will fall back on SDISel instead of
+ /// erroring out.
+ bool isGlobalISelAbortEnabled() const {
+ return TM.Options.GlobalISelAbort == GlobalISelAbortMode::Enable;
+ }
+
+ /// Check whether or not a diagnostic should be emitted when GlobalISel
+ /// uses the fallback path. In other words, it will emit a diagnostic
+ /// when GlobalISel failed and isGlobalISelAbortEnabled is false.
+ bool reportDiagnosticWhenGlobalISelFallback() const {
+ return TM.Options.GlobalISelAbort == GlobalISelAbortMode::DisableWithDiag;
+ }
+
+ /// addInstSelector - This method should install an instruction selector pass,
+ /// which converts from LLVM code to machine instructions.
+ Error addInstSelector(AddMachinePass &) const {
+ return make_error<StringError>("addInstSelector is not overridden",
+ inconvertibleErrorCode());
+ }
+
+ /// Add passes that optimize instruction level parallelism for out-of-order
+ /// targets. These passes are run while the machine code is still in SSA
+ /// form, so they can use MachineTraceMetrics to control their heuristics.
+ ///
+ /// All passes added here should preserve the MachineDominatorTree,
+ /// MachineLoopInfo, and MachineTraceMetrics analyses.
+ void addILPOpts(AddMachinePass &) const {}
+
+ /// This method may be implemented by targets that want to run passes
+ /// immediately before register allocation.
+ void addPreRegAlloc(AddMachinePass &) const {}
+
+ /// addPreRewrite - Add passes to the optimized register allocation pipeline
+ /// after register allocation is complete, but before virtual registers are
+ /// rewritten to physical registers.
+ ///
+ /// These passes must preserve VirtRegMap and LiveIntervals, and when running
+ /// after RABasic or RAGreedy, they should take advantage of LiveRegMatrix.
+ /// When these passes run, VirtRegMap contains legal physreg assignments for
+ /// all virtual registers.
+ ///
+  /// Note that if the target overrides addRegAssignmentOptimized, this may not
+  /// be honored. This is also not generally used for the fast variant, where
+  /// the allocation and rewriting are done in one pass.
+ void addPreRewrite(AddMachinePass &) const {}
+
+ /// Add passes to be run immediately after virtual registers are rewritten
+ /// to physical registers.
+ void addPostRewrite(AddMachinePass &) const {}
+
+ /// This method may be implemented by targets that want to run passes after
+ /// register allocation pass pipeline but before prolog-epilog insertion.
+ void addPostRegAlloc(AddMachinePass &) const {}
+
+ /// This method may be implemented by targets that want to run passes after
+ /// prolog-epilog insertion and before the second instruction scheduling pass.
+ void addPreSched2(AddMachinePass &) const {}
+
+ /// This pass may be implemented by targets that want to run passes
+ /// immediately before machine code is emitted.
+ void addPreEmitPass(AddMachinePass &) const {}
+
+ /// Targets may add passes immediately before machine code is emitted in this
+ /// callback. This is called even later than `addPreEmitPass`.
+ // FIXME: Rename `addPreEmitPass` to something more sensible given its actual
+ // position and remove the `2` suffix here as this callback is what
+ // `addPreEmitPass` *should* be but in reality isn't.
+ void addPreEmitPass2(AddMachinePass &) const {}
+
+  /// @{ For GlobalISel
+ ///
+
+ /// addPreISel - This method should add any "last minute" LLVM->LLVM
+  /// passes (which are run just before the instruction selector).
+ void addPreISel(AddIRPass &) const {
+ llvm_unreachable("addPreISel is not overridden");
+ }
+
+ /// This method should install an IR translator pass, which converts from
+ /// LLVM code to machine instructions with possibly generic opcodes.
+ Error addIRTranslator(AddMachinePass &) const {
+ return make_error<StringError>("addIRTranslator is not overridden",
+ inconvertibleErrorCode());
+ }
+
+ /// This method may be implemented by targets that want to run passes
+ /// immediately before legalization.
+ void addPreLegalizeMachineIR(AddMachinePass &) const {}
+
+ /// This method should install a legalize pass, which converts the instruction
+ /// sequence into one that can be selected by the target.
+ Error addLegalizeMachineIR(AddMachinePass &) const {
+ return make_error<StringError>("addLegalizeMachineIR is not overridden",
+ inconvertibleErrorCode());
+ }
+
+ /// This method may be implemented by targets that want to run passes
+ /// immediately before the register bank selection.
+ void addPreRegBankSelect(AddMachinePass &) const {}
+
+ /// This method should install a register bank selector pass, which
+ /// assigns register banks to virtual registers without a register
+ /// class or register banks.
+ Error addRegBankSelect(AddMachinePass &) const {
+ return make_error<StringError>("addRegBankSelect is not overridden",
+ inconvertibleErrorCode());
+ }
+
+ /// This method may be implemented by targets that want to run passes
+ /// immediately before the (global) instruction selection.
+ void addPreGlobalInstructionSelect(AddMachinePass &) const {}
+
+ /// This method should install a (global) instruction selector pass, which
+ /// converts possibly generic instructions to fully target-specific
+ /// instructions, thereby constraining all generic virtual registers to
+ /// register classes.
+ Error addGlobalInstructionSelect(AddMachinePass &) const {
+ return make_error<StringError>(
+ "addGlobalInstructionSelect is not overridden",
+ inconvertibleErrorCode());
+ }
+  /// @}
+
+  /// High level function that adds all passes necessary to go from the LLVM IR
+  /// representation to the MI representation.
+  /// Adds IR based lowering and target specific optimization passes and finally
+  /// the core instruction selection passes.
+ void addISelPasses(AddIRPass &) const;
+
+ /// Add the actual instruction selection passes. This does not include
+ /// preparation passes on IR.
+ Error addCoreISelPasses(AddMachinePass &) const;
+
+ /// Add the complete, standard set of LLVM CodeGen passes.
+ /// Fully developed targets will not generally override this.
+ Error addMachinePasses(AddMachinePass &) const;
+
+ /// Add passes to lower exception handling for the code generator.
+ void addPassesToHandleExceptions(AddIRPass &) const;
+
+ /// Add common target configurable passes that perform LLVM IR to IR
+ /// transforms following machine independent optimization.
+ void addIRPasses(AddIRPass &) const;
+
+ /// Add pass to prepare the LLVM IR for code generation. This should be done
+ /// before exception handling preparation passes.
+ void addCodeGenPrepare(AddIRPass &) const;
+
+ /// Add common passes that perform LLVM IR to IR transforms in preparation for
+ /// instruction selection.
+ void addISelPrepare(AddIRPass &) const;
+
+ /// Methods with trivial inline returns are convenient points in the common
+ /// codegen pass pipeline where targets may insert passes. Methods with
+ /// out-of-line standard implementations are major CodeGen stages called by
+ /// addMachinePasses. Some targets may override major stages when inserting
+  /// passes is insufficient, but maintaining overridden stages is more work.
+ ///
+
+ /// addMachineSSAOptimization - Add standard passes that optimize machine
+ /// instructions in SSA form.
+ void addMachineSSAOptimization(AddMachinePass &) const;
+
+ /// addFastRegAlloc - Add the minimum set of target-independent passes that
+ /// are required for fast register allocation.
+ Error addFastRegAlloc(AddMachinePass &) const;
+
+ /// addOptimizedRegAlloc - Add passes related to register allocation.
+ /// LLVMTargetMachine provides standard regalloc passes for most targets.
+ void addOptimizedRegAlloc(AddMachinePass &) const;
+
+ /// Add passes that optimize machine instructions after register allocation.
+ void addMachineLateOptimization(AddMachinePass &) const;
+
+  /// addGCPasses - Add late codegen passes that analyze code for garbage
+  /// collection.
+ void addGCPasses(AddMachinePass &) const {}
+
+ /// Add standard basic block placement passes.
+ void addBlockPlacement(AddMachinePass &) const;
+
+ using CreateMCStreamer =
+ std::function<Expected<std::unique_ptr<MCStreamer>>(MCContext &)>;
+ void addAsmPrinter(AddMachinePass &, CreateMCStreamer) const {
+ llvm_unreachable("addAsmPrinter is not overridden");
+ }
+
+ /// Utilities for targets to add passes to the pass manager.
+ ///
+
+  /// addTargetRegisterAllocator - Add the register allocator pass for this
+  /// target at the current optimization level.
+ void addTargetRegisterAllocator(AddMachinePass &, bool Optimized) const;
+
+  /// addMachinePasses helper to create the target-selected or overridden
+ /// regalloc pass.
+ void addRegAllocPass(AddMachinePass &, bool Optimized) const;
+
+  /// Add core register allocator passes which do the actual register
+  /// assignment and rewriting.
+ Error addRegAssignmentFast(AddMachinePass &) const;
+ Error addRegAssignmentOptimized(AddMachinePass &) const;
+
+private:
+ DerivedT &derived() { return static_cast<DerivedT &>(*this); }
+ const DerivedT &derived() const {
+ return static_cast<const DerivedT &>(*this);
+ }
+};
+
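+// A minimal sketch of a derived builder (illustrative only; the target name
+// and MyTargetISelPass are hypothetical, and real targets typically override
+// the GlobalISel hooks as well):
+//
+//   class MyTargetCodeGenPassBuilder
+//       : public CodeGenPassBuilder<MyTargetCodeGenPassBuilder> {
+//   public:
+//     using CodeGenPassBuilder<MyTargetCodeGenPassBuilder>::CodeGenPassBuilder;
+//
+//     void addPreISel(AddIRPass &addPass) const {
+//       // Last-minute LLVM->LLVM lowering goes here.
+//     }
+//     Error addInstSelector(AddMachinePass &addPass) const {
+//       addPass(MyTargetISelPass()); // hypothetical instruction selector
+//       return Error::success();
+//     }
+//   };
+//
+// The hooks are statically dispatched through derived(), so overriding is done
+// by shadowing the base-class member, not via virtual functions.
+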
+template <typename Derived>
+Error CodeGenPassBuilder<Derived>::buildPipeline(
+ ModulePassManager &MPM, MachineFunctionPassManager &MFPM,
+ raw_pwrite_stream &Out, raw_pwrite_stream *DwoOut,
+ CodeGenFileType FileType) const {
+ AddIRPass addIRPass(MPM, Opt.DebugPM);
+ addISelPasses(addIRPass);
+
+ AddMachinePass addPass(MFPM);
+ if (auto Err = addCoreISelPasses(addPass))
+ return std::move(Err);
+
+ if (auto Err = derived().addMachinePasses(addPass))
+ return std::move(Err);
+
+ derived().addAsmPrinter(
+ addPass, [this, &Out, DwoOut, FileType](MCContext &Ctx) {
+ return this->TM.createMCStreamer(Out, DwoOut, FileType, Ctx);
+ });
+
+ addPass(FreeMachineFunctionPass());
+ return Error::success();
+}
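+
+// End-to-end usage sketch (illustrative only; assumes a derived builder like
+// the one sketched above, an LLVMTargetMachine TM, and analysis managers that
+// have been cross-registered in the usual new-PM fashion):
+//
+//   FunctionAnalysisManager FAM;
+//   ModuleAnalysisManager MAM;
+//   MachineFunctionAnalysisManager MFAM(FAM, MAM);
+//
+//   MyTargetCodeGenPassBuilder Builder(TM, Opts, /*PIC=*/nullptr);
+//   Builder.registerAnalyses(MFAM);
+//
+//   ModulePassManager MPM;
+//   MachineFunctionPassManager MFPM;
+//   if (Error Err = Builder.buildPipeline(MPM, MFPM, Out, /*DwoOut=*/nullptr,
+//                                         CGFT_ObjectFile))
+//     return Err;
+//   MPM.run(M, MAM);                    // IR portion of the pipeline
+//   if (Error Err = MFPM.run(M, MFAM))  // machine portion
+//     return Err;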
+
+static inline AAManager registerAAAnalyses(CFLAAType UseCFLAA) {
+ AAManager AA;
+
+ // The order in which these are registered determines their priority when
+ // being queried.
+
+ switch (UseCFLAA) {
+ case CFLAAType::Steensgaard:
+ AA.registerFunctionAnalysis<CFLSteensAA>();
+ break;
+ case CFLAAType::Andersen:
+ AA.registerFunctionAnalysis<CFLAndersAA>();
+ break;
+ case CFLAAType::Both:
+ AA.registerFunctionAnalysis<CFLAndersAA>();
+ AA.registerFunctionAnalysis<CFLSteensAA>();
+ break;
+ default:
+ break;
+ }
+
+ // Basic AliasAnalysis support.
+ // Add TypeBasedAliasAnalysis before BasicAliasAnalysis so that
+ // BasicAliasAnalysis wins if they disagree. This is intended to help
+ // support "obvious" type-punning idioms.
+ AA.registerFunctionAnalysis<TypeBasedAA>();
+ AA.registerFunctionAnalysis<ScopedNoAliasAA>();
+ AA.registerFunctionAnalysis<BasicAA>();
+
+ return AA;
+}
+
+template <typename Derived>
+void CodeGenPassBuilder<Derived>::registerModuleAnalyses(
+ ModuleAnalysisManager &MAM) const {
+#define MODULE_ANALYSIS(NAME, PASS_NAME, CONSTRUCTOR) \
+ MAM.registerPass([&] { return PASS_NAME CONSTRUCTOR; });
+#include "MachinePassRegistry.def"
+ derived().registerTargetAnalysis(MAM);
+}
+
+template <typename Derived>
+void CodeGenPassBuilder<Derived>::registerFunctionAnalyses(
+ FunctionAnalysisManager &FAM) const {
+ FAM.registerPass([this] { return registerAAAnalyses(this->Opt.UseCFLAA); });
+
+#define FUNCTION_ANALYSIS(NAME, PASS_NAME, CONSTRUCTOR) \
+ FAM.registerPass([&] { return PASS_NAME CONSTRUCTOR; });
+#include "MachinePassRegistry.def"
+ derived().registerTargetAnalysis(FAM);
+}
+
+template <typename Derived>
+void CodeGenPassBuilder<Derived>::registerMachineFunctionAnalyses(
+ MachineFunctionAnalysisManager &MFAM) const {
+#define MACHINE_FUNCTION_ANALYSIS(NAME, PASS_NAME, CONSTRUCTOR) \
+ MFAM.registerPass([&] { return PASS_NAME CONSTRUCTOR; });
+#include "MachinePassRegistry.def"
+ derived().registerTargetAnalysis(MFAM);
+}
+
+// FIXME: For the new PM, using the pass name directly on the command line
+// seems preferable.
+// Translate a legacy command-line pass name to the corresponding new pass
+// name. Returns the matching new pass name and a boolean value indicating
+// whether the pass is a machine pass.
+template <typename Derived>
+std::pair<StringRef, bool>
+CodeGenPassBuilder<Derived>::getPassNameFromLegacyName(StringRef Name) const {
+ std::pair<StringRef, bool> Ret;
+ if (Name.empty())
+ return Ret;
+
+#define FUNCTION_PASS(NAME, PASS_NAME, CONSTRUCTOR) \
+ if (Name == NAME) \
+ Ret = {#PASS_NAME, false};
+#define DUMMY_FUNCTION_PASS(NAME, PASS_NAME, CONSTRUCTOR) \
+ if (Name == NAME) \
+ Ret = {#PASS_NAME, false};
+#define MODULE_PASS(NAME, PASS_NAME, CONSTRUCTOR) \
+ if (Name == NAME) \
+ Ret = {#PASS_NAME, false};
+#define DUMMY_MODULE_PASS(NAME, PASS_NAME, CONSTRUCTOR) \
+ if (Name == NAME) \
+ Ret = {#PASS_NAME, false};
+#define MACHINE_MODULE_PASS(NAME, PASS_NAME, CONSTRUCTOR) \
+ if (Name == NAME) \
+ Ret = {#PASS_NAME, true};
+#define DUMMY_MACHINE_MODULE_PASS(NAME, PASS_NAME, CONSTRUCTOR) \
+ if (Name == NAME) \
+ Ret = {#PASS_NAME, true};
+#define MACHINE_FUNCTION_PASS(NAME, PASS_NAME, CONSTRUCTOR) \
+ if (Name == NAME) \
+ Ret = {#PASS_NAME, true};
+#define DUMMY_MACHINE_FUNCTION_PASS(NAME, PASS_NAME, CONSTRUCTOR) \
+ if (Name == NAME) \
+ Ret = {#PASS_NAME, true};
+#include "llvm/CodeGen/MachinePassRegistry.def"
+
+ if (Ret.first.empty())
+ Ret = derived().getTargetPassNameFromLegacyName(Name);
+
+ if (Ret.first.empty())
+ report_fatal_error(Twine('\"') + Twine(Name) +
+ Twine("\" pass could not be found."));
+
+ return Ret;
+}
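+
+// For example, assuming the registry maps the legacy name
+// "unreachableblockelim" to UnreachableBlockElimPass:
+//
+//   auto R = Builder.getPassNameFromLegacyName("unreachableblockelim");
+//   // R.first == "UnreachableBlockElimPass", R.second == false (IR pass)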
+
+template <typename Derived>
+void CodeGenPassBuilder<Derived>::addISelPasses(AddIRPass &addPass) const {
+ if (TM.useEmulatedTLS())
+ addPass(LowerEmuTLSPass());
+
+ addPass(PreISelIntrinsicLoweringPass());
+
+ derived().addIRPasses(addPass);
+ derived().addCodeGenPrepare(addPass);
+ addPassesToHandleExceptions(addPass);
+ derived().addISelPrepare(addPass);
+}
+
+/// Add common target configurable passes that perform LLVM IR to IR transforms
+/// following machine independent optimization.
+template <typename Derived>
+void CodeGenPassBuilder<Derived>::addIRPasses(AddIRPass &addPass) const {
+ // Before running any passes, run the verifier to determine if the input
+ // coming from the front-end and/or optimizer is valid.
+ if (!Opt.DisableVerify)
+ addPass(VerifierPass());
+
+ // Run loop strength reduction before anything else.
+ if (getOptLevel() != CodeGenOpt::None && !Opt.DisableLSR) {
+ addPass(createFunctionToLoopPassAdaptor(
+ LoopStrengthReducePass(), /*UseMemorySSA*/ true, Opt.DebugPM));
+ // FIXME: use -stop-after so we could remove PrintLSR
+ if (Opt.PrintLSR)
+ addPass(PrintFunctionPass(dbgs(), "\n\n*** Code after LSR ***\n"));
+ }
+
+ if (getOptLevel() != CodeGenOpt::None) {
+ // The MergeICmpsPass tries to create memcmp calls by grouping sequences of
+ // loads and compares. ExpandMemCmpPass then tries to expand those calls
+ // into optimally-sized loads and compares. The transforms are enabled by a
+ // target lowering hook.
+ if (!Opt.DisableMergeICmps)
+ addPass(MergeICmpsPass());
+ addPass(ExpandMemCmpPass());
+ }
+
+ // Run GC lowering passes for builtin collectors
+ // TODO: add a pass insertion point here
+ addPass(GCLoweringPass());
+ addPass(ShadowStackGCLoweringPass());
+ addPass(LowerConstantIntrinsicsPass());
+
+ // Make sure that no unreachable blocks are instruction selected.
+ addPass(UnreachableBlockElimPass());
+
+ // Prepare expensive constants for SelectionDAG.
+ if (getOptLevel() != CodeGenOpt::None && !Opt.DisableConstantHoisting)
+ addPass(ConstantHoistingPass());
+
+ if (getOptLevel() != CodeGenOpt::None && !Opt.DisablePartialLibcallInlining)
+ addPass(PartiallyInlineLibCallsPass());
+
+ // Instrument function entry and exit, e.g. with calls to mcount().
+ addPass(EntryExitInstrumenterPass(/*PostInlining=*/true));
+
+  // Add scalarization of the target's unsupported masked memory intrinsics.
+  // Each unsupported intrinsic is replaced with a chain of basic blocks that
+  // store/load the elements one-by-one if the appropriate mask bit is set.
+ addPass(ScalarizeMaskedMemIntrinPass());
+
+ // Expand reduction intrinsics into shuffle sequences if the target wants to.
+ addPass(ExpandReductionsPass());
+}
+
+/// Turn exception handling constructs into something the code generators can
+/// handle.
+template <typename Derived>
+void CodeGenPassBuilder<Derived>::addPassesToHandleExceptions(
+ AddIRPass &addPass) const {
+ const MCAsmInfo *MCAI = TM.getMCAsmInfo();
+ assert(MCAI && "No MCAsmInfo");
+ switch (MCAI->getExceptionHandlingType()) {
+ case ExceptionHandling::SjLj:
+    // SjLj piggy-backs on dwarf for this bit. The cleanups done apply to both.
+    // Dwarf EH prepare needs to be run after SjLj prepare. Otherwise,
+ // catch info can get misplaced when a selector ends up more than one block
+ // removed from the parent invoke(s). This could happen when a landing
+ // pad is shared by multiple invokes and is also a target of a normal
+ // edge from elsewhere.
+ addPass(SjLjEHPreparePass());
+ LLVM_FALLTHROUGH;
+ case ExceptionHandling::DwarfCFI:
+ case ExceptionHandling::ARM:
+ case ExceptionHandling::AIX:
+ addPass(DwarfEHPass(getOptLevel()));
+ break;
+ case ExceptionHandling::WinEH:
+ // We support using both GCC-style and MSVC-style exceptions on Windows, so
+ // add both preparation passes. Each pass will only actually run if it
+ // recognizes the personality function.
+ addPass(WinEHPass());
+ addPass(DwarfEHPass(getOptLevel()));
+ break;
+ case ExceptionHandling::Wasm:
+ // Wasm EH uses Windows EH instructions, but it does not need to demote PHIs
+ // on catchpads and cleanuppads because it does not outline them into
+ // funclets. Catchswitch blocks are not lowered in SelectionDAG, so we
+ // should remove PHIs there.
+ addPass(WinEHPass(/*DemoteCatchSwitchPHIOnly=*/false));
+ addPass(WasmEHPass());
+ break;
+ case ExceptionHandling::None:
+ addPass(LowerInvokePass());
+
+ // The lower invoke pass may create unreachable code. Remove it.
+ addPass(UnreachableBlockElimPass());
+ break;
+ }
+}
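+
+// To summarize the switch above, the preparation passes per EH model are:
+//   SjLj             -> SjLjEHPreparePass, then DwarfEHPass (fallthrough)
+//   DwarfCFI/ARM/AIX -> DwarfEHPass
+//   WinEH            -> WinEHPass, DwarfEHPass
+//   Wasm             -> WinEHPass(/*DemoteCatchSwitchPHIOnly=*/false),
+//                       WasmEHPass
+//   None             -> LowerInvokePass, UnreachableBlockElimPass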
+
+/// Add pass to prepare the LLVM IR for code generation. This should be done
+/// before exception handling preparation passes.
+template <typename Derived>
+void CodeGenPassBuilder<Derived>::addCodeGenPrepare(AddIRPass &addPass) const {
+ if (getOptLevel() != CodeGenOpt::None && !Opt.DisableCGP)
+ addPass(CodeGenPreparePass());
+ // TODO: Default ctor'd RewriteSymbolPass is no-op.
+ // addPass(RewriteSymbolPass());
+}
+
+/// Add common passes that perform LLVM IR to IR transforms in preparation for
+/// instruction selection.
+template <typename Derived>
+void CodeGenPassBuilder<Derived>::addISelPrepare(AddIRPass &addPass) const {
+ derived().addPreISel(addPass);
+
+ // Add both the safe stack and the stack protection passes: each of them will
+ // only protect functions that have corresponding attributes.
+ addPass(SafeStackPass());
+ addPass(StackProtectorPass());
+
+ if (Opt.PrintISelInput)
+ addPass(PrintFunctionPass(dbgs(),
+ "\n\n*** Final LLVM Code input to ISel ***\n"));
+
+ // All passes which modify the LLVM IR are now complete; run the verifier
+ // to ensure that the IR is valid.
+ if (!Opt.DisableVerify)
+ addPass(VerifierPass());
+}
+
+template <typename Derived>
+Error CodeGenPassBuilder<Derived>::addCoreISelPasses(
+ AddMachinePass &addPass) const {
+ // Enable FastISel with -fast-isel, but allow that to be overridden.
+ TM.setO0WantsFastISel(Opt.EnableFastISelOption.getValueOr(true));
+
+ // Determine an instruction selector.
+ enum class SelectorType { SelectionDAG, FastISel, GlobalISel };
+ SelectorType Selector;
+
+ if (Opt.EnableFastISelOption && *Opt.EnableFastISelOption == true)
+ Selector = SelectorType::FastISel;
+ else if ((Opt.EnableGlobalISelOption &&
+ *Opt.EnableGlobalISelOption == true) ||
+ (TM.Options.EnableGlobalISel &&
+ (!Opt.EnableGlobalISelOption ||
+ *Opt.EnableGlobalISelOption == false)))
+ Selector = SelectorType::GlobalISel;
+ else if (TM.getOptLevel() == CodeGenOpt::None && TM.getO0WantsFastISel())
+ Selector = SelectorType::FastISel;
+ else
+ Selector = SelectorType::SelectionDAG;
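+
+  // Selector precedence, as encoded above: an explicit request for FastISel
+  // wins; otherwise GlobalISel is chosen when requested or enabled by the
+  // target's defaults; otherwise FastISel is used at -O0 when the target asks
+  // for it; SelectionDAG is the fallback.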
+
+  // Consistently set TM.Options.EnableFastISel and TM.Options.EnableGlobalISel.
+ if (Selector == SelectorType::FastISel) {
+ TM.setFastISel(true);
+ TM.setGlobalISel(false);
+ } else if (Selector == SelectorType::GlobalISel) {
+ TM.setFastISel(false);
+ TM.setGlobalISel(true);
+ }
+
+ // Add instruction selector passes.
+ if (Selector == SelectorType::GlobalISel) {
+ if (auto Err = derived().addIRTranslator(addPass))
+ return std::move(Err);
+
+ derived().addPreLegalizeMachineIR(addPass);
+
+ if (auto Err = derived().addLegalizeMachineIR(addPass))
+ return std::move(Err);
+
+ // Before running the register bank selector, ask the target if it
+ // wants to run some passes.
+ derived().addPreRegBankSelect(addPass);
+
+ if (auto Err = derived().addRegBankSelect(addPass))
+ return std::move(Err);
+
+ derived().addPreGlobalInstructionSelect(addPass);
+
+ if (auto Err = derived().addGlobalInstructionSelect(addPass))
+ return std::move(Err);
+
+ // Pass to reset the MachineFunction if the ISel failed.
+ addPass(ResetMachineFunctionPass(reportDiagnosticWhenGlobalISelFallback(),
+ isGlobalISelAbortEnabled()));
+
+ // Provide a fallback path when we do not want to abort on
+ // not-yet-supported input.
+ if (!isGlobalISelAbortEnabled())
+ if (auto Err = derived().addInstSelector(addPass))
+ return std::move(Err);
+
+ } else if (auto Err = derived().addInstSelector(addPass))
+ return std::move(Err);
+
+ // Expand pseudo-instructions emitted by ISel. Don't run the verifier before
+ // FinalizeISel.
+ addPass(FinalizeISelPass());
+
+ // // Print the instruction selected machine code...
+ // printAndVerify("After Instruction Selection");
+
+ return Error::success();
+}
+
+/// Add the complete set of target-independent postISel code generator passes.
+///
+/// This can be read as the standard order of major LLVM CodeGen stages. Stages
+/// with nontrivial configuration or multiple passes are broken out below in
+/// add%Stage routines.
+///
+/// Any CodeGenPassBuilder<Derived>::addXX routine may be overridden by the
+/// target. The addPre/Post methods with empty header implementations allow
+/// injecting target-specific fixups just before or after major stages.
+/// Additionally, targets have the flexibility to change pass order within a
+/// stage by overriding default implementation of add%Stage routines below. Each
+/// technique has maintainability tradeoffs because alternate pass orders are
+/// not well supported. addPre/Post works better if the target pass is easily
+/// tied to a common pass. But if it has subtle dependencies on multiple passes,
+/// the target should override the stage instead.
+template <typename Derived>
+Error CodeGenPassBuilder<Derived>::addMachinePasses(
+ AddMachinePass &addPass) const {
+ // Add passes that optimize machine instructions in SSA form.
+ if (getOptLevel() != CodeGenOpt::None) {
+ derived().addMachineSSAOptimization(addPass);
+ } else {
+ // If the target requests it, assign local variables to stack slots relative
+ // to one another and simplify frame index references where possible.
+ addPass(LocalStackSlotPass());
+ }
+
+ if (TM.Options.EnableIPRA)
+ addPass(RegUsageInfoPropagationPass());
+
+ // Run pre-ra passes.
+ derived().addPreRegAlloc(addPass);
+
+ // Run register allocation and passes that are tightly coupled with it,
+ // including phi elimination and scheduling.
+ if (*Opt.OptimizeRegAlloc) {
+ derived().addOptimizedRegAlloc(addPass);
+ } else {
+ if (auto Err = derived().addFastRegAlloc(addPass))
+ return Err;
+ }
+
+ // Run post-ra passes.
+ derived().addPostRegAlloc(addPass);
+
+ // Insert prolog/epilog code. Eliminate abstract frame index references...
+ if (getOptLevel() != CodeGenOpt::None) {
+ addPass(PostRAMachineSinkingPass());
+ addPass(ShrinkWrapPass());
+ }
+
+ addPass(PrologEpilogInserterPass());
+
+  // Add passes that optimize machine instructions after register allocation.
+ if (getOptLevel() != CodeGenOpt::None)
+ derived().addMachineLateOptimization(addPass);
+
+ // Expand pseudo instructions before second scheduling pass.
+ addPass(ExpandPostRAPseudosPass());
+
+ // Run pre-sched2 passes.
+ derived().addPreSched2(addPass);
+
+ if (Opt.EnableImplicitNullChecks)
+ addPass(ImplicitNullChecksPass());
+
+ // Second pass scheduler.
+  // The target may optionally insert this pass itself at some other point.
+ if (getOptLevel() != CodeGenOpt::None &&
+ !TM.targetSchedulesPostRAScheduling()) {
+ if (Opt.MISchedPostRA)
+ addPass(PostMachineSchedulerPass());
+ else
+ addPass(PostRASchedulerPass());
+ }
+
+ // GC
+ derived().addGCPasses(addPass);
+
+ // Basic block placement.
+ if (getOptLevel() != CodeGenOpt::None)
+ derived().addBlockPlacement(addPass);
+
+ // Insert before XRay Instrumentation.
+ addPass(FEntryInserterPass());
+
+ addPass(XRayInstrumentationPass());
+ addPass(PatchableFunctionPass());
+
+ derived().addPreEmitPass(addPass);
+
+ if (TM.Options.EnableIPRA)
+ // Collect register usage information and produce a register mask of
+ // clobbered registers, to be used to optimize call sites.
+ addPass(RegUsageInfoCollectorPass());
+
+ addPass(FuncletLayoutPass());
+
+ addPass(StackMapLivenessPass());
+ addPass(LiveDebugValuesPass());
+
+ if (TM.Options.EnableMachineOutliner && getOptLevel() != CodeGenOpt::None &&
+ Opt.EnableMachineOutliner != RunOutliner::NeverOutline) {
+ bool RunOnAllFunctions =
+ (Opt.EnableMachineOutliner == RunOutliner::AlwaysOutline);
+ bool AddOutliner = RunOnAllFunctions || TM.Options.SupportsDefaultOutlining;
+ if (AddOutliner)
+ addPass(MachineOutlinerPass(RunOnAllFunctions));
+ }
+
+ // Add passes that directly emit MI after all other MI passes.
+ derived().addPreEmitPass2(addPass);
+
+ return Error::success();
+}
+
+/// Add passes that optimize machine instructions in SSA form.
+template <typename Derived>
+void CodeGenPassBuilder<Derived>::addMachineSSAOptimization(
+ AddMachinePass &addPass) const {
+ // Pre-ra tail duplication.
+ addPass(EarlyTailDuplicatePass());
+
+ // Optimize PHIs before DCE: removing dead PHI cycles may make more
+ // instructions dead.
+ addPass(OptimizePHIsPass());
+
+ // This pass merges large allocas. StackSlotColoring is a different pass
+ // which merges spill slots.
+ addPass(StackColoringPass());
+
+ // If the target requests it, assign local variables to stack slots relative
+ // to one another and simplify frame index references where possible.
+ addPass(LocalStackSlotPass());
+
+ // With optimization, dead code should already be eliminated. However
+ // there is one known exception: lowered code for arguments that are only
+ // used by tail calls, where the tail calls reuse the incoming stack
+ // arguments directly (see t11 in test/CodeGen/X86/sibcall.ll).
+ addPass(DeadMachineInstructionElimPass());
+
+ // Allow targets to insert passes that improve instruction level parallelism,
+ // like if-conversion. Such passes will typically need dominator trees and
+ // loop info, just like LICM and CSE below.
+ derived().addILPOpts(addPass);
+
+ addPass(EarlyMachineLICMPass());
+ addPass(MachineCSEPass());
+
+ addPass(MachineSinkingPass());
+
+ addPass(PeepholeOptimizerPass());
+ // Clean-up the dead code that may have been generated by peephole
+ // rewriting.
+ addPass(DeadMachineInstructionElimPass());
+}
+
+//===---------------------------------------------------------------------===//
+/// Register Allocation Pass Configuration
+//===---------------------------------------------------------------------===//
+
+/// Instantiate the default register allocator pass for this target for either
+/// the optimized or unoptimized allocation path. This will be added to the pass
+/// manager by addFastRegAlloc in the unoptimized case or addOptimizedRegAlloc
+/// in the optimized case.
+///
+/// A target that uses the standard regalloc pass order for fast or optimized
+/// allocation may still override this for per-target regalloc
+/// selection. But -regalloc=... always takes precedence.
+template <typename Derived>
+void CodeGenPassBuilder<Derived>::addTargetRegisterAllocator(
+ AddMachinePass &addPass, bool Optimized) const {
+ if (Optimized)
+ addPass(RAGreedyPass());
+ else
+ addPass(RAFastPass());
+}
+
+/// Find and instantiate the register allocation pass requested by this target
+/// at the current optimization level. Different register allocators are
+/// defined as separate passes because they may require different analyses.
+template <typename Derived>
+void CodeGenPassBuilder<Derived>::addRegAllocPass(AddMachinePass &addPass,
+ bool Optimized) const {
+ if (Opt.RegAlloc == RegAllocType::Default)
+ // With no -regalloc= override, ask the target for a regalloc pass.
+ derived().addTargetRegisterAllocator(addPass, Optimized);
+ else if (Opt.RegAlloc == RegAllocType::Basic)
+ addPass(RABasicPass());
+ else if (Opt.RegAlloc == RegAllocType::Fast)
+ addPass(RAFastPass());
+ else if (Opt.RegAlloc == RegAllocType::Greedy)
+ addPass(RAGreedyPass());
+ else if (Opt.RegAlloc == RegAllocType::PBQP)
+ addPass(RAPBQPPass());
+ else
+    llvm_unreachable("unknown register allocator type");
+}
+
+template <typename Derived>
+Error CodeGenPassBuilder<Derived>::addRegAssignmentFast(
+ AddMachinePass &addPass) const {
+ if (Opt.RegAlloc != RegAllocType::Default &&
+ Opt.RegAlloc != RegAllocType::Fast)
+ return make_error<StringError>(
+ "Must use fast (default) register allocator for unoptimized regalloc.",
+ inconvertibleErrorCode());
+
+ addRegAllocPass(addPass, false);
+ return Error::success();
+}
+
+template <typename Derived>
+Error CodeGenPassBuilder<Derived>::addRegAssignmentOptimized(
+ AddMachinePass &addPass) const {
+ // Add the selected register allocation pass.
+ addRegAllocPass(addPass, true);
+
+ // Allow targets to change the register assignments before rewriting.
+ derived().addPreRewrite(addPass);
+
+ // Finally rewrite virtual registers.
+ addPass(VirtRegRewriterPass());
+ // Perform stack slot coloring and post-ra machine LICM.
+ //
+ // FIXME: Re-enable coloring with register when it's capable of adding
+ // kill markers.
+ addPass(StackSlotColoringPass());
+
+ return Error::success();
+}
+
+/// Add the minimum set of target-independent passes that are required for
+/// register allocation. No coalescing or scheduling.
+template <typename Derived>
+Error CodeGenPassBuilder<Derived>::addFastRegAlloc(
+ AddMachinePass &addPass) const {
+ addPass(PHIEliminationPass());
+ addPass(TwoAddressInstructionPass());
+ return derived().addRegAssignmentFast(addPass);
+}
+
+/// Add standard target-independent passes that are tightly coupled with
+/// optimized register allocation, including coalescing, machine instruction
+/// scheduling, and register allocation itself.
+template <typename Derived>
+void CodeGenPassBuilder<Derived>::addOptimizedRegAlloc(
+ AddMachinePass &addPass) const {
+ addPass(DetectDeadLanesPass());
+
+ addPass(ProcessImplicitDefsPass());
+
+ // Edge splitting is smarter with machine loop info.
+ addPass(PHIEliminationPass());
+
+ // Eventually, we want to run LiveIntervals before PHI elimination.
+ if (Opt.EarlyLiveIntervals)
+ addPass(LiveIntervalsPass());
+
+ addPass(TwoAddressInstructionPass());
+ addPass(RegisterCoalescerPass());
+
+  // The machine scheduler may accidentally create disconnected components
+  // when moving subregister definitions around; avoid this by splitting them
+  // into separate vregs beforehand. Splitting can also improve register
+  // allocation quality.
+ addPass(RenameIndependentSubregsPass());
+
+ // PreRA instruction scheduling.
+ addPass(MachineSchedulerPass());
+
+ if (derived().addRegAssignmentOptimized(addPass)) {
+ // Allow targets to expand pseudo instructions depending on the choice of
+ // registers before MachineCopyPropagation.
+ derived().addPostRewrite(addPass);
+
+ // Copy propagate to forward register uses and try to eliminate COPYs that
+ // were not coalesced.
+ addPass(MachineCopyPropagationPass());
+
+ // Run post-ra machine LICM to hoist reloads / remats.
+ //
+ // FIXME: can this move into MachineLateOptimization?
+ addPass(MachineLICMPass());
+ }
+}
+
+//===---------------------------------------------------------------------===//
+/// Post RegAlloc Pass Configuration
+//===---------------------------------------------------------------------===//
+
+/// Add passes that optimize machine instructions after register allocation.
+template <typename Derived>
+void CodeGenPassBuilder<Derived>::addMachineLateOptimization(
+ AddMachinePass &addPass) const {
+ // Branch folding must be run after regalloc and prolog/epilog insertion.
+ addPass(BranchFolderPass());
+
+ // Tail duplication.
+  // Note that duplicating the tail just increases code size and degrades
+  // performance for targets that require Structured Control Flow, and can
+  // also make the CFG irreducible; thus we disable it for such targets.
+ if (!TM.requiresStructuredCFG())
+ addPass(TailDuplicatePass());
+
+ // Copy propagation.
+ addPass(MachineCopyPropagationPass());
+}
+
+/// Add standard basic block placement passes.
+template <typename Derived>
+void CodeGenPassBuilder<Derived>::addBlockPlacement(
+ AddMachinePass &addPass) const {
+ addPass(MachineBlockPlacementPass());
+ // Run a separate pass to collect block placement statistics.
+ if (Opt.EnableBlockPlacementStats)
+ addPass(MachineBlockPlacementStatsPass());
+}
+
+} // namespace llvm
+
+#endif // LLVM_CODEGEN_CODEGENPASSBUILDER_H
+
+#ifdef __GNUC__
+#pragma GCC diagnostic pop
+#endif
diff --git a/contrib/libs/llvm12/include/llvm/CodeGen/CommandFlags.h b/contrib/libs/llvm12/include/llvm/CodeGen/CommandFlags.h
index 8e7b01c552..761d8f14cb 100644
--- a/contrib/libs/llvm12/include/llvm/CodeGen/CommandFlags.h
+++ b/contrib/libs/llvm12/include/llvm/CodeGen/CommandFlags.h
@@ -21,7 +21,7 @@
#include "llvm/ADT/FloatingPointMode.h"
#include "llvm/ADT/StringExtras.h"
-#include "llvm/ADT/Triple.h"
+#include "llvm/ADT/Triple.h"
#include "llvm/IR/Instructions.h"
#include "llvm/IR/Intrinsics.h"
#include "llvm/MC/MCTargetOptionsCommandFlags.h"
@@ -82,8 +82,8 @@ bool getDontPlaceZerosInBSS();
bool getEnableGuaranteedTailCallOpt();
-bool getEnableAIXExtendedAltivecABI();
-
+bool getEnableAIXExtendedAltivecABI();
+
bool getDisableTailCalls();
bool getStackSymbolOrdering();
@@ -104,16 +104,16 @@ Optional<bool> getExplicitDataSections();
bool getFunctionSections();
Optional<bool> getExplicitFunctionSections();
-bool getIgnoreXCOFFVisibility();
-
-bool getXCOFFTracebackTable();
-
+bool getIgnoreXCOFFVisibility();
+
+bool getXCOFFTracebackTable();
+
std::string getBBSections();
-std::string getStackProtectorGuard();
-unsigned getStackProtectorGuardOffset();
-std::string getStackProtectorGuardReg();
-
+std::string getStackProtectorGuard();
+unsigned getStackProtectorGuardOffset();
+std::string getStackProtectorGuardReg();
+
unsigned getTLSSize();
bool getEmulatedTLS();
@@ -132,14 +132,14 @@ bool getEnableAddrsig();
bool getEmitCallSiteInfo();
-bool getEnableMachineFunctionSplitter();
-
+bool getEnableMachineFunctionSplitter();
+
bool getEnableDebugEntryValues();
-bool getPseudoProbeForProfiling();
-
-bool getValueTrackingVariableLocations();
-
+bool getPseudoProbeForProfiling();
+
+bool getValueTrackingVariableLocations();
+
bool getForceDwarfFrameSection();
bool getXRayOmitFunctionIndex();
@@ -152,17 +152,17 @@ struct RegisterCodeGenFlags {
llvm::BasicBlockSection getBBSectionsMode(llvm::TargetOptions &Options);
-llvm::StackProtectorGuards
-getStackProtectorGuardMode(llvm::TargetOptions &Options);
-
-/// Common utility function tightly tied to the options listed here. Initializes
-/// a TargetOptions object with CodeGen flags and returns it.
-/// \p TheTriple is used to determine the default value for options if
-/// options are not explicitly specified. If those triple dependant options
-/// value do not have effect for your component, a default Triple() could be
-/// passed in.
-TargetOptions InitTargetOptionsFromCodeGenFlags(const llvm::Triple &TheTriple);
-
+llvm::StackProtectorGuards
+getStackProtectorGuardMode(llvm::TargetOptions &Options);
+
+/// Common utility function tightly tied to the options listed here. Initializes
+/// a TargetOptions object with CodeGen flags and returns it.
+/// \p TheTriple is used to determine the default values for options that are
+/// not explicitly specified. If those triple-dependent option values have no
+/// effect for your component, a default Triple() can be passed in.
+TargetOptions InitTargetOptionsFromCodeGenFlags(const llvm::Triple &TheTriple);
+
std::string getCPUStr();
std::string getFeaturesStr();
diff --git a/contrib/libs/llvm12/include/llvm/CodeGen/DIE.h b/contrib/libs/llvm12/include/llvm/CodeGen/DIE.h
index dda38928e5..8e2c9a52eb 100644
--- a/contrib/libs/llvm12/include/llvm/CodeGen/DIE.h
+++ b/contrib/libs/llvm12/include/llvm/CodeGen/DIE.h
@@ -254,7 +254,7 @@ public:
unsigned SizeOf(const AsmPrinter *AP, dwarf::Form Form) const;
void print(raw_ostream &O) const;
- uint64_t getIndex() const { return Index; }
+ uint64_t getIndex() const { return Index; }
};
//===--------------------------------------------------------------------===//
@@ -390,12 +390,12 @@ private:
static_assert(std::is_standard_layout<T>::value ||
std::is_pointer<T>::value,
"Expected standard layout or pointer");
- new (reinterpret_cast<void *>(&Val)) T(V);
+ new (reinterpret_cast<void *>(&Val)) T(V);
}
- template <class T> T *get() { return reinterpret_cast<T *>(&Val); }
+ template <class T> T *get() { return reinterpret_cast<T *>(&Val); }
template <class T> const T *get() const {
- return reinterpret_cast<const T *>(&Val);
+ return reinterpret_cast<const T *>(&Val);
}
template <class T> void destruct() { get<T>()->~T(); }
@@ -794,7 +794,7 @@ public:
/// Get the absolute offset within the .debug_info or .debug_types section
/// for this DIE.
- uint64_t getDebugSectionOffset() const;
+ uint64_t getDebugSectionOffset() const;
/// Compute the offset of this DIE and all its children.
///
@@ -874,7 +874,7 @@ protected:
virtual ~DIEUnit() = default;
public:
- explicit DIEUnit(dwarf::Tag UnitTag);
+ explicit DIEUnit(dwarf::Tag UnitTag);
DIEUnit(const DIEUnit &RHS) = delete;
DIEUnit(DIEUnit &&RHS) = delete;
void operator=(const DIEUnit &RHS) = delete;
@@ -896,14 +896,14 @@ public:
///
/// \returns Section pointer which can be NULL.
MCSection *getSection() const { return Section; }
- void setDebugSectionOffset(uint64_t O) { Offset = O; }
- uint64_t getDebugSectionOffset() const { return Offset; }
+ void setDebugSectionOffset(uint64_t O) { Offset = O; }
+ uint64_t getDebugSectionOffset() const { return Offset; }
DIE &getUnitDie() { return Die; }
const DIE &getUnitDie() const { return Die; }
};
struct BasicDIEUnit final : DIEUnit {
- explicit BasicDIEUnit(dwarf::Tag UnitTag) : DIEUnit(UnitTag) {}
+ explicit BasicDIEUnit(dwarf::Tag UnitTag) : DIEUnit(UnitTag) {}
};
//===--------------------------------------------------------------------===//
diff --git a/contrib/libs/llvm12/include/llvm/CodeGen/DbgEntityHistoryCalculator.h b/contrib/libs/llvm12/include/llvm/CodeGen/DbgEntityHistoryCalculator.h
index 2062bdea72..7b6dd22d4c 100644
--- a/contrib/libs/llvm12/include/llvm/CodeGen/DbgEntityHistoryCalculator.h
+++ b/contrib/libs/llvm12/include/llvm/CodeGen/DbgEntityHistoryCalculator.h
@@ -19,7 +19,7 @@
#include "llvm/ADT/MapVector.h"
#include "llvm/ADT/PointerIntPair.h"
#include "llvm/ADT/SmallVector.h"
-#include "llvm/CodeGen/LexicalScopes.h"
+#include "llvm/CodeGen/LexicalScopes.h"
#include <utility>
namespace llvm {
@@ -31,24 +31,24 @@ class MachineFunction;
class MachineInstr;
class TargetRegisterInfo;
-/// Record instruction ordering so we can query their relative positions within
-/// a function. Meta instructions are given the same ordinal as the preceding
-/// non-meta instruction. Class state is invalid if MF is modified after
-/// calling initialize.
-class InstructionOrdering {
-public:
- void initialize(const MachineFunction &MF);
- void clear() { InstNumberMap.clear(); }
-
- /// Check if instruction \p A comes before \p B, where \p A and \p B both
- /// belong to the MachineFunction passed to initialize().
- bool isBefore(const MachineInstr *A, const MachineInstr *B) const;
-
-private:
- /// Each instruction is assigned an order number.
- DenseMap<const MachineInstr *, unsigned> InstNumberMap;
-};
-
+/// Record instruction ordering so we can query the relative positions of
+/// instructions within a function. Meta instructions are given the same
+/// ordinal as the preceding non-meta instruction. Class state is invalid if
+/// MF is modified after calling initialize.
+class InstructionOrdering {
+public:
+ void initialize(const MachineFunction &MF);
+ void clear() { InstNumberMap.clear(); }
+
+ /// Check if instruction \p A comes before \p B, where \p A and \p B both
+ /// belong to the MachineFunction passed to initialize().
+ bool isBefore(const MachineInstr *A, const MachineInstr *B) const;
+
+private:
+ /// Each instruction is assigned an order number.
+ DenseMap<const MachineInstr *, unsigned> InstNumberMap;
+};
+
/// For each user variable, keep a list of instruction ranges where this
/// variable is accessible. The variables are listed in order of appearance.
class DbgValueHistoryMap {
@@ -78,8 +78,8 @@ public:
/// register-described debug values that have their end index
/// set to this entry's position in the entry vector.
class Entry {
- friend DbgValueHistoryMap;
-
+ friend DbgValueHistoryMap;
+
public:
enum EntryKind { DbgValue, Clobber };
@@ -117,9 +117,9 @@ public:
return Entries[Index];
}
- /// Drop location ranges which exist entirely outside each variable's scope.
- void trimLocationRanges(const MachineFunction &MF, LexicalScopes &LScopes,
- const InstructionOrdering &Ordering);
+ /// Drop location ranges which exist entirely outside each variable's scope.
+ void trimLocationRanges(const MachineFunction &MF, LexicalScopes &LScopes,
+ const InstructionOrdering &Ordering);
bool empty() const { return VarEntries.empty(); }
void clear() { VarEntries.clear(); }
EntriesMap::const_iterator begin() const { return VarEntries.begin(); }
diff --git a/contrib/libs/llvm12/include/llvm/CodeGen/DebugHandlerBase.h b/contrib/libs/llvm12/include/llvm/CodeGen/DebugHandlerBase.h
index c61873c8ea..8f8d2ed909 100644
--- a/contrib/libs/llvm12/include/llvm/CodeGen/DebugHandlerBase.h
+++ b/contrib/libs/llvm12/include/llvm/CodeGen/DebugHandlerBase.h
@@ -117,13 +117,13 @@ protected:
virtual void endFunctionImpl(const MachineFunction *MF) = 0;
virtual void skippedNonDebugFunction() {}
-private:
- InstructionOrdering InstOrdering;
-
+private:
+ InstructionOrdering InstOrdering;
+
// AsmPrinterHandler overrides.
public:
- void beginModule(Module *M) override;
-
+ void beginModule(Module *M) override;
+
void beginInstruction(const MachineInstr *MI) override;
void endInstruction() override;
@@ -141,14 +141,14 @@ public:
/// If this type is derived from a base type then return base type size.
static uint64_t getBaseTypeSize(const DIType *Ty);
-
- /// Return true if type encoding is unsigned.
- static bool isUnsignedDIType(const DIType *Ty);
-
- const InstructionOrdering &getInstOrdering() const { return InstOrdering; }
+
+ /// Return true if type encoding is unsigned.
+ static bool isUnsignedDIType(const DIType *Ty);
+
+ const InstructionOrdering &getInstOrdering() const { return InstOrdering; }
};
-} // namespace llvm
+} // namespace llvm
#endif
diff --git a/contrib/libs/llvm12/include/llvm/CodeGen/DwarfStringPoolEntry.h b/contrib/libs/llvm12/include/llvm/CodeGen/DwarfStringPoolEntry.h
index abdf6f026f..131fadea32 100644
--- a/contrib/libs/llvm12/include/llvm/CodeGen/DwarfStringPoolEntry.h
+++ b/contrib/libs/llvm12/include/llvm/CodeGen/DwarfStringPoolEntry.h
@@ -28,7 +28,7 @@ struct DwarfStringPoolEntry {
static constexpr unsigned NotIndexed = -1;
MCSymbol *Symbol;
- uint64_t Offset;
+ uint64_t Offset;
unsigned Index;
bool isIndexed() const { return Index != NotIndexed; }
@@ -54,7 +54,7 @@ public:
assert(getMapEntry()->second.Symbol && "No symbol available!");
return getMapEntry()->second.Symbol;
}
- uint64_t getOffset() const { return getMapEntry()->second.Offset; }
+ uint64_t getOffset() const { return getMapEntry()->second.Offset; }
bool isIndexed() const { return MapEntryAndIndexed.getInt(); }
unsigned getIndex() const {
assert(isIndexed());
diff --git a/contrib/libs/llvm12/include/llvm/CodeGen/FastISel.h b/contrib/libs/llvm12/include/llvm/CodeGen/FastISel.h
index d879fb1388..30885f0ad8 100644
--- a/contrib/libs/llvm12/include/llvm/CodeGen/FastISel.h
+++ b/contrib/libs/llvm12/include/llvm/CodeGen/FastISel.h
@@ -249,7 +249,7 @@ public:
/// be appended.
void startNewBlock();
- /// Flush the local value map.
+ /// Flush the local value map.
void finishBasicBlock();
/// Return current debug location information.
@@ -316,7 +316,7 @@ public:
void removeDeadCode(MachineBasicBlock::iterator I,
MachineBasicBlock::iterator E);
- using SavePoint = MachineBasicBlock::iterator;
+ using SavePoint = MachineBasicBlock::iterator;
/// Prepare InsertPt to begin inserting instructions into the local
/// value area and return the old insert position.
@@ -497,10 +497,10 @@ protected:
/// - \c Add has a constant operand.
bool canFoldAddIntoGEP(const User *GEP, const Value *Add);
- /// Test whether the register associated with this value has exactly one use,
- /// in which case that single use is killing. Note that multiple IR values
- /// may map onto the same register, in which case this is not the same as
- /// checking that an IR value has one use.
+ /// Test whether the register associated with this value has exactly one use,
+ /// in which case that single use is killing. Note that multiple IR values
+ /// may map onto the same register, in which case this is not the same as
+ /// checking that an IR value has one use.
bool hasTrivialKill(const Value *V);
/// Create a machine mem operand from the given instruction.
diff --git a/contrib/libs/llvm12/include/llvm/CodeGen/FunctionLoweringInfo.h b/contrib/libs/llvm12/include/llvm/CodeGen/FunctionLoweringInfo.h
index 9fe48ef5ff..665c243054 100644
--- a/contrib/libs/llvm12/include/llvm/CodeGen/FunctionLoweringInfo.h
+++ b/contrib/libs/llvm12/include/llvm/CodeGen/FunctionLoweringInfo.h
@@ -98,34 +98,34 @@ public:
/// Track virtual registers created for exception pointers.
DenseMap<const Value *, Register> CatchPadExceptionPointers;
- /// Helper object to track which of three possible relocation mechanisms are
- /// used for a particular value being relocated over a statepoint.
- struct StatepointRelocationRecord {
- enum RelocType {
- // Value did not need to be relocated and can be used directly.
- NoRelocate,
- // Value was spilled to stack and needs filled at the gc.relocate.
- Spill,
- // Value was lowered to tied def and gc.relocate should be replaced with
- // copy from vreg.
- VReg,
- } type = NoRelocate;
- // Payload contains either frame index of the stack slot in which the value
- // was spilled, or virtual register which contains the re-definition.
- union payload_t {
- payload_t() : FI(-1) {}
- int FI;
- Register Reg;
- } payload;
- };
-
- /// Keep track of each value which was relocated and the strategy used to
- /// relocate that value. This information is required when visiting
- /// gc.relocates which may appear in following blocks.
- using StatepointSpillMapTy =
- DenseMap<const Value *, StatepointRelocationRecord>;
- DenseMap<const Instruction *, StatepointSpillMapTy> StatepointRelocationMaps;
-
+ /// Helper object to track which of three possible relocation mechanisms are
+ /// used for a particular value being relocated over a statepoint.
+ struct StatepointRelocationRecord {
+ enum RelocType {
+ // Value did not need to be relocated and can be used directly.
+ NoRelocate,
+      // Value was spilled to the stack and needs to be filled at the gc.relocate.
+ Spill,
+ // Value was lowered to tied def and gc.relocate should be replaced with
+ // copy from vreg.
+ VReg,
+ } type = NoRelocate;
+    // Payload contains either the frame index of the stack slot in which the
+    // value was spilled, or the virtual register which contains the
+    // re-definition.
+ union payload_t {
+ payload_t() : FI(-1) {}
+ int FI;
+ Register Reg;
+ } payload;
+ };
+
+ /// Keep track of each value which was relocated and the strategy used to
+ /// relocate that value. This information is required when visiting
+ /// gc.relocates which may appear in following blocks.
+ using StatepointSpillMapTy =
+ DenseMap<const Value *, StatepointRelocationRecord>;
+ DenseMap<const Instruction *, StatepointSpillMapTy> StatepointRelocationMaps;
+
/// StaticAllocaMap - Keep track of frame indices for fixed sized allocas in
/// the entry block. This allows the allocas to be efficiently referenced
/// anywhere in the function.
diff --git a/contrib/libs/llvm12/include/llvm/CodeGen/GlobalISel/CSEInfo.h b/contrib/libs/llvm12/include/llvm/CodeGen/GlobalISel/CSEInfo.h
index 83db4418b3..92da54451d 100644
--- a/contrib/libs/llvm12/include/llvm/CodeGen/GlobalISel/CSEInfo.h
+++ b/contrib/libs/llvm12/include/llvm/CodeGen/GlobalISel/CSEInfo.h
@@ -25,10 +25,10 @@
#include "llvm/CodeGen/GlobalISel/GISelWorkList.h"
#include "llvm/CodeGen/MachineFunctionPass.h"
#include "llvm/Support/Allocator.h"
-#include "llvm/Support/CodeGen.h"
+#include "llvm/Support/CodeGen.h"
namespace llvm {
-class MachineBasicBlock;
+class MachineBasicBlock;
/// A class that wraps MachineInstrs and derives from FoldingSetNode in order to
/// be uniqued in a CSEMap. The tradeoff here is extra memory allocations for
@@ -189,8 +189,8 @@ public:
const GISelInstProfileBuilder &addNodeIDRegNum(Register Reg) const;
- const GISelInstProfileBuilder &addNodeIDReg(Register Reg) const;
-
+ const GISelInstProfileBuilder &addNodeIDReg(Register Reg) const;
+
const GISelInstProfileBuilder &addNodeIDImmediate(int64_t Imm) const;
const GISelInstProfileBuilder &
addNodeIDMBB(const MachineBasicBlock *MBB) const;
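Because every addNodeID* method returns the builder by const reference, a profile is assembled as one chain. A hedged sketch of the idea; real profiles are computed inside GISelCSEInfo, so the function below is purely illustrative:

    #include "llvm/CodeGen/GlobalISel/CSEInfo.h"
    using namespace llvm;

    // Sketch only: how a FoldingSet profile for a two-operand generic
    // instruction might be assembled.
    static void profileBinOp(const GISelInstProfileBuilder &B, unsigned Opc,
                             Register Dst, Register Src0, Register Src1) {
      B.addNodeIDOpcode(Opc)
          .addNodeIDReg(Dst)      // def: register plus its type/bank
          .addNodeIDRegNum(Src0)  // uses: by register number only
          .addNodeIDRegNum(Src1);
    }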
diff --git a/contrib/libs/llvm12/include/llvm/CodeGen/GlobalISel/CallLowering.h b/contrib/libs/llvm12/include/llvm/CodeGen/GlobalISel/CallLowering.h
index 549f200269..842d7cf08b 100644
--- a/contrib/libs/llvm12/include/llvm/CodeGen/GlobalISel/CallLowering.h
+++ b/contrib/libs/llvm12/include/llvm/CodeGen/GlobalISel/CallLowering.h
@@ -24,11 +24,11 @@
#include "llvm/ADT/ArrayRef.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/CodeGen/CallingConvLower.h"
-#include "llvm/CodeGen/MachineOperand.h"
+#include "llvm/CodeGen/MachineOperand.h"
#include "llvm/CodeGen/TargetCallingConv.h"
-#include "llvm/IR/Attributes.h"
+#include "llvm/IR/Attributes.h"
#include "llvm/IR/CallingConv.h"
-#include "llvm/IR/Type.h"
+#include "llvm/IR/Type.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/MachineValueType.h"
#include <cstdint>
@@ -39,7 +39,7 @@ namespace llvm {
class CallBase;
class DataLayout;
class Function;
-class FunctionLoweringInfo;
+class FunctionLoweringInfo;
class MachineIRBuilder;
struct MachinePointerInfo;
class MachineRegisterInfo;
@@ -51,20 +51,20 @@ class CallLowering {
virtual void anchor();
public:
- struct BaseArgInfo {
- Type *Ty;
- SmallVector<ISD::ArgFlagsTy, 4> Flags;
- bool IsFixed;
-
- BaseArgInfo(Type *Ty,
- ArrayRef<ISD::ArgFlagsTy> Flags = ArrayRef<ISD::ArgFlagsTy>(),
- bool IsFixed = true)
- : Ty(Ty), Flags(Flags.begin(), Flags.end()), IsFixed(IsFixed) {}
-
- BaseArgInfo() : Ty(nullptr), IsFixed(false) {}
- };
-
- struct ArgInfo : public BaseArgInfo {
+ struct BaseArgInfo {
+ Type *Ty;
+ SmallVector<ISD::ArgFlagsTy, 4> Flags;
+ bool IsFixed;
+
+ BaseArgInfo(Type *Ty,
+ ArrayRef<ISD::ArgFlagsTy> Flags = ArrayRef<ISD::ArgFlagsTy>(),
+ bool IsFixed = true)
+ : Ty(Ty), Flags(Flags.begin(), Flags.end()), IsFixed(IsFixed) {}
+
+ BaseArgInfo() : Ty(nullptr), IsFixed(false) {}
+ };
+
+ struct ArgInfo : public BaseArgInfo {
SmallVector<Register, 4> Regs;
// If the argument had to be split into multiple parts according to the
// target calling convention, then this contains the original vregs
@@ -74,7 +74,7 @@ public:
ArgInfo(ArrayRef<Register> Regs, Type *Ty,
ArrayRef<ISD::ArgFlagsTy> Flags = ArrayRef<ISD::ArgFlagsTy>(),
bool IsFixed = true)
- : BaseArgInfo(Ty, Flags, IsFixed), Regs(Regs.begin(), Regs.end()) {
+ : BaseArgInfo(Ty, Flags, IsFixed), Regs(Regs.begin(), Regs.end()) {
if (!Regs.empty() && Flags.empty())
this->Flags.push_back(ISD::ArgFlagsTy());
// FIXME: We should have just one way of saying "no register".
@@ -83,7 +83,7 @@ public:
"only void types should have no register");
}
- ArgInfo() : BaseArgInfo() {}
+ ArgInfo() : BaseArgInfo() {}
};
struct CallLoweringInfo {
@@ -119,15 +119,15 @@ public:
/// True if the call is to a vararg function.
bool IsVarArg = false;
-
- /// True if the function's return value can be lowered to registers.
- bool CanLowerReturn = true;
-
- /// VReg to hold the hidden sret parameter.
- Register DemoteRegister;
-
- /// The stack index for sret demotion.
- int DemoteStackIndex;
+
+ /// True if the function's return value can be lowered to registers.
+ bool CanLowerReturn = true;
+
+ /// VReg to hold the hidden sret parameter.
+ Register DemoteRegister;
+
+ /// The stack index for sret demotion.
+ int DemoteStackIndex;
};
/// Argument handling is mostly uniform between the four places that
@@ -137,18 +137,18 @@ public:
/// argument should go, exactly what happens can vary slightly. This
/// class abstracts the differences.
struct ValueHandler {
- ValueHandler(bool IsIncoming, MachineIRBuilder &MIRBuilder,
- MachineRegisterInfo &MRI, CCAssignFn *AssignFn)
- : MIRBuilder(MIRBuilder), MRI(MRI), AssignFn(AssignFn),
- IsIncomingArgumentHandler(IsIncoming) {}
+ ValueHandler(bool IsIncoming, MachineIRBuilder &MIRBuilder,
+ MachineRegisterInfo &MRI, CCAssignFn *AssignFn)
+ : MIRBuilder(MIRBuilder), MRI(MRI), AssignFn(AssignFn),
+ IsIncomingArgumentHandler(IsIncoming) {}
virtual ~ValueHandler() = default;
/// Returns true if the handler is dealing with incoming arguments,
/// i.e. those that move values from some physical location to vregs.
- bool isIncomingArgumentHandler() const {
- return IsIncomingArgumentHandler;
- }
+ bool isIncomingArgumentHandler() const {
+ return IsIncomingArgumentHandler;
+ }
/// Materialize a VReg containing the address of the specified
/// stack-based object. This is either based on a FrameIndex or
@@ -176,7 +176,7 @@ public:
virtual void assignValueToAddress(const ArgInfo &Arg, Register Addr,
uint64_t Size, MachinePointerInfo &MPO,
CCValAssign &VA) {
- assert(Arg.Regs.size() == 1);
+ assert(Arg.Regs.size() == 1);
assignValueToAddress(Arg.Regs[0], Addr, Size, MPO, VA);
}
@@ -207,22 +207,22 @@ public:
CCAssignFn *AssignFn;
private:
- bool IsIncomingArgumentHandler;
+ bool IsIncomingArgumentHandler;
virtual void anchor();
};
- struct IncomingValueHandler : public ValueHandler {
- IncomingValueHandler(MachineIRBuilder &MIRBuilder, MachineRegisterInfo &MRI,
- CCAssignFn *AssignFn)
- : ValueHandler(true, MIRBuilder, MRI, AssignFn) {}
- };
-
- struct OutgoingValueHandler : public ValueHandler {
- OutgoingValueHandler(MachineIRBuilder &MIRBuilder, MachineRegisterInfo &MRI,
- CCAssignFn *AssignFn)
- : ValueHandler(false, MIRBuilder, MRI, AssignFn) {}
- };
-
+ struct IncomingValueHandler : public ValueHandler {
+ IncomingValueHandler(MachineIRBuilder &MIRBuilder, MachineRegisterInfo &MRI,
+ CCAssignFn *AssignFn)
+ : ValueHandler(true, MIRBuilder, MRI, AssignFn) {}
+ };
+
+ struct OutgoingValueHandler : public ValueHandler {
+ OutgoingValueHandler(MachineIRBuilder &MIRBuilder, MachineRegisterInfo &MRI,
+ CCAssignFn *AssignFn)
+ : ValueHandler(false, MIRBuilder, MRI, AssignFn) {}
+ };
+
protected:
/// Getter for generic TargetLowering class.
const TargetLowering *getTLI() const {
@@ -235,17 +235,17 @@ protected:
return static_cast<const XXXTargetLowering *>(TLI);
}
- /// \returns Flags corresponding to the attributes on the \p ArgIdx-th
- /// parameter of \p Call.
- ISD::ArgFlagsTy getAttributesForArgIdx(const CallBase &Call,
- unsigned ArgIdx) const;
-
- /// Adds flags to \p Flags based off of the attributes in \p Attrs.
- /// \p OpIdx is the index in \p Attrs to add flags from.
- void addArgFlagsFromAttributes(ISD::ArgFlagsTy &Flags,
- const AttributeList &Attrs,
- unsigned OpIdx) const;
-
+ /// \returns Flags corresponding to the attributes on the \p ArgIdx-th
+ /// parameter of \p Call.
+ ISD::ArgFlagsTy getAttributesForArgIdx(const CallBase &Call,
+ unsigned ArgIdx) const;
+
+ /// Adds flags to \p Flags based off of the attributes in \p Attrs.
+ /// \p OpIdx is the index in \p Attrs to add flags from.
+ void addArgFlagsFromAttributes(ISD::ArgFlagsTy &Flags,
+ const AttributeList &Attrs,
+ unsigned OpIdx) const;
+
template <typename FuncInfoTy>
void setArgFlags(ArgInfo &Arg, unsigned OpIdx, const DataLayout &DL,
const FuncInfoTy &FuncInfo) const;
@@ -269,7 +269,7 @@ protected:
MachineIRBuilder &MIRBuilder) const;
/// Invoke Handler::assignArg on each of the given \p Args and then use
- /// \p Handler to move them to the assigned locations.
+ /// \p Handler to move them to the assigned locations.
///
/// \return True if everything has succeeded, false otherwise.
bool handleAssignments(MachineIRBuilder &MIRBuilder,
@@ -289,14 +289,14 @@ protected:
CCAssignFn &AssignFnFixed,
CCAssignFn &AssignFnVarArg) const;
- /// Check whether parameters to a call that are passed in callee saved
- /// registers are the same as from the calling function. This needs to be
- /// checked for tail call eligibility.
- bool parametersInCSRMatch(const MachineRegisterInfo &MRI,
- const uint32_t *CallerPreservedMask,
- const SmallVectorImpl<CCValAssign> &ArgLocs,
- const SmallVectorImpl<ArgInfo> &OutVals) const;
-
+ /// Check whether parameters to a call that are passed in callee saved
+ /// registers are the same as from the calling function. This needs to be
+ /// checked for tail call eligibility.
+ bool parametersInCSRMatch(const MachineRegisterInfo &MRI,
+ const uint32_t *CallerPreservedMask,
+ const SmallVectorImpl<CCValAssign> &ArgLocs,
+ const SmallVectorImpl<ArgInfo> &OutVals) const;
+
/// \returns True if the calling convention for a callee and its caller pass
/// results in the same way. Typically used for tail call eligibility checks.
///
@@ -327,73 +327,73 @@ public:
return false;
}
- /// Load the returned value from the stack into virtual registers in \p VRegs.
- /// It uses the frame index \p FI and the start offset from \p DemoteReg.
- /// The loaded data size will be determined from \p RetTy.
- void insertSRetLoads(MachineIRBuilder &MIRBuilder, Type *RetTy,
- ArrayRef<Register> VRegs, Register DemoteReg,
- int FI) const;
-
- /// Store the return value given by \p VRegs into the stack starting at the
- /// offset specified in \p DemoteReg.
- void insertSRetStores(MachineIRBuilder &MIRBuilder, Type *RetTy,
- ArrayRef<Register> VRegs, Register DemoteReg) const;
-
- /// Insert the hidden sret ArgInfo to the beginning of \p SplitArgs.
- /// This function should be called from the target specific
- /// lowerFormalArguments when \p F requires the sret demotion.
- void insertSRetIncomingArgument(const Function &F,
- SmallVectorImpl<ArgInfo> &SplitArgs,
- Register &DemoteReg, MachineRegisterInfo &MRI,
- const DataLayout &DL) const;
-
- /// For the call-base described by \p CB, insert the hidden sret ArgInfo to
- /// the OrigArgs field of \p Info.
- void insertSRetOutgoingArgument(MachineIRBuilder &MIRBuilder,
- const CallBase &CB,
- CallLoweringInfo &Info) const;
-
- /// \return True if the return type described by \p Outs can be returned
- /// without performing sret demotion.
- bool checkReturn(CCState &CCInfo, SmallVectorImpl<BaseArgInfo> &Outs,
- CCAssignFn *Fn) const;
-
- /// Get the type and the ArgFlags for the split components of \p RetTy as
- /// returned by \c ComputeValueVTs.
- void getReturnInfo(CallingConv::ID CallConv, Type *RetTy, AttributeList Attrs,
- SmallVectorImpl<BaseArgInfo> &Outs,
- const DataLayout &DL) const;
-
- /// Top-level function to check the return type based on the target calling
- /// convention. \return True if the return value of \p MF can be returned
- /// without performing sret demotion.
- bool checkReturnTypeForCallConv(MachineFunction &MF) const;
-
- /// This hook must be implemented to check whether the return values
- /// described by \p Outs can fit into the return registers. If false
- /// is returned, an sret-demotion is performed.
- virtual bool canLowerReturn(MachineFunction &MF, CallingConv::ID CallConv,
- SmallVectorImpl<BaseArgInfo> &Outs,
- bool IsVarArg) const {
- return true;
- }
-
+ /// Load the returned value from the stack into virtual registers in \p VRegs.
+ /// It uses the frame index \p FI and the start offset from \p DemoteReg.
+ /// The loaded data size will be determined from \p RetTy.
+ void insertSRetLoads(MachineIRBuilder &MIRBuilder, Type *RetTy,
+ ArrayRef<Register> VRegs, Register DemoteReg,
+ int FI) const;
+
+ /// Store the return value given by \p VRegs into the stack starting at the
+ /// offset specified in \p DemoteReg.
+ void insertSRetStores(MachineIRBuilder &MIRBuilder, Type *RetTy,
+ ArrayRef<Register> VRegs, Register DemoteReg) const;
+
+ /// Insert the hidden sret ArgInfo to the beginning of \p SplitArgs.
+ /// This function should be called from the target specific
+ /// lowerFormalArguments when \p F requires the sret demotion.
+ void insertSRetIncomingArgument(const Function &F,
+ SmallVectorImpl<ArgInfo> &SplitArgs,
+ Register &DemoteReg, MachineRegisterInfo &MRI,
+ const DataLayout &DL) const;
+
+ /// For the call-base described by \p CB, insert the hidden sret ArgInfo to
+ /// the OrigArgs field of \p Info.
+ void insertSRetOutgoingArgument(MachineIRBuilder &MIRBuilder,
+ const CallBase &CB,
+ CallLoweringInfo &Info) const;
+
+ /// \return True if the return type described by \p Outs can be returned
+ /// without performing sret demotion.
+ bool checkReturn(CCState &CCInfo, SmallVectorImpl<BaseArgInfo> &Outs,
+ CCAssignFn *Fn) const;
+
+ /// Get the type and the ArgFlags for the split components of \p RetTy as
+ /// returned by \c ComputeValueVTs.
+ void getReturnInfo(CallingConv::ID CallConv, Type *RetTy, AttributeList Attrs,
+ SmallVectorImpl<BaseArgInfo> &Outs,
+ const DataLayout &DL) const;
+
+ /// Top-level function to check the return type based on the target calling
+ /// convention. \return True if the return value of \p MF can be returned
+ /// without performing sret demotion.
+ bool checkReturnTypeForCallConv(MachineFunction &MF) const;
+
+ /// This hook must be implemented to check whether the return values
+ /// described by \p Outs can fit into the return registers. If false
+ /// is returned, an sret-demotion is performed.
+ virtual bool canLowerReturn(MachineFunction &MF, CallingConv::ID CallConv,
+ SmallVectorImpl<BaseArgInfo> &Outs,
+ bool IsVarArg) const {
+ return true;
+ }
+
/// This hook must be implemented to lower outgoing return values, described
/// by \p Val, into the specified virtual registers \p VRegs.
/// This hook is used by GlobalISel.
///
- /// \p FLI is required for sret demotion.
- ///
+ /// \p FLI is required for sret demotion.
+ ///
/// \p SwiftErrorVReg is non-zero if the function has a swifterror parameter
/// that needs to be implicitly returned.
///
/// \return True if the lowering succeeds, false otherwise.
virtual bool lowerReturn(MachineIRBuilder &MIRBuilder, const Value *Val,
- ArrayRef<Register> VRegs, FunctionLoweringInfo &FLI,
+ ArrayRef<Register> VRegs, FunctionLoweringInfo &FLI,
Register SwiftErrorVReg) const {
if (!supportSwiftError()) {
assert(SwiftErrorVReg == 0 && "attempt to use unsupported swifterror");
- return lowerReturn(MIRBuilder, Val, VRegs, FLI);
+ return lowerReturn(MIRBuilder, Val, VRegs, FLI);
}
return false;
}
@@ -401,8 +401,8 @@ public:
/// This hook behaves as the extended lowerReturn function, but for targets
/// that do not support swifterror value promotion.
virtual bool lowerReturn(MachineIRBuilder &MIRBuilder, const Value *Val,
- ArrayRef<Register> VRegs,
- FunctionLoweringInfo &FLI) const {
+ ArrayRef<Register> VRegs,
+ FunctionLoweringInfo &FLI) const {
return false;
}
@@ -415,13 +415,13 @@ public:
/// the second in \c VRegs[1], and so on. For each argument, there will be one
/// register for each non-aggregate type, as returned by \c computeValueLLTs.
/// \p MIRBuilder is set to the proper insertion for the argument
- /// lowering. \p FLI is required for sret demotion.
+ /// lowering. \p FLI is required for sret demotion.
///
/// \return True if the lowering succeeded, false otherwise.
virtual bool lowerFormalArguments(MachineIRBuilder &MIRBuilder,
const Function &F,
- ArrayRef<ArrayRef<Register>> VRegs,
- FunctionLoweringInfo &FLI) const {
+ ArrayRef<ArrayRef<Register>> VRegs,
+ FunctionLoweringInfo &FLI) const {
return false;
}
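Targets opt into the sret-demotion machinery above by overriding canLowerReturn; insertSRetIncomingArgument, insertSRetStores and the DemoteRegister bookkeeping are then driven from common code. A sketch under the assumption of an invented target (XXXCallLowering and CC_XXX are placeholders for the target's class and its tablegen-erated CCAssignFn):

    #include "llvm/CodeGen/GlobalISel/CallLowering.h"
    using namespace llvm;

    bool CC_XXX(unsigned ValNo, MVT ValVT, MVT LocVT, CCValAssign::LocInfo,
                ISD::ArgFlagsTy, CCState &State); // hypothetical CCAssignFn

    struct XXXCallLowering : CallLowering {       // hypothetical target class
      using CallLowering::CallLowering;
      bool canLowerReturn(MachineFunction &MF, CallingConv::ID CallConv,
                          SmallVectorImpl<BaseArgInfo> &Outs,
                          bool IsVarArg) const override {
        SmallVector<CCValAssign, 16> ArgLocs;
        CCState CCInfo(CallConv, IsVarArg, MF, ArgLocs,
                       MF.getFunction().getContext());
        // Defer to the generic helper: run the CC assignment over Outs and
        // report whether everything landed in return registers.
        return checkReturn(CCInfo, Outs, CC_XXX);
      }
    };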
diff --git a/contrib/libs/llvm12/include/llvm/CodeGen/GlobalISel/CombinerHelper.h b/contrib/libs/llvm12/include/llvm/CodeGen/GlobalISel/CombinerHelper.h
index 1d29a2ddc8..7e482d9b7d 100644
--- a/contrib/libs/llvm12/include/llvm/CodeGen/GlobalISel/CombinerHelper.h
+++ b/contrib/libs/llvm12/include/llvm/CodeGen/GlobalISel/CombinerHelper.h
@@ -24,8 +24,8 @@
#ifndef LLVM_CODEGEN_GLOBALISEL_COMBINER_HELPER_H
#define LLVM_CODEGEN_GLOBALISEL_COMBINER_HELPER_H
-#include "llvm/ADT/APFloat.h"
-#include "llvm/ADT/DenseMap.h"
+#include "llvm/ADT/APFloat.h"
+#include "llvm/ADT/DenseMap.h"
#include "llvm/CodeGen/LowLevelType.h"
#include "llvm/CodeGen/Register.h"
#include "llvm/Support/Alignment.h"
@@ -34,15 +34,15 @@ namespace llvm {
class GISelChangeObserver;
class MachineIRBuilder;
-class MachineInstrBuilder;
+class MachineInstrBuilder;
class MachineRegisterInfo;
class MachineInstr;
class MachineOperand;
class GISelKnownBits;
class MachineDominatorTree;
class LegalizerInfo;
-struct LegalityQuery;
-class TargetLowering;
+struct LegalityQuery;
+class TargetLowering;
struct PreferredTuple {
LLT Ty; // The result type of the extend.
@@ -62,37 +62,37 @@ struct PtrAddChain {
Register Base;
};
-struct RegisterImmPair {
- Register Reg;
- int64_t Imm;
-};
-
-struct ShiftOfShiftedLogic {
- MachineInstr *Logic;
- MachineInstr *Shift2;
- Register LogicNonShiftReg;
- uint64_t ValSum;
-};
-
-using OperandBuildSteps =
- SmallVector<std::function<void(MachineInstrBuilder &)>, 4>;
-struct InstructionBuildSteps {
- unsigned Opcode = 0; /// The opcode for the produced instruction.
- OperandBuildSteps OperandFns; /// Operands to be added to the instruction.
- InstructionBuildSteps() = default;
- InstructionBuildSteps(unsigned Opcode, const OperandBuildSteps &OperandFns)
- : Opcode(Opcode), OperandFns(OperandFns) {}
-};
-
-struct InstructionStepsMatchInfo {
- /// Describes instructions to be built during a combine.
- SmallVector<InstructionBuildSteps, 2> InstrsToBuild;
- InstructionStepsMatchInfo() = default;
- InstructionStepsMatchInfo(
- std::initializer_list<InstructionBuildSteps> InstrsToBuild)
- : InstrsToBuild(InstrsToBuild) {}
-};
-
+struct RegisterImmPair {
+ Register Reg;
+ int64_t Imm;
+};
+
+struct ShiftOfShiftedLogic {
+ MachineInstr *Logic;
+ MachineInstr *Shift2;
+ Register LogicNonShiftReg;
+ uint64_t ValSum;
+};
+
+using OperandBuildSteps =
+ SmallVector<std::function<void(MachineInstrBuilder &)>, 4>;
+struct InstructionBuildSteps {
+ unsigned Opcode = 0; /// The opcode for the produced instruction.
+ OperandBuildSteps OperandFns; /// Operands to be added to the instruction.
+ InstructionBuildSteps() = default;
+ InstructionBuildSteps(unsigned Opcode, const OperandBuildSteps &OperandFns)
+ : Opcode(Opcode), OperandFns(OperandFns) {}
+};
+
+struct InstructionStepsMatchInfo {
+ /// Describes instructions to be built during a combine.
+ SmallVector<InstructionBuildSteps, 2> InstrsToBuild;
+ InstructionStepsMatchInfo() = default;
+ InstructionStepsMatchInfo(
+ std::initializer_list<InstructionBuildSteps> InstrsToBuild)
+ : InstrsToBuild(InstrsToBuild) {}
+};
+
class CombinerHelper {
protected:
MachineIRBuilder &Builder;
@@ -112,12 +112,12 @@ public:
return KB;
}
- const TargetLowering &getTargetLowering() const;
-
- /// \return true if the combine is running prior to legalization, or if \p
- /// Query is legal on the target.
- bool isLegalOrBeforeLegalizer(const LegalityQuery &Query) const;
-
+ const TargetLowering &getTargetLowering() const;
+
+ /// \return true if the combine is running prior to legalization, or if \p
+ /// Query is legal on the target.
+ bool isLegalOrBeforeLegalizer(const LegalityQuery &Query) const;
+
/// Call MachineRegisterInfo::replaceRegWith() and inform the observer of the changes.
void replaceRegWith(MachineRegisterInfo &MRI, Register FromReg, Register ToReg) const;
@@ -156,18 +156,18 @@ public:
bool matchCombineIndexedLoadStore(MachineInstr &MI, IndexedLoadStoreMatchInfo &MatchInfo);
void applyCombineIndexedLoadStore(MachineInstr &MI, IndexedLoadStoreMatchInfo &MatchInfo);
- bool matchSextTruncSextLoad(MachineInstr &MI);
- bool applySextTruncSextLoad(MachineInstr &MI);
+ bool matchSextTruncSextLoad(MachineInstr &MI);
+ bool applySextTruncSextLoad(MachineInstr &MI);
- /// Match sext_inreg(load p), imm -> sextload p
- bool matchSextInRegOfLoad(MachineInstr &MI, std::tuple<Register, unsigned> &MatchInfo);
- bool applySextInRegOfLoad(MachineInstr &MI, std::tuple<Register, unsigned> &MatchInfo);
-
- /// If a brcond's true block is not the fallthrough, make it so by inverting
- /// the condition and swapping operands.
- bool matchOptBrCondByInvertingCond(MachineInstr &MI);
- void applyOptBrCondByInvertingCond(MachineInstr &MI);
+ /// Match sext_inreg(load p), imm -> sextload p
+ bool matchSextInRegOfLoad(MachineInstr &MI, std::tuple<Register, unsigned> &MatchInfo);
+ bool applySextInRegOfLoad(MachineInstr &MI, std::tuple<Register, unsigned> &MatchInfo);
+ /// If a brcond's true block is not the fallthrough, make it so by inverting
+ /// the condition and swapping operands.
+ bool matchOptBrCondByInvertingCond(MachineInstr &MI);
+ void applyOptBrCondByInvertingCond(MachineInstr &MI);
+
/// If \p MI is G_CONCAT_VECTORS, try to combine it.
/// Returns true if MI changed.
/// Right now, we support:
@@ -243,28 +243,28 @@ public:
bool matchPtrAddImmedChain(MachineInstr &MI, PtrAddChain &MatchInfo);
bool applyPtrAddImmedChain(MachineInstr &MI, PtrAddChain &MatchInfo);
- /// Fold (shift (shift base, x), y) -> (shift base (x+y))
- bool matchShiftImmedChain(MachineInstr &MI, RegisterImmPair &MatchInfo);
- bool applyShiftImmedChain(MachineInstr &MI, RegisterImmPair &MatchInfo);
-
- /// If we have a shift-by-constant of a bitwise logic op that itself has a
- /// shift-by-constant operand with identical opcode, we may be able to convert
- /// that into 2 independent shifts followed by the logic op.
- bool matchShiftOfShiftedLogic(MachineInstr &MI,
- ShiftOfShiftedLogic &MatchInfo);
- bool applyShiftOfShiftedLogic(MachineInstr &MI,
- ShiftOfShiftedLogic &MatchInfo);
-
+ /// Fold (shift (shift base, x), y) -> (shift base (x+y))
+ bool matchShiftImmedChain(MachineInstr &MI, RegisterImmPair &MatchInfo);
+ bool applyShiftImmedChain(MachineInstr &MI, RegisterImmPair &MatchInfo);
+
+ /// If we have a shift-by-constant of a bitwise logic op that itself has a
+ /// shift-by-constant operand with identical opcode, we may be able to convert
+ /// that into 2 independent shifts followed by the logic op.
+ bool matchShiftOfShiftedLogic(MachineInstr &MI,
+ ShiftOfShiftedLogic &MatchInfo);
+ bool applyShiftOfShiftedLogic(MachineInstr &MI,
+ ShiftOfShiftedLogic &MatchInfo);
+
/// Transform a multiply by a power-of-2 value to a left shift.
bool matchCombineMulToShl(MachineInstr &MI, unsigned &ShiftVal);
bool applyCombineMulToShl(MachineInstr &MI, unsigned &ShiftVal);
- // Transform a G_SHL with an extended source into a narrower shift if
- // possible.
- bool matchCombineShlOfExtend(MachineInstr &MI, RegisterImmPair &MatchData);
- bool applyCombineShlOfExtend(MachineInstr &MI,
- const RegisterImmPair &MatchData);
-
+ // Transform a G_SHL with an extended source into a narrower shift if
+ // possible.
+ bool matchCombineShlOfExtend(MachineInstr &MI, RegisterImmPair &MatchData);
+ bool applyCombineShlOfExtend(MachineInstr &MI,
+ const RegisterImmPair &MatchData);
+
/// Reduce a shift by a constant to an unmerge and a shift on a half sized
/// type. This will not produce a shift smaller than \p TargetShiftSize.
bool matchCombineShiftToUnmerge(MachineInstr &MI, unsigned TargetShiftSize,
@@ -272,86 +272,86 @@ public:
bool applyCombineShiftToUnmerge(MachineInstr &MI, const unsigned &ShiftVal);
bool tryCombineShiftToUnmerge(MachineInstr &MI, unsigned TargetShiftAmount);
- /// Transform <ty,...> G_UNMERGE(G_MERGE ty X, Y, Z) -> ty X, Y, Z.
- bool
- matchCombineUnmergeMergeToPlainValues(MachineInstr &MI,
- SmallVectorImpl<Register> &Operands);
- bool
- applyCombineUnmergeMergeToPlainValues(MachineInstr &MI,
- SmallVectorImpl<Register> &Operands);
-
- /// Transform G_UNMERGE Constant -> Constant1, Constant2, ...
- bool matchCombineUnmergeConstant(MachineInstr &MI,
- SmallVectorImpl<APInt> &Csts);
- bool applyCombineUnmergeConstant(MachineInstr &MI,
- SmallVectorImpl<APInt> &Csts);
-
- /// Transform X, Y<dead> = G_UNMERGE Z -> X = G_TRUNC Z.
- bool matchCombineUnmergeWithDeadLanesToTrunc(MachineInstr &MI);
- bool applyCombineUnmergeWithDeadLanesToTrunc(MachineInstr &MI);
-
- /// Transform X, Y = G_UNMERGE(G_ZEXT(Z)) -> X = G_ZEXT(Z); Y = G_CONSTANT 0
- bool matchCombineUnmergeZExtToZExt(MachineInstr &MI);
- bool applyCombineUnmergeZExtToZExt(MachineInstr &MI);
-
- /// Transform fp_instr(cst) to constant result of the fp operation.
- bool matchCombineConstantFoldFpUnary(MachineInstr &MI,
- Optional<APFloat> &Cst);
- bool applyCombineConstantFoldFpUnary(MachineInstr &MI,
- Optional<APFloat> &Cst);
-
- /// Transform IntToPtr(PtrToInt(x)) to x if cast is in the same address space.
- bool matchCombineI2PToP2I(MachineInstr &MI, Register &Reg);
- bool applyCombineI2PToP2I(MachineInstr &MI, Register &Reg);
-
- /// Transform PtrToInt(IntToPtr(x)) to x.
- bool matchCombineP2IToI2P(MachineInstr &MI, Register &Reg);
- bool applyCombineP2IToI2P(MachineInstr &MI, Register &Reg);
-
- /// Transform G_ADD (G_PTRTOINT x), y -> G_PTRTOINT (G_PTR_ADD x, y)
- /// Transform G_ADD y, (G_PTRTOINT x) -> G_PTRTOINT (G_PTR_ADD x, y)
- bool matchCombineAddP2IToPtrAdd(MachineInstr &MI,
- std::pair<Register, bool> &PtrRegAndCommute);
- bool applyCombineAddP2IToPtrAdd(MachineInstr &MI,
- std::pair<Register, bool> &PtrRegAndCommute);
-
- // Transform G_PTR_ADD (G_PTRTOINT C1), C2 -> C1 + C2
- bool matchCombineConstPtrAddToI2P(MachineInstr &MI, int64_t &NewCst);
- bool applyCombineConstPtrAddToI2P(MachineInstr &MI, int64_t &NewCst);
-
- /// Transform anyext(trunc(x)) to x.
- bool matchCombineAnyExtTrunc(MachineInstr &MI, Register &Reg);
- bool applyCombineAnyExtTrunc(MachineInstr &MI, Register &Reg);
-
- /// Transform [asz]ext([asz]ext(x)) to [asz]ext x.
- bool matchCombineExtOfExt(MachineInstr &MI,
- std::tuple<Register, unsigned> &MatchInfo);
- bool applyCombineExtOfExt(MachineInstr &MI,
- std::tuple<Register, unsigned> &MatchInfo);
-
- /// Transform fneg(fneg(x)) to x.
- bool matchCombineFNegOfFNeg(MachineInstr &MI, Register &Reg);
-
- /// Match fabs(fabs(x)) to fabs(x).
- bool matchCombineFAbsOfFAbs(MachineInstr &MI, Register &Src);
- bool applyCombineFAbsOfFAbs(MachineInstr &MI, Register &Src);
-
- /// Transform trunc ([asz]ext x) to x or ([asz]ext x) or (trunc x).
- bool matchCombineTruncOfExt(MachineInstr &MI,
- std::pair<Register, unsigned> &MatchInfo);
- bool applyCombineTruncOfExt(MachineInstr &MI,
- std::pair<Register, unsigned> &MatchInfo);
-
- /// Transform trunc (shl x, K) to shl (trunc x),
- /// K => K < VT.getScalarSizeInBits().
- bool matchCombineTruncOfShl(MachineInstr &MI,
- std::pair<Register, Register> &MatchInfo);
- bool applyCombineTruncOfShl(MachineInstr &MI,
- std::pair<Register, Register> &MatchInfo);
-
- /// Transform G_MUL(x, -1) to G_SUB(0, x)
- bool applyCombineMulByNegativeOne(MachineInstr &MI);
-
+ /// Transform <ty,...> G_UNMERGE(G_MERGE ty X, Y, Z) -> ty X, Y, Z.
+ bool
+ matchCombineUnmergeMergeToPlainValues(MachineInstr &MI,
+ SmallVectorImpl<Register> &Operands);
+ bool
+ applyCombineUnmergeMergeToPlainValues(MachineInstr &MI,
+ SmallVectorImpl<Register> &Operands);
+
+ /// Transform G_UNMERGE Constant -> Constant1, Constant2, ...
+ bool matchCombineUnmergeConstant(MachineInstr &MI,
+ SmallVectorImpl<APInt> &Csts);
+ bool applyCombineUnmergeConstant(MachineInstr &MI,
+ SmallVectorImpl<APInt> &Csts);
+
+ /// Transform X, Y<dead> = G_UNMERGE Z -> X = G_TRUNC Z.
+ bool matchCombineUnmergeWithDeadLanesToTrunc(MachineInstr &MI);
+ bool applyCombineUnmergeWithDeadLanesToTrunc(MachineInstr &MI);
+
+ /// Transform X, Y = G_UNMERGE(G_ZEXT(Z)) -> X = G_ZEXT(Z); Y = G_CONSTANT 0
+ bool matchCombineUnmergeZExtToZExt(MachineInstr &MI);
+ bool applyCombineUnmergeZExtToZExt(MachineInstr &MI);
+
+ /// Transform fp_instr(cst) to constant result of the fp operation.
+ bool matchCombineConstantFoldFpUnary(MachineInstr &MI,
+ Optional<APFloat> &Cst);
+ bool applyCombineConstantFoldFpUnary(MachineInstr &MI,
+ Optional<APFloat> &Cst);
+
+ /// Transform IntToPtr(PtrToInt(x)) to x if cast is in the same address space.
+ bool matchCombineI2PToP2I(MachineInstr &MI, Register &Reg);
+ bool applyCombineI2PToP2I(MachineInstr &MI, Register &Reg);
+
+ /// Transform PtrToInt(IntToPtr(x)) to x.
+ bool matchCombineP2IToI2P(MachineInstr &MI, Register &Reg);
+ bool applyCombineP2IToI2P(MachineInstr &MI, Register &Reg);
+
+ /// Transform G_ADD (G_PTRTOINT x), y -> G_PTRTOINT (G_PTR_ADD x, y)
+ /// Transform G_ADD y, (G_PTRTOINT x) -> G_PTRTOINT (G_PTR_ADD x, y)
+ bool matchCombineAddP2IToPtrAdd(MachineInstr &MI,
+ std::pair<Register, bool> &PtrRegAndCommute);
+ bool applyCombineAddP2IToPtrAdd(MachineInstr &MI,
+ std::pair<Register, bool> &PtrRegAndCommute);
+
+ // Transform G_PTR_ADD (G_PTRTOINT C1), C2 -> C1 + C2
+ bool matchCombineConstPtrAddToI2P(MachineInstr &MI, int64_t &NewCst);
+ bool applyCombineConstPtrAddToI2P(MachineInstr &MI, int64_t &NewCst);
+
+ /// Transform anyext(trunc(x)) to x.
+ bool matchCombineAnyExtTrunc(MachineInstr &MI, Register &Reg);
+ bool applyCombineAnyExtTrunc(MachineInstr &MI, Register &Reg);
+
+ /// Transform [asz]ext([asz]ext(x)) to [asz]ext x.
+ bool matchCombineExtOfExt(MachineInstr &MI,
+ std::tuple<Register, unsigned> &MatchInfo);
+ bool applyCombineExtOfExt(MachineInstr &MI,
+ std::tuple<Register, unsigned> &MatchInfo);
+
+ /// Transform fneg(fneg(x)) to x.
+ bool matchCombineFNegOfFNeg(MachineInstr &MI, Register &Reg);
+
+ /// Match fabs(fabs(x)) to fabs(x).
+ bool matchCombineFAbsOfFAbs(MachineInstr &MI, Register &Src);
+ bool applyCombineFAbsOfFAbs(MachineInstr &MI, Register &Src);
+
+ /// Transform trunc ([asz]ext x) to x or ([asz]ext x) or (trunc x).
+ bool matchCombineTruncOfExt(MachineInstr &MI,
+ std::pair<Register, unsigned> &MatchInfo);
+ bool applyCombineTruncOfExt(MachineInstr &MI,
+ std::pair<Register, unsigned> &MatchInfo);
+
+ /// Transform trunc (shl x, K) to shl (trunc x),
+ /// K => K < VT.getScalarSizeInBits().
+ bool matchCombineTruncOfShl(MachineInstr &MI,
+ std::pair<Register, Register> &MatchInfo);
+ bool applyCombineTruncOfShl(MachineInstr &MI,
+ std::pair<Register, Register> &MatchInfo);
+
+ /// Transform G_MUL(x, -1) to G_SUB(0, x)
+ bool applyCombineMulByNegativeOne(MachineInstr &MI);
+
/// Return true if any explicit use operand on \p MI is defined by a
/// G_IMPLICIT_DEF.
bool matchAnyExplicitUseIsUndef(MachineInstr &MI);
@@ -366,13 +366,13 @@ public:
/// Return true if a G_STORE instruction \p MI is storing an undef value.
bool matchUndefStore(MachineInstr &MI);
- /// Return true if a G_SELECT instruction \p MI has an undef comparison.
- bool matchUndefSelectCmp(MachineInstr &MI);
-
- /// Return true if a G_SELECT instruction \p MI has a constant comparison. If
- /// true, \p OpIdx will store the operand index of the known selected value.
- bool matchConstantSelectCmp(MachineInstr &MI, unsigned &OpIdx);
-
+ /// Return true if a G_SELECT instruction \p MI has an undef comparison.
+ bool matchUndefSelectCmp(MachineInstr &MI);
+
+ /// Return true if a G_SELECT instruction \p MI has a constant comparison. If
+ /// true, \p OpIdx will store the operand index of the known selected value.
+ bool matchConstantSelectCmp(MachineInstr &MI, unsigned &OpIdx);
+
/// Replace an instruction with a G_FCONSTANT with value \p C.
bool replaceInstWithFConstant(MachineInstr &MI, double C);
@@ -385,9 +385,9 @@ public:
/// Delete \p MI and replace all of its uses with its \p OpIdx-th operand.
bool replaceSingleDefInstWithOperand(MachineInstr &MI, unsigned OpIdx);
- /// Delete \p MI and replace all of its uses with \p Replacement.
- bool replaceSingleDefInstWithReg(MachineInstr &MI, Register Replacement);
-
+ /// Delete \p MI and replace all of its uses with \p Replacement.
+ bool replaceSingleDefInstWithReg(MachineInstr &MI, Register Replacement);
+
/// Return true if \p MOP1 and \p MOP2 are register operands are defined by
/// equivalent instructions.
bool matchEqualDefs(const MachineOperand &MOP1, const MachineOperand &MOP2);
@@ -405,12 +405,12 @@ public:
/// Check if operand \p OpIdx is zero.
bool matchOperandIsZero(MachineInstr &MI, unsigned OpIdx);
- /// Check if operand \p OpIdx is undef.
- bool matchOperandIsUndef(MachineInstr &MI, unsigned OpIdx);
-
- /// Check if operand \p OpIdx is known to be a power of 2.
- bool matchOperandIsKnownToBeAPowerOfTwo(MachineInstr &MI, unsigned OpIdx);
-
+ /// Check if operand \p OpIdx is undef.
+ bool matchOperandIsUndef(MachineInstr &MI, unsigned OpIdx);
+
+ /// Check if operand \p OpIdx is known to be a power of 2.
+ bool matchOperandIsKnownToBeAPowerOfTwo(MachineInstr &MI, unsigned OpIdx);
+
/// Erase \p MI
bool eraseInst(MachineInstr &MI);
@@ -420,79 +420,79 @@ public:
bool applySimplifyAddToSub(MachineInstr &MI,
std::tuple<Register, Register> &MatchInfo);
- /// Match (logic_op (op x...), (op y...)) -> (op (logic_op x, y))
- bool
- matchHoistLogicOpWithSameOpcodeHands(MachineInstr &MI,
- InstructionStepsMatchInfo &MatchInfo);
-
- /// Replace \p MI with a series of instructions described in \p MatchInfo.
- bool applyBuildInstructionSteps(MachineInstr &MI,
- InstructionStepsMatchInfo &MatchInfo);
-
- /// Match ashr (shl x, C), C -> sext_inreg (C)
- bool matchAshrShlToSextInreg(MachineInstr &MI,
- std::tuple<Register, int64_t> &MatchInfo);
- bool applyAshShlToSextInreg(MachineInstr &MI,
- std::tuple<Register, int64_t> &MatchInfo);
- /// \return true if \p MI is a G_AND instruction whose operands are x and y
- /// where x & y == x or x & y == y. (E.g., one of operands is all-ones value.)
- ///
- /// \param [in] MI - The G_AND instruction.
- /// \param [out] Replacement - A register the G_AND should be replaced with on
- /// success.
- bool matchRedundantAnd(MachineInstr &MI, Register &Replacement);
-
- /// \return true if \p MI is a G_OR instruction whose operands are x and y
- /// where x | y == x or x | y == y. (E.g., one of the operands is an all-zeros
- /// value.)
- ///
- /// \param [in] MI - The G_OR instruction.
- /// \param [out] Replacement - A register the G_OR should be replaced with on
- /// success.
- bool matchRedundantOr(MachineInstr &MI, Register &Replacement);
-
- /// \return true if \p MI is a G_SEXT_INREG that can be erased.
- bool matchRedundantSExtInReg(MachineInstr &MI);
-
- /// Combine inverting a result of a compare into the opposite cond code.
- bool matchNotCmp(MachineInstr &MI, SmallVectorImpl<Register> &RegsToNegate);
- bool applyNotCmp(MachineInstr &MI, SmallVectorImpl<Register> &RegsToNegate);
-
- /// Fold (xor (and x, y), y) -> (and (not x), y)
- ///{
- bool matchXorOfAndWithSameReg(MachineInstr &MI,
- std::pair<Register, Register> &MatchInfo);
- bool applyXorOfAndWithSameReg(MachineInstr &MI,
- std::pair<Register, Register> &MatchInfo);
- ///}
-
- /// Combine G_PTR_ADD with nullptr to G_INTTOPTR
- bool matchPtrAddZero(MachineInstr &MI);
- bool applyPtrAddZero(MachineInstr &MI);
-
- /// Combine G_UREM x, (known power of 2) to an add and bitmasking.
- bool applySimplifyURemByPow2(MachineInstr &MI);
-
- bool matchCombineInsertVecElts(MachineInstr &MI,
- SmallVectorImpl<Register> &MatchInfo);
-
- bool applyCombineInsertVecElts(MachineInstr &MI,
- SmallVectorImpl<Register> &MatchInfo);
-
- /// Match expression trees of the form
- ///
- /// \code
- /// sN *a = ...
- /// sM val = a[0] | (a[1] << N) | (a[2] << 2N) | (a[3] << 3N) ...
- /// \endcode
- ///
- /// And check if the tree can be replaced with an M-bit load + possibly a
- /// bswap.
- bool matchLoadOrCombine(MachineInstr &MI,
- std::function<void(MachineIRBuilder &)> &MatchInfo);
- bool applyLoadOrCombine(MachineInstr &MI,
- std::function<void(MachineIRBuilder &)> &MatchInfo);
-
+ /// Match (logic_op (op x...), (op y...)) -> (op (logic_op x, y))
+ bool
+ matchHoistLogicOpWithSameOpcodeHands(MachineInstr &MI,
+ InstructionStepsMatchInfo &MatchInfo);
+
+ /// Replace \p MI with a series of instructions described in \p MatchInfo.
+ bool applyBuildInstructionSteps(MachineInstr &MI,
+ InstructionStepsMatchInfo &MatchInfo);
+
+ /// Match ashr (shl x, C), C -> sext_inreg (C)
+ bool matchAshrShlToSextInreg(MachineInstr &MI,
+ std::tuple<Register, int64_t> &MatchInfo);
+ bool applyAshShlToSextInreg(MachineInstr &MI,
+ std::tuple<Register, int64_t> &MatchInfo);
+ /// \return true if \p MI is a G_AND instruction whose operands are x and y
+ /// where x & y == x or x & y == y. (E.g., one of operands is all-ones value.)
+ ///
+ /// \param [in] MI - The G_AND instruction.
+ /// \param [out] Replacement - A register the G_AND should be replaced with on
+ /// success.
+ bool matchRedundantAnd(MachineInstr &MI, Register &Replacement);
+
+ /// \return true if \p MI is a G_OR instruction whose operands are x and y
+ /// where x | y == x or x | y == y. (E.g., one of the operands is an all-zeros
+ /// value.)
+ ///
+ /// \param [in] MI - The G_OR instruction.
+ /// \param [out] Replacement - A register the G_OR should be replaced with on
+ /// success.
+ bool matchRedundantOr(MachineInstr &MI, Register &Replacement);
+
+ /// \return true if \p MI is a G_SEXT_INREG that can be erased.
+ bool matchRedundantSExtInReg(MachineInstr &MI);
+
+ /// Combine inverting a result of a compare into the opposite cond code.
+ bool matchNotCmp(MachineInstr &MI, SmallVectorImpl<Register> &RegsToNegate);
+ bool applyNotCmp(MachineInstr &MI, SmallVectorImpl<Register> &RegsToNegate);
+
+ /// Fold (xor (and x, y), y) -> (and (not x), y)
+ ///{
+ bool matchXorOfAndWithSameReg(MachineInstr &MI,
+ std::pair<Register, Register> &MatchInfo);
+ bool applyXorOfAndWithSameReg(MachineInstr &MI,
+ std::pair<Register, Register> &MatchInfo);
+ ///}
+
+ /// Combine G_PTR_ADD with nullptr to G_INTTOPTR
+ bool matchPtrAddZero(MachineInstr &MI);
+ bool applyPtrAddZero(MachineInstr &MI);
+
+ /// Combine G_UREM x, (known power of 2) to an add and bitmasking.
+ bool applySimplifyURemByPow2(MachineInstr &MI);
+
+ bool matchCombineInsertVecElts(MachineInstr &MI,
+ SmallVectorImpl<Register> &MatchInfo);
+
+ bool applyCombineInsertVecElts(MachineInstr &MI,
+ SmallVectorImpl<Register> &MatchInfo);
+
+ /// Match expression trees of the form
+ ///
+ /// \code
+ /// sN *a = ...
+ /// sM val = a[0] | (a[1] << N) | (a[2] << 2N) | (a[3] << 3N) ...
+ /// \endcode
+ ///
+ /// And check if the tree can be replaced with an M-bit load + possibly a
+ /// bswap.
+ bool matchLoadOrCombine(MachineInstr &MI,
+ std::function<void(MachineIRBuilder &)> &MatchInfo);
+ bool applyLoadOrCombine(MachineInstr &MI,
+ std::function<void(MachineIRBuilder &)> &MatchInfo);
+
/// Try to transform \p MI by using all of the above
/// combine functions. Returns true if changed.
bool tryCombine(MachineInstr &MI);
@@ -521,30 +521,30 @@ private:
/// \returns true if a candidate is found.
bool findPreIndexCandidate(MachineInstr &MI, Register &Addr, Register &Base,
Register &Offset);
-
- /// Helper function for matchLoadOrCombine. Searches for Registers
- /// which may have been produced by a load instruction + some arithmetic.
- ///
- /// \param [in] Root - The search root.
- ///
- /// \returns The Registers found during the search.
- Optional<SmallVector<Register, 8>>
- findCandidatesForLoadOrCombine(const MachineInstr *Root) const;
-
- /// Helper function for matchLoadOrCombine.
- ///
- /// Checks if every register in \p RegsToVisit is defined by a load
- /// instruction + some arithmetic.
- ///
- /// \param [out] MemOffset2Idx - Maps the byte positions each load ends up
- /// at to the index of the load.
- /// \param [in] MemSizeInBits - The number of bits each load should produce.
- ///
- /// \returns The lowest-index load found and the lowest index on success.
- Optional<std::pair<MachineInstr *, int64_t>> findLoadOffsetsForLoadOrCombine(
- SmallDenseMap<int64_t, int64_t, 8> &MemOffset2Idx,
- const SmallVector<Register, 8> &RegsToVisit,
- const unsigned MemSizeInBits);
+
+ /// Helper function for matchLoadOrCombine. Searches for Registers
+ /// which may have been produced by a load instruction + some arithmetic.
+ ///
+ /// \param [in] Root - The search root.
+ ///
+ /// \returns The Registers found during the search.
+ Optional<SmallVector<Register, 8>>
+ findCandidatesForLoadOrCombine(const MachineInstr *Root) const;
+
+ /// Helper function for matchLoadOrCombine.
+ ///
+ /// Checks if every register in \p RegsToVisit is defined by a load
+ /// instruction + some arithmetic.
+ ///
+ /// \param [out] MemOffset2Idx - Maps the byte positions each load ends up
+ /// at to the index of the load.
+ /// \param [in] MemSizeInBits - The number of bits each load should produce.
+ ///
+ /// \returns The lowest-index load found and the lowest index on success.
+ Optional<std::pair<MachineInstr *, int64_t>> findLoadOffsetsForLoadOrCombine(
+ SmallDenseMap<int64_t, int64_t, 8> &MemOffset2Idx,
+ const SmallVector<Register, 8> &RegsToVisit,
+ const unsigned MemSizeInBits);
};
} // namespace llvm
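All of the combines above follow one two-phase protocol: the match function inspects the MIR and fills MatchInfo without mutating anything, and the apply function performs the rewrite. A minimal hand-wired sketch of one pair; in-tree targets generate this dispatch from tablegen rule lists, so the driver below is illustrative only:

    #include "llvm/CodeGen/GlobalISel/CombinerHelper.h"
    using namespace llvm;

    // Sketch only: fold a multiply by a power of two into a left shift.
    static bool tryMulToShl(CombinerHelper &Helper, MachineInstr &MI) {
      unsigned ShiftVal;                         // match data for the rule
      if (!Helper.matchCombineMulToShl(MI, ShiftVal))
        return false;                            // pattern did not match
      return Helper.applyCombineMulToShl(MI, ShiftVal);
    }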
diff --git a/contrib/libs/llvm12/include/llvm/CodeGen/GlobalISel/GISelChangeObserver.h b/contrib/libs/llvm12/include/llvm/CodeGen/GlobalISel/GISelChangeObserver.h
index 0833e960fe..3f2f27e157 100644
--- a/contrib/libs/llvm12/include/llvm/CodeGen/GlobalISel/GISelChangeObserver.h
+++ b/contrib/libs/llvm12/include/llvm/CodeGen/GlobalISel/GISelChangeObserver.h
@@ -58,7 +58,7 @@ public:
/// For convenience, finishedChangingAllUsesOfReg() will report the completion
/// of the changes. The use list may change between this call and
/// finishedChangingAllUsesOfReg().
- void changingAllUsesOfReg(const MachineRegisterInfo &MRI, Register Reg);
+ void changingAllUsesOfReg(const MachineRegisterInfo &MRI, Register Reg);
/// All instructions reported as changing by changingAllUsesOfReg() have
/// finished being changed.
void finishedChangingAllUsesOfReg();
diff --git a/contrib/libs/llvm12/include/llvm/CodeGen/GlobalISel/GISelKnownBits.h b/contrib/libs/llvm12/include/llvm/CodeGen/GlobalISel/GISelKnownBits.h
index 452ddd17c0..87b74ea403 100644
--- a/contrib/libs/llvm12/include/llvm/CodeGen/GlobalISel/GISelKnownBits.h
+++ b/contrib/libs/llvm12/include/llvm/CodeGen/GlobalISel/GISelKnownBits.h
@@ -20,7 +20,7 @@
#ifndef LLVM_CODEGEN_GLOBALISEL_KNOWNBITSINFO_H
#define LLVM_CODEGEN_GLOBALISEL_KNOWNBITSINFO_H
-#include "llvm/ADT/DenseMap.h"
+#include "llvm/ADT/DenseMap.h"
#include "llvm/CodeGen/GlobalISel/GISelChangeObserver.h"
#include "llvm/CodeGen/MachineFunctionPass.h"
#include "llvm/CodeGen/Register.h"
@@ -41,13 +41,13 @@ class GISelKnownBits : public GISelChangeObserver {
/// Cache maintained during a computeKnownBits request.
SmallDenseMap<Register, KnownBits, 16> ComputeKnownBitsCache;
- void computeKnownBitsMin(Register Src0, Register Src1, KnownBits &Known,
- const APInt &DemandedElts,
- unsigned Depth = 0);
-
- unsigned computeNumSignBitsMin(Register Src0, Register Src1,
- const APInt &DemandedElts, unsigned Depth = 0);
-
+ void computeKnownBitsMin(Register Src0, Register Src1, KnownBits &Known,
+ const APInt &DemandedElts,
+ unsigned Depth = 0);
+
+ unsigned computeNumSignBitsMin(Register Src0, Register Src1,
+ const APInt &DemandedElts, unsigned Depth = 0);
+
public:
GISelKnownBits(MachineFunction &MF, unsigned MaxDepth = 6);
virtual ~GISelKnownBits() = default;
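The two Min helpers above intersect what is known about a pair of sources, which is the conservative answer when either source may end up selected. A hedged usage sketch of the public entry point:

    #include "llvm/CodeGen/GlobalISel/GISelKnownBits.h"
    using namespace llvm;

    // Sketch only: check that the low bit of Reg is provably zero, e.g. to
    // justify folding a following G_AND with 1 to a constant 0.
    static bool lowBitKnownZero(GISelKnownBits &KB, Register Reg) {
      KnownBits Known = KB.getKnownBits(Reg);  // cached within one request
      return Known.Zero[0];
    }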
diff --git a/contrib/libs/llvm12/include/llvm/CodeGen/GlobalISel/IRTranslator.h b/contrib/libs/llvm12/include/llvm/CodeGen/GlobalISel/IRTranslator.h
index 6c1ac8e115..ec913aa7b9 100644
--- a/contrib/libs/llvm12/include/llvm/CodeGen/GlobalISel/IRTranslator.h
+++ b/contrib/libs/llvm12/include/llvm/CodeGen/GlobalISel/IRTranslator.h
@@ -27,14 +27,14 @@
#include "llvm/ADT/DenseMap.h"
#include "llvm/ADT/SmallVector.h"
-#include "llvm/CodeGen/FunctionLoweringInfo.h"
+#include "llvm/CodeGen/FunctionLoweringInfo.h"
#include "llvm/CodeGen/GlobalISel/CSEMIRBuilder.h"
-#include "llvm/CodeGen/MachineFunctionPass.h"
+#include "llvm/CodeGen/MachineFunctionPass.h"
#include "llvm/CodeGen/SwiftErrorValueTracking.h"
#include "llvm/CodeGen/SwitchLoweringUtils.h"
#include "llvm/IR/Intrinsics.h"
#include "llvm/Support/Allocator.h"
-#include "llvm/Support/CodeGen.h"
+#include "llvm/Support/CodeGen.h"
#include <memory>
#include <utility>
@@ -45,7 +45,7 @@ class BasicBlock;
class CallInst;
class CallLowering;
class Constant;
-class ConstrainedFPIntrinsic;
+class ConstrainedFPIntrinsic;
class DataLayout;
class Instruction;
class MachineBasicBlock;
@@ -226,14 +226,14 @@ private:
/// Translate an LLVM string intrinsic (memcpy, memset, ...).
bool translateMemFunc(const CallInst &CI, MachineIRBuilder &MIRBuilder,
- unsigned Opcode);
+ unsigned Opcode);
void getStackGuard(Register DstReg, MachineIRBuilder &MIRBuilder);
bool translateOverflowIntrinsic(const CallInst &CI, unsigned Op,
MachineIRBuilder &MIRBuilder);
- bool translateFixedPointIntrinsic(unsigned Op, const CallInst &CI,
- MachineIRBuilder &MIRBuilder);
+ bool translateFixedPointIntrinsic(unsigned Op, const CallInst &CI,
+ MachineIRBuilder &MIRBuilder);
/// Helper function for translateSimpleIntrinsic.
/// \return The generic opcode for \p IntrinsicID if \p IntrinsicID is a
@@ -267,19 +267,19 @@ private:
/// \pre \p U is a call instruction.
bool translateCall(const User &U, MachineIRBuilder &MIRBuilder);
- /// When an invoke or a cleanupret unwinds to the next EH pad, there are
- /// many places it could ultimately go. In the IR, we have a single unwind
- /// destination, but in the machine CFG, we enumerate all the possible blocks.
- /// This function skips over imaginary basic blocks that hold catchswitch
- /// instructions, and finds all the "real" machine
- /// basic block destinations. As those destinations may not be successors of
- /// EHPadBB, here we also calculate the edge probability to those
- /// destinations. The passed-in Prob is the edge probability to EHPadBB.
- bool findUnwindDestinations(
- const BasicBlock *EHPadBB, BranchProbability Prob,
- SmallVectorImpl<std::pair<MachineBasicBlock *, BranchProbability>>
- &UnwindDests);
-
+ /// When an invoke or a cleanupret unwinds to the next EH pad, there are
+ /// many places it could ultimately go. In the IR, we have a single unwind
+ /// destination, but in the machine CFG, we enumerate all the possible blocks.
+ /// This function skips over imaginary basic blocks that hold catchswitch
+ /// instructions, and finds all the "real" machine
+ /// basic block destinations. As those destinations may not be successors of
+ /// EHPadBB, here we also calculate the edge probability to those
+ /// destinations. The passed-in Prob is the edge probability to EHPadBB.
+ bool findUnwindDestinations(
+ const BasicBlock *EHPadBB, BranchProbability Prob,
+ SmallVectorImpl<std::pair<MachineBasicBlock *, BranchProbability>>
+ &UnwindDests);
+
bool translateInvoke(const User &U, MachineIRBuilder &MIRBuilder);
bool translateCallBr(const User &U, MachineIRBuilder &MIRBuilder);
@@ -311,37 +311,37 @@ private:
/// MachineBasicBlocks for the function have been created.
void finishPendingPhis();
- /// Translate \p Inst into a unary operation \p Opcode.
- /// \pre \p U is a unary operation.
- bool translateUnaryOp(unsigned Opcode, const User &U,
- MachineIRBuilder &MIRBuilder);
-
+ /// Translate \p Inst into a unary operation \p Opcode.
+ /// \pre \p U is a unary operation.
+ bool translateUnaryOp(unsigned Opcode, const User &U,
+ MachineIRBuilder &MIRBuilder);
+
/// Translate \p Inst into a binary operation \p Opcode.
/// \pre \p U is a binary operation.
bool translateBinaryOp(unsigned Opcode, const User &U,
MachineIRBuilder &MIRBuilder);
- /// If the set of cases should be emitted as a series of branches, return
- /// true. If we should emit this as a bunch of and/or'd together conditions,
- /// return false.
- bool shouldEmitAsBranches(const std::vector<SwitchCG::CaseBlock> &Cases);
- /// Helper method for findMergedConditions.
- /// This function emits a branch and is used at the leaves of an OR or an
- /// AND operator tree.
- void emitBranchForMergedCondition(const Value *Cond, MachineBasicBlock *TBB,
- MachineBasicBlock *FBB,
- MachineBasicBlock *CurBB,
- MachineBasicBlock *SwitchBB,
- BranchProbability TProb,
- BranchProbability FProb, bool InvertCond);
- /// Used during condbr translation to find trees of conditions that can be
- /// optimized.
- void findMergedConditions(const Value *Cond, MachineBasicBlock *TBB,
- MachineBasicBlock *FBB, MachineBasicBlock *CurBB,
- MachineBasicBlock *SwitchBB,
- Instruction::BinaryOps Opc, BranchProbability TProb,
- BranchProbability FProb, bool InvertCond);
-
+ /// If the set of cases should be emitted as a series of branches, return
+ /// true. If we should emit this as a bunch of and/or'd together conditions,
+ /// return false.
+ bool shouldEmitAsBranches(const std::vector<SwitchCG::CaseBlock> &Cases);
+ /// Helper method for findMergedConditions.
+ /// This function emits a branch and is used at the leaves of an OR or an
+ /// AND operator tree.
+ void emitBranchForMergedCondition(const Value *Cond, MachineBasicBlock *TBB,
+ MachineBasicBlock *FBB,
+ MachineBasicBlock *CurBB,
+ MachineBasicBlock *SwitchBB,
+ BranchProbability TProb,
+ BranchProbability FProb, bool InvertCond);
+ /// Used during condbr translation to find trees of conditions that can be
+ /// optimized.
+ void findMergedConditions(const Value *Cond, MachineBasicBlock *TBB,
+ MachineBasicBlock *FBB, MachineBasicBlock *CurBB,
+ MachineBasicBlock *SwitchBB,
+ Instruction::BinaryOps Opc, BranchProbability TProb,
+ BranchProbability FProb, bool InvertCond);
+
/// Translate branch (br) instruction.
/// \pre \p U is a branch instruction.
bool translateBr(const User &U, MachineIRBuilder &MIRBuilder);
@@ -355,23 +355,23 @@ private:
void emitSwitchCase(SwitchCG::CaseBlock &CB, MachineBasicBlock *SwitchBB,
MachineIRBuilder &MIB);
- /// Generate code for the BitTest header block, which precedes each sequence of
- /// BitTestCases.
- void emitBitTestHeader(SwitchCG::BitTestBlock &BTB,
- MachineBasicBlock *SwitchMBB);
- /// Generate code to produce one "bit test" for a given BitTestCase \p B.
- void emitBitTestCase(SwitchCG::BitTestBlock &BB, MachineBasicBlock *NextMBB,
- BranchProbability BranchProbToNext, Register Reg,
- SwitchCG::BitTestCase &B, MachineBasicBlock *SwitchBB);
-
- bool lowerJumpTableWorkItem(
- SwitchCG::SwitchWorkListItem W, MachineBasicBlock *SwitchMBB,
- MachineBasicBlock *CurMBB, MachineBasicBlock *DefaultMBB,
- MachineIRBuilder &MIB, MachineFunction::iterator BBI,
- BranchProbability UnhandledProbs, SwitchCG::CaseClusterIt I,
- MachineBasicBlock *Fallthrough, bool FallthroughUnreachable);
-
- bool lowerSwitchRangeWorkItem(SwitchCG::CaseClusterIt I, Value *Cond,
+ /// Generate code for the BitTest header block, which precedes each sequence of
+ /// BitTestCases.
+ void emitBitTestHeader(SwitchCG::BitTestBlock &BTB,
+ MachineBasicBlock *SwitchMBB);
+ /// Generate code to produce one "bit test" for a given BitTestCase \p B.
+ void emitBitTestCase(SwitchCG::BitTestBlock &BB, MachineBasicBlock *NextMBB,
+ BranchProbability BranchProbToNext, Register Reg,
+ SwitchCG::BitTestCase &B, MachineBasicBlock *SwitchBB);
+
+ bool lowerJumpTableWorkItem(
+ SwitchCG::SwitchWorkListItem W, MachineBasicBlock *SwitchMBB,
+ MachineBasicBlock *CurMBB, MachineBasicBlock *DefaultMBB,
+ MachineIRBuilder &MIB, MachineFunction::iterator BBI,
+ BranchProbability UnhandledProbs, SwitchCG::CaseClusterIt I,
+ MachineBasicBlock *Fallthrough, bool FallthroughUnreachable);
+
+ bool lowerSwitchRangeWorkItem(SwitchCG::CaseClusterIt I, Value *Cond,
MachineBasicBlock *Fallthrough,
bool FallthroughUnreachable,
BranchProbability UnhandledProbs,
@@ -379,14 +379,14 @@ private:
MachineIRBuilder &MIB,
MachineBasicBlock *SwitchMBB);
- bool lowerBitTestWorkItem(
- SwitchCG::SwitchWorkListItem W, MachineBasicBlock *SwitchMBB,
- MachineBasicBlock *CurMBB, MachineBasicBlock *DefaultMBB,
- MachineIRBuilder &MIB, MachineFunction::iterator BBI,
- BranchProbability DefaultProb, BranchProbability UnhandledProbs,
- SwitchCG::CaseClusterIt I, MachineBasicBlock *Fallthrough,
- bool FallthroughUnreachable);
-
+ bool lowerBitTestWorkItem(
+ SwitchCG::SwitchWorkListItem W, MachineBasicBlock *SwitchMBB,
+ MachineBasicBlock *CurMBB, MachineBasicBlock *DefaultMBB,
+ MachineIRBuilder &MIB, MachineFunction::iterator BBI,
+ BranchProbability DefaultProb, BranchProbability UnhandledProbs,
+ SwitchCG::CaseClusterIt I, MachineBasicBlock *Fallthrough,
+ bool FallthroughUnreachable);
+
bool lowerSwitchWorkItem(SwitchCG::SwitchWorkListItem W, Value *Cond,
MachineBasicBlock *SwitchMBB,
MachineBasicBlock *DefaultMBB,
@@ -497,9 +497,9 @@ private:
bool translateFAdd(const User &U, MachineIRBuilder &MIRBuilder) {
return translateBinaryOp(TargetOpcode::G_FADD, U, MIRBuilder);
}
- bool translateFSub(const User &U, MachineIRBuilder &MIRBuilder) {
- return translateBinaryOp(TargetOpcode::G_FSUB, U, MIRBuilder);
- }
+ bool translateFSub(const User &U, MachineIRBuilder &MIRBuilder) {
+ return translateBinaryOp(TargetOpcode::G_FSUB, U, MIRBuilder);
+ }
bool translateFMul(const User &U, MachineIRBuilder &MIRBuilder) {
return translateBinaryOp(TargetOpcode::G_FMUL, U, MIRBuilder);
}
@@ -578,8 +578,8 @@ private:
/// Current target configuration. Controls how the pass handles errors.
const TargetPassConfig *TPC;
- CodeGenOpt::Level OptLevel;
-
+ CodeGenOpt::Level OptLevel;
+
/// Current optimization remark emitter. Used to report failures.
std::unique_ptr<OptimizationRemarkEmitter> ORE;
@@ -679,12 +679,12 @@ private:
BranchProbability getEdgeProbability(const MachineBasicBlock *Src,
const MachineBasicBlock *Dst) const;
- void addSuccessorWithProb(
- MachineBasicBlock *Src, MachineBasicBlock *Dst,
- BranchProbability Prob = BranchProbability::getUnknown());
+ void addSuccessorWithProb(
+ MachineBasicBlock *Src, MachineBasicBlock *Dst,
+ BranchProbability Prob = BranchProbability::getUnknown());
public:
- IRTranslator(CodeGenOpt::Level OptLevel = CodeGenOpt::None);
+ IRTranslator(CodeGenOpt::Level OptLevel = CodeGenOpt::None);
StringRef getPassName() const override { return "IRTranslator"; }
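The new OptLevel parameter is supplied by the target's pass configuration; the usual in-tree pattern simply forwards the pipeline's level when the translator is registered (XXXPassConfig is an invented stand-in for a target's TargetPassConfig subclass):

    #include "llvm/CodeGen/GlobalISel/IRTranslator.h"
    #include "llvm/CodeGen/TargetPassConfig.h"
    using namespace llvm;

    struct XXXPassConfig : TargetPassConfig {  // hypothetical target config
      using TargetPassConfig::TargetPassConfig;
      bool addIRTranslator() override {
        // Forward the pipeline's level so the translator can skip, e.g.,
        // probability-driven switch-lowering effort at -O0.
        addPass(new IRTranslator(getOptLevel()));
        return false;  // false = pass added, per TargetPassConfig convention
      }
    };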
diff --git a/contrib/libs/llvm12/include/llvm/CodeGen/GlobalISel/InstructionSelector.h b/contrib/libs/llvm12/include/llvm/CodeGen/GlobalISel/InstructionSelector.h
index 61123ff85f..d61e273ce1 100644
--- a/contrib/libs/llvm12/include/llvm/CodeGen/GlobalISel/InstructionSelector.h
+++ b/contrib/libs/llvm12/include/llvm/CodeGen/GlobalISel/InstructionSelector.h
@@ -119,14 +119,14 @@ enum {
/// - InsnID - Instruction ID
/// - Expected opcode
GIM_CheckOpcode,
-
- /// Check the opcode on the specified instruction, checking 2 acceptable
- /// alternatives.
- /// - InsnID - Instruction ID
- /// - Expected opcode
- /// - Alternative expected opcode
- GIM_CheckOpcodeIsEither,
-
+
+ /// Check the opcode on the specified instruction, checking 2 acceptable
+ /// alternatives.
+ /// - InsnID - Instruction ID
+ /// - Expected opcode
+ /// - Alternative expected opcode
+ GIM_CheckOpcodeIsEither,
+
/// Check the instruction has the right number of operands
/// - InsnID - Instruction ID
/// - Expected number of operands
@@ -179,15 +179,15 @@ enum {
GIM_CheckMemorySizeEqualToLLT,
GIM_CheckMemorySizeLessThanLLT,
GIM_CheckMemorySizeGreaterThanLLT,
-
- /// Check if this is a vector that can be treated as a vector splat
- /// constant. This is valid for both G_BUILD_VECTOR and
- /// G_BUILD_VECTOR_TRUNC. AllOnes refers to individual bits, so a -1
- /// element.
- /// - InsnID - Instruction ID
- GIM_CheckIsBuildVectorAllOnes,
- GIM_CheckIsBuildVectorAllZeros,
-
+
+ /// Check if this is a vector that can be treated as a vector splat
+ /// constant. This is valid for both G_BUILD_VECTOR and
+ /// G_BUILD_VECTOR_TRUNC. AllOnes refers to individual bits, so a -1
+ /// element.
+ /// - InsnID - Instruction ID
+ GIM_CheckIsBuildVectorAllOnes,
+ GIM_CheckIsBuildVectorAllZeros,
+
/// Check a generic C++ instruction predicate
/// - InsnID - Instruction ID
/// - PredicateID - The ID of the predicate function to call
@@ -261,15 +261,15 @@ enum {
/// - OtherOpIdx - Other operand index
GIM_CheckIsSameOperand,
- /// Predicates with 'let PredicateCodeUsesOperands = 1' need to examine some
- /// named operands that will be recorded in RecordedOperands. Names of these
- /// operands are referenced in the predicate argument list. The emitter
- /// determines StoreIdx (corresponding to the order in which names appear in
- /// the argument list).
- /// - InsnID - Instruction ID
- /// - OpIdx - Operand index
- /// - StoreIdx - Store location in RecordedOperands.
- GIM_RecordNamedOperand,
-
+ /// Predicates with 'let PredicateCodeUsesOperands = 1' need to examine some
+ /// named operands that will be recorded in RecordedOperands. Names of these
+ /// operands are referenced in the predicate argument list. The emitter
+ /// determines StoreIdx (corresponding to the order in which names appear in
+ /// the argument list).
+ /// - InsnID - Instruction ID
+ /// - OpIdx - Operand index
+ /// - StoreIdx - Store location in RecordedOperands.
+ GIM_RecordNamedOperand,
+
/// Fail the current try-block, or completely fail to match if there is no
/// current try-block.
GIM_Reject,
@@ -462,11 +462,11 @@ protected:
std::vector<ComplexRendererFns::value_type> Renderers;
RecordedMIVector MIs;
DenseMap<unsigned, unsigned> TempRegisters;
- /// Named operands that a predicate with 'let PredicateCodeUsesOperands = 1'
- /// references in its argument list. Operands are inserted at the index set
- /// by the emitter; it corresponds to the order in which names appear in the
- /// argument list. Currently such predicates don't have more than 3 arguments.
- std::array<const MachineOperand *, 3> RecordedOperands;
+ /// Named operands that a predicate with 'let PredicateCodeUsesOperands = 1'
+ /// references in its argument list. Operands are inserted at the index set
+ /// by the emitter; it corresponds to the order in which names appear in the
+ /// argument list. Currently such predicates don't have more than 3 arguments.
+ std::array<const MachineOperand *, 3> RecordedOperands;
MatcherState(unsigned MaxRenderers);
};
@@ -527,9 +527,9 @@ protected:
llvm_unreachable(
"Subclasses must override this with a tablegen-erated function");
}
- virtual bool testMIPredicate_MI(
- unsigned, const MachineInstr &,
- const std::array<const MachineOperand *, 3> &Operands) const {
+ virtual bool testMIPredicate_MI(
+ unsigned, const MachineInstr &,
+ const std::array<const MachineOperand *, 3> &Operands) const {
llvm_unreachable(
"Subclasses must override this with a tablegen-erated function");
}
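The matcher opcodes above are laid out as flat integer streams that executeMatchTable() walks. A hand-written fragment, purely illustrative since TableGen emits the real tables and the operand choices here are assumptions:

    // Illustrative match-table fragment; not TableGen output.
    const int64_t MatchTable[] = {
        // Accept either build-vector opcode on the root instruction (InsnID 0).
        GIM_CheckOpcodeIsEither, /*InsnID*/ 0,
        TargetOpcode::G_BUILD_VECTOR, TargetOpcode::G_BUILD_VECTOR_TRUNC,
        // Stash operand 1 into RecordedOperands[0] for a
        // 'PredicateCodeUsesOperands' predicate to inspect later.
        GIM_RecordNamedOperand, /*InsnID*/ 0, /*OpIdx*/ 1, /*StoreIdx*/ 0,
        // No renderers follow in this fragment, so bail out.
        GIM_Reject,
    };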
diff --git a/contrib/libs/llvm12/include/llvm/CodeGen/GlobalISel/InstructionSelectorImpl.h b/contrib/libs/llvm12/include/llvm/CodeGen/GlobalISel/InstructionSelectorImpl.h
index e3202fc976..b5885ff663 100644
--- a/contrib/libs/llvm12/include/llvm/CodeGen/GlobalISel/InstructionSelectorImpl.h
+++ b/contrib/libs/llvm12/include/llvm/CodeGen/GlobalISel/InstructionSelectorImpl.h
@@ -161,26 +161,26 @@ bool InstructionSelector::executeMatchTable(
break;
}
- case GIM_CheckOpcode:
- case GIM_CheckOpcodeIsEither: {
+ case GIM_CheckOpcode:
+ case GIM_CheckOpcodeIsEither: {
int64_t InsnID = MatchTable[CurrentIdx++];
- int64_t Expected0 = MatchTable[CurrentIdx++];
- int64_t Expected1 = -1;
- if (MatcherOpcode == GIM_CheckOpcodeIsEither)
- Expected1 = MatchTable[CurrentIdx++];
+ int64_t Expected0 = MatchTable[CurrentIdx++];
+ int64_t Expected1 = -1;
+ if (MatcherOpcode == GIM_CheckOpcodeIsEither)
+ Expected1 = MatchTable[CurrentIdx++];
assert(State.MIs[InsnID] != nullptr && "Used insn before defined");
unsigned Opcode = State.MIs[InsnID]->getOpcode();
DEBUG_WITH_TYPE(TgtInstructionSelector::getName(),
- dbgs() << CurrentIdx << ": GIM_CheckOpcode(MIs[" << InsnID
- << "], ExpectedOpcode=" << Expected0;
- if (MatcherOpcode == GIM_CheckOpcodeIsEither)
- dbgs() << " || " << Expected1;
- dbgs() << ") // Got=" << Opcode << "\n";
- );
-
- if (Opcode != Expected0 && Opcode != Expected1) {
+ dbgs() << CurrentIdx << ": GIM_CheckOpcode(MIs[" << InsnID
+ << "], ExpectedOpcode=" << Expected0;
+ if (MatcherOpcode == GIM_CheckOpcodeIsEither)
+ dbgs() << " || " << Expected1;
+ dbgs() << ") // Got=" << Opcode << "\n";
+ );
+
+ if (Opcode != Expected0 && Opcode != Expected1) {
if (handleReject() == RejectAndGiveUp)
return false;
}
@@ -207,7 +207,7 @@ bool InstructionSelector::executeMatchTable(
CurrentIdx = MatchTable[CurrentIdx + (Opcode - LowerBound)];
if (!CurrentIdx) {
CurrentIdx = Default;
- break;
+ break;
}
OnFailResumeAt.push_back(Default);
break;
@@ -335,35 +335,35 @@ bool InstructionSelector::executeMatchTable(
return false;
break;
}
- case GIM_CheckIsBuildVectorAllOnes:
- case GIM_CheckIsBuildVectorAllZeros: {
- int64_t InsnID = MatchTable[CurrentIdx++];
-
- DEBUG_WITH_TYPE(TgtInstructionSelector::getName(),
- dbgs() << CurrentIdx
- << ": GIM_CheckBuildVectorAll{Zeros|Ones}(MIs["
- << InsnID << "])\n");
- assert(State.MIs[InsnID] != nullptr && "Used insn before defined");
-
- const MachineInstr *MI = State.MIs[InsnID];
- assert((MI->getOpcode() == TargetOpcode::G_BUILD_VECTOR ||
- MI->getOpcode() == TargetOpcode::G_BUILD_VECTOR_TRUNC) &&
- "Expected G_BUILD_VECTOR or G_BUILD_VECTOR_TRUNC");
-
- if (MatcherOpcode == GIM_CheckIsBuildVectorAllOnes) {
- if (!isBuildVectorAllOnes(*MI, MRI)) {
- if (handleReject() == RejectAndGiveUp)
- return false;
- }
- } else {
- if (!isBuildVectorAllZeros(*MI, MRI)) {
- if (handleReject() == RejectAndGiveUp)
- return false;
- }
- }
-
- break;
- }
+ case GIM_CheckIsBuildVectorAllOnes:
+ case GIM_CheckIsBuildVectorAllZeros: {
+ int64_t InsnID = MatchTable[CurrentIdx++];
+
+ DEBUG_WITH_TYPE(TgtInstructionSelector::getName(),
+ dbgs() << CurrentIdx
+ << ": GIM_CheckBuildVectorAll{Zeros|Ones}(MIs["
+ << InsnID << "])\n");
+ assert(State.MIs[InsnID] != nullptr && "Used insn before defined");
+
+ const MachineInstr *MI = State.MIs[InsnID];
+ assert((MI->getOpcode() == TargetOpcode::G_BUILD_VECTOR ||
+ MI->getOpcode() == TargetOpcode::G_BUILD_VECTOR_TRUNC) &&
+ "Expected G_BUILD_VECTOR or G_BUILD_VECTOR_TRUNC");
+
+ if (MatcherOpcode == GIM_CheckIsBuildVectorAllOnes) {
+ if (!isBuildVectorAllOnes(*MI, MRI)) {
+ if (handleReject() == RejectAndGiveUp)
+ return false;
+ }
+ } else {
+ if (!isBuildVectorAllZeros(*MI, MRI)) {
+ if (handleReject() == RejectAndGiveUp)
+ return false;
+ }
+ }
+
+ break;
+ }
case GIM_CheckCxxInsnPredicate: {
int64_t InsnID = MatchTable[CurrentIdx++];
int64_t Predicate = MatchTable[CurrentIdx++];
@@ -374,8 +374,8 @@ bool InstructionSelector::executeMatchTable(
assert(State.MIs[InsnID] != nullptr && "Used insn before defined");
assert(Predicate > GIPFP_MI_Invalid && "Expected a valid predicate");
- if (!testMIPredicate_MI(Predicate, *State.MIs[InsnID],
- State.RecordedOperands))
+ if (!testMIPredicate_MI(Predicate, *State.MIs[InsnID],
+ State.RecordedOperands))
if (handleReject() == RejectAndGiveUp)
return false;
break;
@@ -625,20 +625,20 @@ bool InstructionSelector::executeMatchTable(
break;
}
- case GIM_RecordNamedOperand: {
- int64_t InsnID = MatchTable[CurrentIdx++];
- int64_t OpIdx = MatchTable[CurrentIdx++];
- uint64_t StoreIdx = MatchTable[CurrentIdx++];
-
- DEBUG_WITH_TYPE(TgtInstructionSelector::getName(),
- dbgs() << CurrentIdx << ": GIM_RecordNamedOperand(MIs["
- << InsnID << "]->getOperand(" << OpIdx
- << "), StoreIdx=" << StoreIdx << ")\n");
- assert(State.MIs[InsnID] != nullptr && "Used insn before defined");
- assert(StoreIdx < State.RecordedOperands.size() && "Index out of range");
- State.RecordedOperands[StoreIdx] = &State.MIs[InsnID]->getOperand(OpIdx);
- break;
- }
+ case GIM_RecordNamedOperand: {
+ int64_t InsnID = MatchTable[CurrentIdx++];
+ int64_t OpIdx = MatchTable[CurrentIdx++];
+ uint64_t StoreIdx = MatchTable[CurrentIdx++];
+
+ DEBUG_WITH_TYPE(TgtInstructionSelector::getName(),
+ dbgs() << CurrentIdx << ": GIM_RecordNamedOperand(MIs["
+ << InsnID << "]->getOperand(" << OpIdx
+ << "), StoreIdx=" << StoreIdx << ")\n");
+ assert(State.MIs[InsnID] != nullptr && "Used insn before defined");
+ assert(StoreIdx < State.RecordedOperands.size() && "Index out of range");
+ State.RecordedOperands[StoreIdx] = &State.MIs[InsnID]->getOperand(OpIdx);
+ break;
+ }
case GIM_CheckRegBankForClass: {
int64_t InsnID = MatchTable[CurrentIdx++];
int64_t OpIdx = MatchTable[CurrentIdx++];
@@ -1065,12 +1065,12 @@ bool InstructionSelector::executeMatchTable(
int64_t OpIdx = MatchTable[CurrentIdx++];
int64_t RCEnum = MatchTable[CurrentIdx++];
assert(OutMIs[InsnID] && "Attempted to add to undefined instruction");
- MachineInstr &I = *OutMIs[InsnID].getInstr();
- MachineFunction &MF = *I.getParent()->getParent();
- MachineRegisterInfo &MRI = MF.getRegInfo();
- const TargetRegisterClass &RC = *TRI.getRegClass(RCEnum);
- MachineOperand &MO = I.getOperand(OpIdx);
- constrainOperandRegClass(MF, TRI, MRI, TII, RBI, I, RC, MO);
+ MachineInstr &I = *OutMIs[InsnID].getInstr();
+ MachineFunction &MF = *I.getParent()->getParent();
+ MachineRegisterInfo &MRI = MF.getRegInfo();
+ const TargetRegisterClass &RC = *TRI.getRegClass(RCEnum);
+ MachineOperand &MO = I.getOperand(OpIdx);
+ constrainOperandRegClass(MF, TRI, MRI, TII, RBI, I, RC, MO);
DEBUG_WITH_TYPE(TgtInstructionSelector::getName(),
dbgs() << CurrentIdx << ": GIR_ConstrainOperandRC(OutMIs["
<< InsnID << "], " << OpIdx << ", " << RCEnum
diff --git a/contrib/libs/llvm12/include/llvm/CodeGen/GlobalISel/LegalizationArtifactCombiner.h b/contrib/libs/llvm12/include/llvm/CodeGen/GlobalISel/LegalizationArtifactCombiner.h
index 9bed6d039e..0d476efb2c 100644
--- a/contrib/libs/llvm12/include/llvm/CodeGen/GlobalISel/LegalizationArtifactCombiner.h
+++ b/contrib/libs/llvm12/include/llvm/CodeGen/GlobalISel/LegalizationArtifactCombiner.h
@@ -112,23 +112,23 @@ public:
Register SrcReg = lookThroughCopyInstrs(MI.getOperand(1).getReg());
// zext(trunc x) -> and (aext/copy/trunc x), mask
- // zext(sext x) -> and (sext x), mask
+ // zext(sext x) -> and (sext x), mask
Register TruncSrc;
- Register SextSrc;
- if (mi_match(SrcReg, MRI, m_GTrunc(m_Reg(TruncSrc))) ||
- mi_match(SrcReg, MRI, m_GSExt(m_Reg(SextSrc)))) {
+ Register SextSrc;
+ if (mi_match(SrcReg, MRI, m_GTrunc(m_Reg(TruncSrc))) ||
+ mi_match(SrcReg, MRI, m_GSExt(m_Reg(SextSrc)))) {
LLT DstTy = MRI.getType(DstReg);
if (isInstUnsupported({TargetOpcode::G_AND, {DstTy}}) ||
isConstantUnsupported(DstTy))
return false;
LLVM_DEBUG(dbgs() << ".. Combine MI: " << MI;);
LLT SrcTy = MRI.getType(SrcReg);
- APInt MaskVal = APInt::getAllOnesValue(SrcTy.getScalarSizeInBits());
- auto Mask = Builder.buildConstant(
- DstTy, MaskVal.zext(DstTy.getScalarSizeInBits()));
- auto Extended = SextSrc ? Builder.buildSExtOrTrunc(DstTy, SextSrc) :
- Builder.buildAnyExtOrTrunc(DstTy, TruncSrc);
- Builder.buildAnd(DstReg, Extended, Mask);
+ APInt MaskVal = APInt::getAllOnesValue(SrcTy.getScalarSizeInBits());
+ auto Mask = Builder.buildConstant(
+ DstTy, MaskVal.zext(DstTy.getScalarSizeInBits()));
+ auto Extended = SextSrc ? Builder.buildSExtOrTrunc(DstTy, SextSrc) :
+ Builder.buildAnyExtOrTrunc(DstTy, TruncSrc);
+ Builder.buildAnd(DstReg, Extended, Mask);
markInstAndDefDead(MI, *MRI.getVRegDef(SrcReg), DeadInsts);
return true;
}
@@ -493,7 +493,7 @@ public:
MachineRegisterInfo &MRI,
MachineIRBuilder &Builder,
SmallVectorImpl<Register> &UpdatedDefs,
- GISelChangeObserver &Observer) {
+ GISelChangeObserver &Observer) {
if (!llvm::canReplaceReg(DstReg, SrcReg, MRI)) {
Builder.buildCopy(DstReg, SrcReg);
UpdatedDefs.push_back(DstReg);
@@ -513,78 +513,78 @@ public:
Observer.changedInstr(*UseMI);
}
- /// Return the operand index in \p MI that defines \p SearchDef
- static unsigned getDefIndex(const MachineInstr &MI, Register SearchDef) {
- unsigned DefIdx = 0;
- for (const MachineOperand &Def : MI.defs()) {
- if (Def.getReg() == SearchDef)
- break;
- ++DefIdx;
- }
-
- return DefIdx;
- }
-
- bool tryCombineUnmergeValues(MachineInstr &MI,
- SmallVectorImpl<MachineInstr *> &DeadInsts,
- SmallVectorImpl<Register> &UpdatedDefs,
- GISelChangeObserver &Observer) {
+ /// Return the operand index in \p MI that defines \p SearchDef
+ static unsigned getDefIndex(const MachineInstr &MI, Register SearchDef) {
+ unsigned DefIdx = 0;
+ for (const MachineOperand &Def : MI.defs()) {
+ if (Def.getReg() == SearchDef)
+ break;
+ ++DefIdx;
+ }
+
+ return DefIdx;
+ }
+
+ bool tryCombineUnmergeValues(MachineInstr &MI,
+ SmallVectorImpl<MachineInstr *> &DeadInsts,
+ SmallVectorImpl<Register> &UpdatedDefs,
+ GISelChangeObserver &Observer) {
assert(MI.getOpcode() == TargetOpcode::G_UNMERGE_VALUES);
unsigned NumDefs = MI.getNumOperands() - 1;
- Register SrcReg = MI.getOperand(NumDefs).getReg();
- MachineInstr *SrcDef = getDefIgnoringCopies(SrcReg, MRI);
+ Register SrcReg = MI.getOperand(NumDefs).getReg();
+ MachineInstr *SrcDef = getDefIgnoringCopies(SrcReg, MRI);
if (!SrcDef)
return false;
LLT OpTy = MRI.getType(MI.getOperand(NumDefs).getReg());
LLT DestTy = MRI.getType(MI.getOperand(0).getReg());
-
- if (SrcDef->getOpcode() == TargetOpcode::G_UNMERGE_VALUES) {
- // %0:_(<4 x s16>) = G_FOO
- // %1:_(<2 x s16>), %2:_(<2 x s16>) = G_UNMERGE_VALUES %0
- // %3:_(s16), %4:_(s16) = G_UNMERGE_VALUES %1
- //
- // %3:_(s16), %4:_(s16), %5:_(s16), %6:_(s16) = G_UNMERGE_VALUES %0
- const unsigned NumSrcOps = SrcDef->getNumOperands();
- Register SrcUnmergeSrc = SrcDef->getOperand(NumSrcOps - 1).getReg();
- LLT SrcUnmergeSrcTy = MRI.getType(SrcUnmergeSrc);
-
- // If we need to decrease the number of vector elements in the result type
- // of an unmerge, this would involve the creation of an equivalent unmerge
- // to copy back to the original result registers.
- LegalizeActionStep ActionStep = LI.getAction(
- {TargetOpcode::G_UNMERGE_VALUES, {OpTy, SrcUnmergeSrcTy}});
- switch (ActionStep.Action) {
- case LegalizeActions::Lower:
- case LegalizeActions::Unsupported:
- break;
- case LegalizeActions::FewerElements:
- case LegalizeActions::NarrowScalar:
- if (ActionStep.TypeIdx == 1)
- return false;
- break;
- default:
- return false;
- }
-
- Builder.setInstrAndDebugLoc(MI);
- auto NewUnmerge = Builder.buildUnmerge(DestTy, SrcUnmergeSrc);
-
- // TODO: Should we try to process out the other defs now? If the other
- // defs of the source unmerge are also unmerged, we end up with a separate
- // unmerge for each one.
- unsigned SrcDefIdx = getDefIndex(*SrcDef, SrcReg);
- for (unsigned I = 0; I != NumDefs; ++I) {
- Register Def = MI.getOperand(I).getReg();
- replaceRegOrBuildCopy(Def, NewUnmerge.getReg(SrcDefIdx * NumDefs + I),
- MRI, Builder, UpdatedDefs, Observer);
- }
-
- markInstAndDefDead(MI, *SrcDef, DeadInsts, SrcDefIdx);
- return true;
- }
-
+
+ if (SrcDef->getOpcode() == TargetOpcode::G_UNMERGE_VALUES) {
+ // %0:_(<4 x s16>) = G_FOO
+ // %1:_(<2 x s16>), %2:_(<2 x s16>) = G_UNMERGE_VALUES %0
+ // %3:_(s16), %4:_(s16) = G_UNMERGE_VALUES %1
+ //
+ // %3:_(s16), %4:_(s16), %5:_(s16), %6:_(s16) = G_UNMERGE_VALUES %0
+ const unsigned NumSrcOps = SrcDef->getNumOperands();
+ Register SrcUnmergeSrc = SrcDef->getOperand(NumSrcOps - 1).getReg();
+ LLT SrcUnmergeSrcTy = MRI.getType(SrcUnmergeSrc);
+
+ // If we need to decrease the number of vector elements in the result type
+ // of an unmerge, this would involve the creation of an equivalent unmerge
+ // to copy back to the original result registers.
+ LegalizeActionStep ActionStep = LI.getAction(
+ {TargetOpcode::G_UNMERGE_VALUES, {OpTy, SrcUnmergeSrcTy}});
+ switch (ActionStep.Action) {
+ case LegalizeActions::Lower:
+ case LegalizeActions::Unsupported:
+ break;
+ case LegalizeActions::FewerElements:
+ case LegalizeActions::NarrowScalar:
+ if (ActionStep.TypeIdx == 1)
+ return false;
+ break;
+ default:
+ return false;
+ }
+
+ Builder.setInstrAndDebugLoc(MI);
+ auto NewUnmerge = Builder.buildUnmerge(DestTy, SrcUnmergeSrc);
+
+ // TODO: Should we try to process out the other defs now? If the other
+ // defs of the source unmerge are also unmerged, we end up with a separate
+ // unmerge for each one.
+ unsigned SrcDefIdx = getDefIndex(*SrcDef, SrcReg);
+ for (unsigned I = 0; I != NumDefs; ++I) {
+ Register Def = MI.getOperand(I).getReg();
+ replaceRegOrBuildCopy(Def, NewUnmerge.getReg(SrcDefIdx * NumDefs + I),
+ MRI, Builder, UpdatedDefs, Observer);
+ }
+
+ markInstAndDefDead(MI, *SrcDef, DeadInsts, SrcDefIdx);
+ return true;
+ }
+
MachineInstr *MergeI = SrcDef;
unsigned ConvertOp = 0;
@@ -812,12 +812,12 @@ public:
Changed = tryCombineSExt(MI, DeadInsts, UpdatedDefs);
break;
case TargetOpcode::G_UNMERGE_VALUES:
- Changed =
- tryCombineUnmergeValues(MI, DeadInsts, UpdatedDefs, WrapperObserver);
+ Changed =
+ tryCombineUnmergeValues(MI, DeadInsts, UpdatedDefs, WrapperObserver);
break;
case TargetOpcode::G_MERGE_VALUES:
- case TargetOpcode::G_BUILD_VECTOR:
- case TargetOpcode::G_CONCAT_VECTORS:
+ case TargetOpcode::G_BUILD_VECTOR:
+ case TargetOpcode::G_CONCAT_VECTORS:
// If any of the users of this merge are an unmerge, then add them to the
// artifact worklist in case there's folding that can be done looking up.
for (MachineInstr &U : MRI.use_instructions(MI.getOperand(0).getReg())) {
@@ -901,8 +901,8 @@ private:
/// dead.
/// MI is not marked dead.
void markDefDead(MachineInstr &MI, MachineInstr &DefMI,
- SmallVectorImpl<MachineInstr *> &DeadInsts,
- unsigned DefIdx = 0) {
+ SmallVectorImpl<MachineInstr *> &DeadInsts,
+ unsigned DefIdx = 0) {
// Collect all the copy instructions that are made dead due to deleting
// this instruction. Collect all of them until the Trunc(DefMI).
// E.g.,
@@ -929,27 +929,27 @@ private:
break;
PrevMI = TmpDef;
}
-
- if (PrevMI == &DefMI) {
- unsigned I = 0;
- bool IsDead = true;
- for (MachineOperand &Def : DefMI.defs()) {
- if (I != DefIdx) {
- if (!MRI.use_empty(Def.getReg())) {
- IsDead = false;
- break;
- }
- } else {
- if (!MRI.hasOneUse(DefMI.getOperand(DefIdx).getReg()))
- break;
- }
-
- ++I;
- }
-
- if (IsDead)
- DeadInsts.push_back(&DefMI);
- }
+
+ if (PrevMI == &DefMI) {
+ unsigned I = 0;
+ bool IsDead = true;
+ for (MachineOperand &Def : DefMI.defs()) {
+ if (I != DefIdx) {
+ if (!MRI.use_empty(Def.getReg())) {
+ IsDead = false;
+ break;
+ }
+ } else {
+ if (!MRI.hasOneUse(DefMI.getOperand(DefIdx).getReg()))
+ break;
+ }
+
+ ++I;
+ }
+
+ if (IsDead)
+ DeadInsts.push_back(&DefMI);
+ }
}
/// Mark MI as dead. If a def of one of MI's operands, DefMI, would also be
@@ -958,10 +958,10 @@ private:
/// copies in between the extends and the truncs, and this attempts to collect
/// the in-between copies if they're dead.
void markInstAndDefDead(MachineInstr &MI, MachineInstr &DefMI,
- SmallVectorImpl<MachineInstr *> &DeadInsts,
- unsigned DefIdx = 0) {
+ SmallVectorImpl<MachineInstr *> &DeadInsts,
+ unsigned DefIdx = 0) {
DeadInsts.push_back(&MI);
- markDefDead(MI, DefMI, DeadInsts, DefIdx);
+ markDefDead(MI, DefMI, DeadInsts, DefIdx);
}
/// Erase the dead instructions in the list and call the observer hooks.
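To make the zext(trunc x) combine at the top of this file's diff concrete: for a 32-bit destination and a 16-bit truncation, the builder calls shown in the hunk produce an AND with a 0xFFFF mask. A hedged sketch; the types, register names, and surrounding combine body are assumptions:

    // Sketch of the rewrite for: %1:_(s16) = G_TRUNC %0:_(s32)
    //                            %2:_(s32) = G_ZEXT %1
    LLT S32 = LLT::scalar(32);
    APInt MaskVal = APInt::getAllOnesValue(16).zext(32); // 0x0000FFFF
    auto Mask = Builder.buildConstant(S32, MaskVal);
    // TruncSrc is %0; the any-extend folds to a copy since the types match.
    auto Extended = Builder.buildAnyExtOrTrunc(S32, TruncSrc);
    Builder.buildAnd(DstReg, Extended, Mask); // %2 = G_AND %0, 0xFFFF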
diff --git a/contrib/libs/llvm12/include/llvm/CodeGen/GlobalISel/LegalizerHelper.h b/contrib/libs/llvm12/include/llvm/CodeGen/GlobalISel/LegalizerHelper.h
index bf45c5e673..050df84035 100644
--- a/contrib/libs/llvm12/include/llvm/CodeGen/GlobalISel/LegalizerHelper.h
+++ b/contrib/libs/llvm12/include/llvm/CodeGen/GlobalISel/LegalizerHelper.h
@@ -39,7 +39,7 @@ class LegalizerInfo;
class Legalizer;
class MachineRegisterInfo;
class GISelChangeObserver;
-class TargetLowering;
+class TargetLowering;
class LegalizerHelper {
public:
@@ -53,7 +53,7 @@ public:
private:
MachineRegisterInfo &MRI;
const LegalizerInfo &LI;
- const TargetLowering &TLI;
+ const TargetLowering &TLI;
public:
enum LegalizeResult {
@@ -71,7 +71,7 @@ public:
/// Expose LegalizerInfo so the clients can re-use.
const LegalizerInfo &getLegalizerInfo() const { return LI; }
- const TargetLowering &getTargetLowering() const { return TLI; }
+ const TargetLowering &getTargetLowering() const { return TLI; }
LegalizerHelper(MachineFunction &MF, GISelChangeObserver &Observer,
MachineIRBuilder &B);
@@ -164,10 +164,10 @@ public:
/// def by inserting a G_BITCAST from \p CastTy
void bitcastDst(MachineInstr &MI, LLT CastTy, unsigned OpIdx);
- /// Widen \p OrigReg to \p WideTy by merging to a wider type, padding with
- /// G_IMPLICIT_DEF, and producing dead results.
- Register widenWithUnmerge(LLT WideTy, Register OrigReg);
-
+ /// Widen \p OrigReg to \p WideTy by merging to a wider type, padding with
+ /// G_IMPLICIT_DEF, and producing dead results.
+ Register widenWithUnmerge(LLT WideTy, Register OrigReg);
+
private:
LegalizeResult
widenScalarMergeValues(MachineInstr &MI, unsigned TypeIdx, LLT WideTy);
@@ -177,10 +177,10 @@ private:
widenScalarExtract(MachineInstr &MI, unsigned TypeIdx, LLT WideTy);
LegalizeResult
widenScalarInsert(MachineInstr &MI, unsigned TypeIdx, LLT WideTy);
- LegalizeResult widenScalarAddoSubo(MachineInstr &MI, unsigned TypeIdx,
- LLT WideTy);
- LegalizeResult widenScalarAddSubShlSat(MachineInstr &MI, unsigned TypeIdx,
- LLT WideTy);
+ LegalizeResult widenScalarAddoSubo(MachineInstr &MI, unsigned TypeIdx,
+ LLT WideTy);
+ LegalizeResult widenScalarAddSubShlSat(MachineInstr &MI, unsigned TypeIdx,
+ LLT WideTy);
/// Helper function to split a wide generic register into bitwise blocks with
/// the given Type (which implies the number of blocks needed). The generic
@@ -207,19 +207,19 @@ private:
LLT PartTy, ArrayRef<Register> PartRegs,
LLT LeftoverTy = LLT(), ArrayRef<Register> LeftoverRegs = {});
- /// Unmerge \p SrcReg into smaller sized values, and append them to \p
- /// Parts. The elements of \p Parts will be the greatest common divisor type
- /// of \p DstTy, \p NarrowTy and the type of \p SrcReg. This will compute and
- /// return the GCD type.
+ /// Unmerge \p SrcReg into smaller sized values, and append them to \p
+ /// Parts. The elements of \p Parts will be the greatest common divisor type
+ /// of \p DstTy, \p NarrowTy and the type of \p SrcReg. This will compute and
+ /// return the GCD type.
LLT extractGCDType(SmallVectorImpl<Register> &Parts, LLT DstTy,
LLT NarrowTy, Register SrcReg);
- /// Unmerge \p SrcReg into \p GCDTy typed registers. This will append all of
- /// the unpacked registers to \p Parts. This version is for when the common
- /// unmerge type is already known.
- void extractGCDType(SmallVectorImpl<Register> &Parts, LLT GCDTy,
- Register SrcReg);
-
+ /// Unmerge \p SrcReg into \p GCDTy typed registers. This will append all of
+ /// the unpacked registers to \p Parts. This version is for when the common
+ /// unmerge type is already known.
+ void extractGCDType(SmallVectorImpl<Register> &Parts, LLT GCDTy,
+ Register SrcReg);
+
/// Produce a merge of values in \p VRegs to define \p DstReg. Perform a merge
/// from the least common multiple type, and convert as appropriate to \p
/// DstReg.
@@ -252,23 +252,23 @@ private:
ArrayRef<Register> Src1Regs,
ArrayRef<Register> Src2Regs, LLT NarrowTy);
- void changeOpcode(MachineInstr &MI, unsigned NewOpcode);
-
+ void changeOpcode(MachineInstr &MI, unsigned NewOpcode);
+
public:
- /// Return the alignment to use for a stack temporary object with the given
- /// type.
- Align getStackTemporaryAlignment(LLT Type, Align MinAlign = Align()) const;
-
- /// Create a stack temporary based on the size in bytes and the alignment
- MachineInstrBuilder createStackTemporary(TypeSize Bytes, Align Alignment,
- MachinePointerInfo &PtrInfo);
-
- /// Get a pointer to vector element \p Index located in memory for a vector of
- /// type \p VecTy starting at a base address of \p VecPtr. If \p Index is out
- /// of bounds the returned pointer is unspecified, but will be within the
- /// vector bounds.
- Register getVectorElementPointer(Register VecPtr, LLT VecTy, Register Index);
-
+ /// Return the alignment to use for a stack temporary object with the given
+ /// type.
+ Align getStackTemporaryAlignment(LLT Type, Align MinAlign = Align()) const;
+
+ /// Create a stack temporary based on the size in bytes and the alignment
+ MachineInstrBuilder createStackTemporary(TypeSize Bytes, Align Alignment,
+ MachinePointerInfo &PtrInfo);
+
+ /// Get a pointer to vector element \p Index located in memory for a vector of
+ /// type \p VecTy starting at a base address of \p VecPtr. If \p Index is out
+ /// of bounds the returned pointer is unspecified, but will be within the
+ /// vector bounds.
+ Register getVectorElementPointer(Register VecPtr, LLT VecTy, Register Index);
+
LegalizeResult fewerElementsVectorImplicitDef(MachineInstr &MI,
unsigned TypeIdx, LLT NarrowTy);
@@ -296,11 +296,11 @@ public:
LegalizeResult fewerElementsVectorUnmergeValues(MachineInstr &MI,
unsigned TypeIdx,
LLT NarrowTy);
- LegalizeResult fewerElementsVectorMerge(MachineInstr &MI, unsigned TypeIdx,
- LLT NarrowTy);
- LegalizeResult fewerElementsVectorExtractInsertVectorElt(MachineInstr &MI,
- unsigned TypeIdx,
- LLT NarrowTy);
+ LegalizeResult fewerElementsVectorMerge(MachineInstr &MI, unsigned TypeIdx,
+ LLT NarrowTy);
+ LegalizeResult fewerElementsVectorExtractInsertVectorElt(MachineInstr &MI,
+ unsigned TypeIdx,
+ LLT NarrowTy);
LegalizeResult
reduceLoadStoreWidth(MachineInstr &MI, unsigned TypeIdx, LLT NarrowTy);
@@ -323,7 +323,7 @@ public:
LegalizeResult narrowScalarShift(MachineInstr &MI, unsigned TypeIdx, LLT Ty);
LegalizeResult narrowScalarMul(MachineInstr &MI, LLT Ty);
- LegalizeResult narrowScalarFPTOI(MachineInstr &MI, unsigned TypeIdx, LLT Ty);
+ LegalizeResult narrowScalarFPTOI(MachineInstr &MI, unsigned TypeIdx, LLT Ty);
LegalizeResult narrowScalarExtract(MachineInstr &MI, unsigned TypeIdx, LLT Ty);
LegalizeResult narrowScalarInsert(MachineInstr &MI, unsigned TypeIdx, LLT Ty);
@@ -334,52 +334,52 @@ public:
LegalizeResult narrowScalarCTTZ(MachineInstr &MI, unsigned TypeIdx, LLT Ty);
LegalizeResult narrowScalarCTPOP(MachineInstr &MI, unsigned TypeIdx, LLT Ty);
- /// Perform Bitcast legalize action on G_EXTRACT_VECTOR_ELT.
- LegalizeResult bitcastExtractVectorElt(MachineInstr &MI, unsigned TypeIdx,
- LLT CastTy);
-
- /// Perform Bitcast legalize action on G_INSERT_VECTOR_ELT.
- LegalizeResult bitcastInsertVectorElt(MachineInstr &MI, unsigned TypeIdx,
- LLT CastTy);
-
+ /// Perform Bitcast legalize action on G_EXTRACT_VECTOR_ELT.
+ LegalizeResult bitcastExtractVectorElt(MachineInstr &MI, unsigned TypeIdx,
+ LLT CastTy);
+
+ /// Perform Bitcast legalize action on G_INSERT_VECTOR_ELT.
+ LegalizeResult bitcastInsertVectorElt(MachineInstr &MI, unsigned TypeIdx,
+ LLT CastTy);
+
LegalizeResult lowerBitcast(MachineInstr &MI);
- LegalizeResult lowerLoad(MachineInstr &MI);
- LegalizeResult lowerStore(MachineInstr &MI);
- LegalizeResult lowerBitCount(MachineInstr &MI);
+ LegalizeResult lowerLoad(MachineInstr &MI);
+ LegalizeResult lowerStore(MachineInstr &MI);
+ LegalizeResult lowerBitCount(MachineInstr &MI);
LegalizeResult lowerU64ToF32BitOps(MachineInstr &MI);
- LegalizeResult lowerUITOFP(MachineInstr &MI);
- LegalizeResult lowerSITOFP(MachineInstr &MI);
- LegalizeResult lowerFPTOUI(MachineInstr &MI);
+ LegalizeResult lowerUITOFP(MachineInstr &MI);
+ LegalizeResult lowerSITOFP(MachineInstr &MI);
+ LegalizeResult lowerFPTOUI(MachineInstr &MI);
LegalizeResult lowerFPTOSI(MachineInstr &MI);
LegalizeResult lowerFPTRUNC_F64_TO_F16(MachineInstr &MI);
- LegalizeResult lowerFPTRUNC(MachineInstr &MI);
- LegalizeResult lowerFPOWI(MachineInstr &MI);
+ LegalizeResult lowerFPTRUNC(MachineInstr &MI);
+ LegalizeResult lowerFPOWI(MachineInstr &MI);
- LegalizeResult lowerMinMax(MachineInstr &MI);
- LegalizeResult lowerFCopySign(MachineInstr &MI);
+ LegalizeResult lowerMinMax(MachineInstr &MI);
+ LegalizeResult lowerFCopySign(MachineInstr &MI);
LegalizeResult lowerFMinNumMaxNum(MachineInstr &MI);
LegalizeResult lowerFMad(MachineInstr &MI);
LegalizeResult lowerIntrinsicRound(MachineInstr &MI);
LegalizeResult lowerFFloor(MachineInstr &MI);
LegalizeResult lowerMergeValues(MachineInstr &MI);
LegalizeResult lowerUnmergeValues(MachineInstr &MI);
- LegalizeResult lowerExtractInsertVectorElt(MachineInstr &MI);
+ LegalizeResult lowerExtractInsertVectorElt(MachineInstr &MI);
LegalizeResult lowerShuffleVector(MachineInstr &MI);
LegalizeResult lowerDynStackAlloc(MachineInstr &MI);
LegalizeResult lowerExtract(MachineInstr &MI);
LegalizeResult lowerInsert(MachineInstr &MI);
LegalizeResult lowerSADDO_SSUBO(MachineInstr &MI);
- LegalizeResult lowerAddSubSatToMinMax(MachineInstr &MI);
- LegalizeResult lowerAddSubSatToAddoSubo(MachineInstr &MI);
- LegalizeResult lowerShlSat(MachineInstr &MI);
+ LegalizeResult lowerAddSubSatToMinMax(MachineInstr &MI);
+ LegalizeResult lowerAddSubSatToAddoSubo(MachineInstr &MI);
+ LegalizeResult lowerShlSat(MachineInstr &MI);
LegalizeResult lowerBswap(MachineInstr &MI);
LegalizeResult lowerBitreverse(MachineInstr &MI);
LegalizeResult lowerReadWriteRegister(MachineInstr &MI);
- LegalizeResult lowerSMULH_UMULH(MachineInstr &MI);
- LegalizeResult lowerSelect(MachineInstr &MI);
-
+ LegalizeResult lowerSMULH_UMULH(MachineInstr &MI);
+ LegalizeResult lowerSelect(MachineInstr &MI);
+
};
/// Helper function that creates a libcall to the given \p Name using the given
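The newly public stack-temporary helpers above are the memory-fallback half of several lowerings. A minimal sketch of how a custom lowering might use them, assuming a LegalizerHelper named Helper and an arbitrary 16-byte slot:

    // Hedged sketch; the size and element type are illustrative assumptions.
    MachinePointerInfo PtrInfo;
    Align SlotAlign = Helper.getStackTemporaryAlignment(LLT::vector(4, 32));
    MachineInstrBuilder Slot =
        Helper.createStackTemporary(TypeSize::Fixed(16), SlotAlign, PtrInfo);
    // Slot defines a pointer to the new frame object; PtrInfo describes it
    // for the MachineMemOperands of the G_STORE/G_LOAD that follow.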
diff --git a/contrib/libs/llvm12/include/llvm/CodeGen/GlobalISel/LegalizerInfo.h b/contrib/libs/llvm12/include/llvm/CodeGen/GlobalISel/LegalizerInfo.h
index 0ae41c1a8d..ff313f43c0 100644
--- a/contrib/libs/llvm12/include/llvm/CodeGen/GlobalISel/LegalizerInfo.h
+++ b/contrib/libs/llvm12/include/llvm/CodeGen/GlobalISel/LegalizerInfo.h
@@ -189,7 +189,7 @@ struct TypePairAndMemDesc {
MemSize == Other.MemSize;
}
- /// \returns true if this memory access is legal for the access described
+ /// \returns true if this memory access is legal for the access described
/// by \p Other (the alignment is sufficient for the size and result type).
bool isCompatible(const TypePairAndMemDesc &Other) const {
return Type0 == Other.Type0 && Type1 == Other.Type1 &&
@@ -224,19 +224,19 @@ Predicate any(Predicate P0, Predicate P1, Args... args) {
return any(any(P0, P1), args...);
}
-/// True iff the given type index is the specified type.
+/// True iff the given type index is the specified type.
LegalityPredicate typeIs(unsigned TypeIdx, LLT TypesInit);
/// True iff the given type index is one of the specified types.
LegalityPredicate typeInSet(unsigned TypeIdx,
std::initializer_list<LLT> TypesInit);
-
-/// True iff the given type index is not the specified type.
-inline LegalityPredicate typeIsNot(unsigned TypeIdx, LLT Type) {
- return [=](const LegalityQuery &Query) {
- return Query.Types[TypeIdx] != Type;
- };
-}
-
+
+/// True iff the given type index is not the specified type.
+inline LegalityPredicate typeIsNot(unsigned TypeIdx, LLT Type) {
+ return [=](const LegalityQuery &Query) {
+ return Query.Types[TypeIdx] != Type;
+ };
+}
+
/// True iff the given types for the given pair of type indexes is one of the
/// specified type pairs.
LegalityPredicate
@@ -322,11 +322,11 @@ LegalizeMutation changeElementTo(unsigned TypeIdx, unsigned FromTypeIdx);
/// Keep the same scalar or element type as the given type.
LegalizeMutation changeElementTo(unsigned TypeIdx, LLT Ty);
-/// Change the scalar size or element size to have the same scalar size as type
-/// index \p FromTypeIdx. Unlike changeElementTo, this discards pointer types and
-/// only changes the size.
-LegalizeMutation changeElementSizeTo(unsigned TypeIdx, unsigned FromTypeIdx);
-
+/// Change the scalar size or element size to have the same scalar size as type
+/// index \p FromTypeIdx. Unlike changeElementTo, this discards pointer types and
+/// only changes the size.
+LegalizeMutation changeElementSizeTo(unsigned TypeIdx, unsigned FromTypeIdx);
+
/// Widen the scalar type or vector element type for the given type index to the
/// next power of 2.
LegalizeMutation widenScalarOrEltToNextPow2(unsigned TypeIdx, unsigned Min = 0);
@@ -635,7 +635,7 @@ public:
/// The instruction is lowered when type index 0 is any type in the given
/// list. Keep type index 0 as the same type.
LegalizeRuleSet &lowerFor(std::initializer_list<LLT> Types) {
- return actionFor(LegalizeAction::Lower, Types);
+ return actionFor(LegalizeAction::Lower, Types);
}
/// The instruction is lowered when type index 0 is any type in the given
/// list.
@@ -646,7 +646,7 @@ public:
/// The instruction is lowered when type indexes 0 and 1 are any type pair in
/// the given list. Keep type index 0 as the same type.
LegalizeRuleSet &lowerFor(std::initializer_list<std::pair<LLT, LLT>> Types) {
- return actionFor(LegalizeAction::Lower, Types);
+ return actionFor(LegalizeAction::Lower, Types);
}
/// The instruction is lowered when type indexes 0 and 1 are any type pair in
/// the given list.
@@ -671,15 +671,15 @@ public:
Types2);
}
- /// The instruction is emitted as a library call.
- LegalizeRuleSet &libcall() {
- using namespace LegalizeMutations;
- // We have no choice but to conservatively assume that predicate-less lowering
- // properly handles all type indices by design:
- markAllIdxsAsCovered();
- return actionIf(LegalizeAction::Libcall, always);
- }
-
+ /// The instruction is emitted as a library call.
+ LegalizeRuleSet &libcall() {
+ using namespace LegalizeMutations;
+ // We have no choice but to conservatively assume that predicate-less lowering
+ // properly handles all type indices by design:
+ markAllIdxsAsCovered();
+ return actionIf(LegalizeAction::Libcall, always);
+ }
+
/// Like legalIf, but for the Libcall action.
LegalizeRuleSet &libcallIf(LegalityPredicate Predicate) {
// We have no choice but to conservatively assume that a libcall with a
@@ -722,13 +722,13 @@ public:
markAllIdxsAsCovered();
return actionIf(LegalizeAction::NarrowScalar, Predicate, Mutation);
}
- /// Narrow the scalar, as specified by the mutation, when type indexes 0 and 1
- /// are any type pair in the given list.
- LegalizeRuleSet &
- narrowScalarFor(std::initializer_list<std::pair<LLT, LLT>> Types,
- LegalizeMutation Mutation) {
- return actionFor(LegalizeAction::NarrowScalar, Types, Mutation);
- }
+ /// Narrow the scalar, as specified by the mutation, when type indexes 0 and 1
+ /// are any type pair in the given list.
+ LegalizeRuleSet &
+ narrowScalarFor(std::initializer_list<std::pair<LLT, LLT>> Types,
+ LegalizeMutation Mutation) {
+ return actionFor(LegalizeAction::NarrowScalar, Types, Mutation);
+ }
/// Add more elements to reach the type selected by the mutation if the
/// predicate is true.
@@ -833,13 +833,13 @@ public:
LegalizeMutations::scalarize(TypeIdx));
}
- LegalizeRuleSet &scalarizeIf(LegalityPredicate Predicate, unsigned TypeIdx) {
- using namespace LegalityPredicates;
- return actionIf(LegalizeAction::FewerElements,
- all(Predicate, isVector(typeIdx(TypeIdx))),
- LegalizeMutations::scalarize(TypeIdx));
- }
-
+ LegalizeRuleSet &scalarizeIf(LegalityPredicate Predicate, unsigned TypeIdx) {
+ using namespace LegalityPredicates;
+ return actionIf(LegalizeAction::FewerElements,
+ all(Predicate, isVector(typeIdx(TypeIdx))),
+ LegalizeMutations::scalarize(TypeIdx));
+ }
+
/// Ensure the scalar or element is at least as wide as Ty.
LegalizeRuleSet &minScalarOrElt(unsigned TypeIdx, const LLT Ty) {
using namespace LegalityPredicates;
@@ -897,10 +897,10 @@ public:
return actionIf(
LegalizeAction::NarrowScalar,
[=](const LegalityQuery &Query) {
- const LLT QueryTy = Query.Types[TypeIdx];
- return QueryTy.isScalar() &&
- QueryTy.getSizeInBits() > Ty.getSizeInBits() &&
- Predicate(Query);
+ const LLT QueryTy = Query.Types[TypeIdx];
+ return QueryTy.isScalar() &&
+ QueryTy.getSizeInBits() > Ty.getSizeInBits() &&
+ Predicate(Query);
},
changeElementTo(typeIdx(TypeIdx), Ty));
}
@@ -926,27 +926,27 @@ public:
return Query.Types[LargeTypeIdx].getScalarSizeInBits() >
Query.Types[TypeIdx].getSizeInBits();
},
- LegalizeMutations::changeElementSizeTo(TypeIdx, LargeTypeIdx));
- }
-
- /// Narrow the scalar to match the size of another.
- LegalizeRuleSet &maxScalarSameAs(unsigned TypeIdx, unsigned NarrowTypeIdx) {
- typeIdx(TypeIdx);
- return narrowScalarIf(
+ LegalizeMutations::changeElementSizeTo(TypeIdx, LargeTypeIdx));
+ }
+
+ /// Narrow the scalar to match the size of another.
+ LegalizeRuleSet &maxScalarSameAs(unsigned TypeIdx, unsigned NarrowTypeIdx) {
+ typeIdx(TypeIdx);
+ return narrowScalarIf(
[=](const LegalityQuery &Query) {
- return Query.Types[NarrowTypeIdx].getScalarSizeInBits() <
- Query.Types[TypeIdx].getSizeInBits();
- },
- LegalizeMutations::changeElementSizeTo(TypeIdx, NarrowTypeIdx));
- }
-
- /// Change the type \p TypeIdx to have the same scalar size as type \p
- /// SameSizeIdx.
- LegalizeRuleSet &scalarSameSizeAs(unsigned TypeIdx, unsigned SameSizeIdx) {
- return minScalarSameAs(TypeIdx, SameSizeIdx)
- .maxScalarSameAs(TypeIdx, SameSizeIdx);
- }
-
+ return Query.Types[NarrowTypeIdx].getScalarSizeInBits() <
+ Query.Types[TypeIdx].getSizeInBits();
+ },
+ LegalizeMutations::changeElementSizeTo(TypeIdx, NarrowTypeIdx));
+ }
+
+ /// Change the type \p TypeIdx to have the same scalar size as type \p
+ /// SameSizeIdx.
+ LegalizeRuleSet &scalarSameSizeAs(unsigned TypeIdx, unsigned SameSizeIdx) {
+ return minScalarSameAs(TypeIdx, SameSizeIdx)
+ .maxScalarSameAs(TypeIdx, SameSizeIdx);
+ }
+
/// Conditionally widen the scalar or elt to match the size of another.
LegalizeRuleSet &minScalarEltSameAsIf(LegalityPredicate Predicate,
unsigned TypeIdx, unsigned LargeTypeIdx) {
@@ -1264,12 +1264,12 @@ public:
bool isLegal(const LegalityQuery &Query) const {
return getAction(Query).Action == LegalizeAction::Legal;
}
-
- bool isLegalOrCustom(const LegalityQuery &Query) const {
- auto Action = getAction(Query).Action;
- return Action == LegalizeAction::Legal || Action == LegalizeAction::Custom;
- }
-
+
+ bool isLegalOrCustom(const LegalityQuery &Query) const {
+ auto Action = getAction(Query).Action;
+ return Action == LegalizeAction::Legal || Action == LegalizeAction::Custom;
+ }
+
bool isLegal(const MachineInstr &MI, const MachineRegisterInfo &MRI) const;
bool isLegalOrCustom(const MachineInstr &MI,
const MachineRegisterInfo &MRI) const;
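These combinators are designed to be chained when a target describes an opcode's legality. An illustrative ruleset using the pieces restored in this diff; the choice of G_FREM is an assumption, not taken from this commit:

    // Illustrative only: scalarize vectors, then fall back to a libcall,
    // which lowers scalar G_FREM to fmodf/fmod at runtime.
    getActionDefinitionsBuilder(TargetOpcode::G_FREM)
        .scalarize(0)
        .libcall();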
diff --git a/contrib/libs/llvm12/include/llvm/CodeGen/GlobalISel/Localizer.h b/contrib/libs/llvm12/include/llvm/CodeGen/GlobalISel/Localizer.h
index d1716931e6..65f63dc8bc 100644
--- a/contrib/libs/llvm12/include/llvm/CodeGen/GlobalISel/Localizer.h
+++ b/contrib/libs/llvm12/include/llvm/CodeGen/GlobalISel/Localizer.h
@@ -71,11 +71,11 @@ private:
typedef SmallSetVector<MachineInstr *, 32> LocalizedSetVecT;
- /// Return true if \p Op is a phi operand that is not unique within its phi,
- /// that is, if other operands of the phi carry the same register.
- bool isNonUniquePhiValue(MachineOperand &Op) const;
-
+ /// Return true if \p Op is a phi operand that is not unique within its phi,
+ /// that is, if other operands of the phi carry the same register.
+ bool isNonUniquePhiValue(MachineOperand &Op) const;
+
/// Do inter-block localization from the entry block.
bool localizeInterBlock(MachineFunction &MF,
LocalizedSetVecT &LocalizedInstrs);
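A hedged sketch of the duplicate-operand test that isNonUniquePhiValue() describes; the real body lives in Localizer.cpp, so the shape below is an assumption:

    // G_PHI operands are laid out as: def, then (value, pred-MBB) pairs.
    bool isNonUniquePhiValue(MachineOperand &Op) {
      MachineInstr &Phi = *Op.getParent();
      Register Reg = Op.getReg();
      for (unsigned Idx = 1, E = Phi.getNumOperands(); Idx < E; Idx += 2) {
        MachineOperand &Other = Phi.getOperand(Idx);
        if (&Other != &Op && Other.getReg() == Reg)
          return true; // the same register reaches the phi on another edge
      }
      return false;
    }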
diff --git a/contrib/libs/llvm12/include/llvm/CodeGen/GlobalISel/MIPatternMatch.h b/contrib/libs/llvm12/include/llvm/CodeGen/GlobalISel/MIPatternMatch.h
index 223f61ccc5..13085d3d44 100644
--- a/contrib/libs/llvm12/include/llvm/CodeGen/GlobalISel/MIPatternMatch.h
+++ b/contrib/libs/llvm12/include/llvm/CodeGen/GlobalISel/MIPatternMatch.h
@@ -46,25 +46,25 @@ inline OneUse_match<SubPat> m_OneUse(const SubPat &SP) {
return SP;
}
-template <typename SubPatternT> struct OneNonDBGUse_match {
- SubPatternT SubPat;
- OneNonDBGUse_match(const SubPatternT &SP) : SubPat(SP) {}
-
- bool match(const MachineRegisterInfo &MRI, Register Reg) {
- return MRI.hasOneNonDBGUse(Reg) && SubPat.match(MRI, Reg);
- }
-};
-
-template <typename SubPat>
-inline OneNonDBGUse_match<SubPat> m_OneNonDBGUse(const SubPat &SP) {
- return SP;
-}
-
+template <typename SubPatternT> struct OneNonDBGUse_match {
+ SubPatternT SubPat;
+ OneNonDBGUse_match(const SubPatternT &SP) : SubPat(SP) {}
+
+ bool match(const MachineRegisterInfo &MRI, Register Reg) {
+ return MRI.hasOneNonDBGUse(Reg) && SubPat.match(MRI, Reg);
+ }
+};
+
+template <typename SubPat>
+inline OneNonDBGUse_match<SubPat> m_OneNonDBGUse(const SubPat &SP) {
+ return SP;
+}
+
struct ConstantMatch {
int64_t &CR;
ConstantMatch(int64_t &C) : CR(C) {}
bool match(const MachineRegisterInfo &MRI, Register Reg) {
- if (auto MaybeCst = getConstantVRegSExtVal(Reg, MRI)) {
+ if (auto MaybeCst = getConstantVRegSExtVal(Reg, MRI)) {
CR = *MaybeCst;
return true;
}
@@ -74,29 +74,29 @@ struct ConstantMatch {
inline ConstantMatch m_ICst(int64_t &Cst) { return ConstantMatch(Cst); }
-/// Matcher for a specific constant value.
-struct SpecificConstantMatch {
- int64_t RequestedVal;
- SpecificConstantMatch(int64_t RequestedVal) : RequestedVal(RequestedVal) {}
- bool match(const MachineRegisterInfo &MRI, Register Reg) {
- int64_t MatchedVal;
- return mi_match(Reg, MRI, m_ICst(MatchedVal)) && MatchedVal == RequestedVal;
- }
-};
-
-/// Matches a constant equal to \p RequestedValue.
-inline SpecificConstantMatch m_SpecificICst(int64_t RequestedValue) {
- return SpecificConstantMatch(RequestedValue);
-}
-
-///{
-/// Convenience matchers for specific integer values.
-inline SpecificConstantMatch m_ZeroInt() { return SpecificConstantMatch(0); }
-inline SpecificConstantMatch m_AllOnesInt() {
- return SpecificConstantMatch(-1);
-}
-///}
-
+/// Matcher for a specific constant value.
+struct SpecificConstantMatch {
+ int64_t RequestedVal;
+ SpecificConstantMatch(int64_t RequestedVal) : RequestedVal(RequestedVal) {}
+ bool match(const MachineRegisterInfo &MRI, Register Reg) {
+ int64_t MatchedVal;
+ return mi_match(Reg, MRI, m_ICst(MatchedVal)) && MatchedVal == RequestedVal;
+ }
+};
+
+/// Matches a constant equal to \p RequestedValue.
+inline SpecificConstantMatch m_SpecificICst(int64_t RequestedValue) {
+ return SpecificConstantMatch(RequestedValue);
+}
+
+///{
+/// Convenience matchers for specific integer values.
+inline SpecificConstantMatch m_ZeroInt() { return SpecificConstantMatch(0); }
+inline SpecificConstantMatch m_AllOnesInt() {
+ return SpecificConstantMatch(-1);
+}
+///}
+
// TODO: Rework this for different kinds of MachineOperand.
// Currently assumes the Src for a match is a register.
// We might want to support taking in some MachineOperands and call getReg on
@@ -242,12 +242,12 @@ m_GAdd(const LHS &L, const RHS &R) {
}
template <typename LHS, typename RHS>
-inline BinaryOp_match<LHS, RHS, TargetOpcode::G_PTR_ADD, true>
-m_GPtrAdd(const LHS &L, const RHS &R) {
- return BinaryOp_match<LHS, RHS, TargetOpcode::G_PTR_ADD, true>(L, R);
-}
-
-template <typename LHS, typename RHS>
+inline BinaryOp_match<LHS, RHS, TargetOpcode::G_PTR_ADD, true>
+m_GPtrAdd(const LHS &L, const RHS &R) {
+ return BinaryOp_match<LHS, RHS, TargetOpcode::G_PTR_ADD, true>(L, R);
+}
+
+template <typename LHS, typename RHS>
inline BinaryOp_match<LHS, RHS, TargetOpcode::G_SUB> m_GSub(const LHS &L,
const RHS &R) {
return BinaryOp_match<LHS, RHS, TargetOpcode::G_SUB>(L, R);
@@ -284,12 +284,12 @@ m_GAnd(const LHS &L, const RHS &R) {
}
template <typename LHS, typename RHS>
-inline BinaryOp_match<LHS, RHS, TargetOpcode::G_XOR, true>
-m_GXor(const LHS &L, const RHS &R) {
- return BinaryOp_match<LHS, RHS, TargetOpcode::G_XOR, true>(L, R);
-}
-
-template <typename LHS, typename RHS>
+inline BinaryOp_match<LHS, RHS, TargetOpcode::G_XOR, true>
+m_GXor(const LHS &L, const RHS &R) {
+ return BinaryOp_match<LHS, RHS, TargetOpcode::G_XOR, true>(L, R);
+}
+
+template <typename LHS, typename RHS>
inline BinaryOp_match<LHS, RHS, TargetOpcode::G_OR, true> m_GOr(const LHS &L,
const RHS &R) {
return BinaryOp_match<LHS, RHS, TargetOpcode::G_OR, true>(L, R);
@@ -307,12 +307,12 @@ m_GLShr(const LHS &L, const RHS &R) {
return BinaryOp_match<LHS, RHS, TargetOpcode::G_LSHR, false>(L, R);
}
-template <typename LHS, typename RHS>
-inline BinaryOp_match<LHS, RHS, TargetOpcode::G_ASHR, false>
-m_GAShr(const LHS &L, const RHS &R) {
- return BinaryOp_match<LHS, RHS, TargetOpcode::G_ASHR, false>(L, R);
-}
-
+template <typename LHS, typename RHS>
+inline BinaryOp_match<LHS, RHS, TargetOpcode::G_ASHR, false>
+m_GAShr(const LHS &L, const RHS &R) {
+ return BinaryOp_match<LHS, RHS, TargetOpcode::G_ASHR, false>(L, R);
+}
+
// Helper for unary instructions (G_[ZSA]EXT/G_TRUNC) etc.
template <typename SrcTy, unsigned Opcode> struct UnaryOp_match {
SrcTy L;
@@ -446,51 +446,51 @@ struct CheckType {
inline CheckType m_SpecificType(LLT Ty) { return Ty; }
-template <typename Src0Ty, typename Src1Ty, typename Src2Ty, unsigned Opcode>
-struct TernaryOp_match {
- Src0Ty Src0;
- Src1Ty Src1;
- Src2Ty Src2;
-
- TernaryOp_match(const Src0Ty &Src0, const Src1Ty &Src1, const Src2Ty &Src2)
- : Src0(Src0), Src1(Src1), Src2(Src2) {}
- template <typename OpTy>
- bool match(const MachineRegisterInfo &MRI, OpTy &&Op) {
- MachineInstr *TmpMI;
- if (mi_match(Op, MRI, m_MInstr(TmpMI))) {
- if (TmpMI->getOpcode() == Opcode && TmpMI->getNumOperands() == 4) {
- return (Src0.match(MRI, TmpMI->getOperand(1).getReg()) &&
- Src1.match(MRI, TmpMI->getOperand(2).getReg()) &&
- Src2.match(MRI, TmpMI->getOperand(3).getReg()));
- }
- }
- return false;
- }
-};
-template <typename Src0Ty, typename Src1Ty, typename Src2Ty>
-inline TernaryOp_match<Src0Ty, Src1Ty, Src2Ty,
- TargetOpcode::G_INSERT_VECTOR_ELT>
-m_GInsertVecElt(const Src0Ty &Src0, const Src1Ty &Src1, const Src2Ty &Src2) {
- return TernaryOp_match<Src0Ty, Src1Ty, Src2Ty,
- TargetOpcode::G_INSERT_VECTOR_ELT>(Src0, Src1, Src2);
-}
-
-/// Matches a register negated by a G_SUB.
-/// G_SUB 0, %negated_reg
-template <typename SrcTy>
-inline BinaryOp_match<SpecificConstantMatch, SrcTy, TargetOpcode::G_SUB>
-m_Neg(const SrcTy &&Src) {
- return m_GSub(m_ZeroInt(), Src);
-}
-
-/// Matches a register not-ed by a G_XOR.
-/// G_XOR %not_reg, -1
-template <typename SrcTy>
-inline BinaryOp_match<SrcTy, SpecificConstantMatch, TargetOpcode::G_XOR, true>
-m_Not(const SrcTy &&Src) {
- return m_GXor(Src, m_AllOnesInt());
-}
-
+template <typename Src0Ty, typename Src1Ty, typename Src2Ty, unsigned Opcode>
+struct TernaryOp_match {
+ Src0Ty Src0;
+ Src1Ty Src1;
+ Src2Ty Src2;
+
+ TernaryOp_match(const Src0Ty &Src0, const Src1Ty &Src1, const Src2Ty &Src2)
+ : Src0(Src0), Src1(Src1), Src2(Src2) {}
+ template <typename OpTy>
+ bool match(const MachineRegisterInfo &MRI, OpTy &&Op) {
+ MachineInstr *TmpMI;
+ if (mi_match(Op, MRI, m_MInstr(TmpMI))) {
+ if (TmpMI->getOpcode() == Opcode && TmpMI->getNumOperands() == 4) {
+ return (Src0.match(MRI, TmpMI->getOperand(1).getReg()) &&
+ Src1.match(MRI, TmpMI->getOperand(2).getReg()) &&
+ Src2.match(MRI, TmpMI->getOperand(3).getReg()));
+ }
+ }
+ return false;
+ }
+};
+template <typename Src0Ty, typename Src1Ty, typename Src2Ty>
+inline TernaryOp_match<Src0Ty, Src1Ty, Src2Ty,
+ TargetOpcode::G_INSERT_VECTOR_ELT>
+m_GInsertVecElt(const Src0Ty &Src0, const Src1Ty &Src1, const Src2Ty &Src2) {
+ return TernaryOp_match<Src0Ty, Src1Ty, Src2Ty,
+ TargetOpcode::G_INSERT_VECTOR_ELT>(Src0, Src1, Src2);
+}
+
+/// Matches a register negated by a G_SUB.
+/// G_SUB 0, %negated_reg
+template <typename SrcTy>
+inline BinaryOp_match<SpecificConstantMatch, SrcTy, TargetOpcode::G_SUB>
+m_Neg(const SrcTy &&Src) {
+ return m_GSub(m_ZeroInt(), Src);
+}
+
+/// Matches a register not-ed by a G_XOR.
+/// G_XOR %not_reg, -1
+template <typename SrcTy>
+inline BinaryOp_match<SrcTy, SpecificConstantMatch, TargetOpcode::G_XOR, true>
+m_Not(const SrcTy &&Src) {
+ return m_GXor(Src, m_AllOnesInt());
+}
+
} // namespace MIPatternMatch
} // namespace llvm
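The matchers above compose through mi_match. A small usage example for the new m_Neg/m_Not/m_OneNonDBGUse pieces; the surrounding combine and register names are assumptions:

    // Match %dst = G_SUB 0, (G_XOR %x, -1), i.e. neg(not(x)), where the
    // inner NOT has a single non-debug use.
    Register X;
    if (mi_match(DstReg, MRI, m_Neg(m_OneNonDBGUse(m_Not(m_Reg(X)))))) {
      // neg(not(x)) == x + 1; a combine could rebuild this as G_ADD %x, 1.
    }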
diff --git a/contrib/libs/llvm12/include/llvm/CodeGen/GlobalISel/MachineIRBuilder.h b/contrib/libs/llvm12/include/llvm/CodeGen/GlobalISel/MachineIRBuilder.h
index 6e3f7cdc26..324d80e16c 100644
--- a/contrib/libs/llvm12/include/llvm/CodeGen/GlobalISel/MachineIRBuilder.h
+++ b/contrib/libs/llvm12/include/llvm/CodeGen/GlobalISel/MachineIRBuilder.h
@@ -25,10 +25,10 @@
#include "llvm/CodeGen/MachineBasicBlock.h"
#include "llvm/CodeGen/MachineInstrBuilder.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
-#include "llvm/CodeGen/TargetOpcodes.h"
+#include "llvm/CodeGen/TargetOpcodes.h"
#include "llvm/IR/Constants.h"
#include "llvm/IR/DebugLoc.h"
-#include "llvm/IR/Module.h"
+#include "llvm/IR/Module.h"
namespace llvm {
@@ -231,7 +231,7 @@ class MachineIRBuilder {
protected:
void validateTruncExt(const LLT Dst, const LLT Src, bool IsExtend);
- void validateUnaryOp(const LLT Res, const LLT Op0);
+ void validateUnaryOp(const LLT Res, const LLT Op0);
void validateBinaryOp(const LLT Res, const LLT Op0, const LLT Op1);
void validateShiftOp(const LLT Res, const LLT Op0, const LLT Op1);
@@ -259,11 +259,11 @@ public:
setDebugLoc(MI.getDebugLoc());
}
- MachineIRBuilder(MachineInstr &MI, GISelChangeObserver &Observer) :
- MachineIRBuilder(MI) {
- setChangeObserver(Observer);
- }
-
+ MachineIRBuilder(MachineInstr &MI, GISelChangeObserver &Observer) :
+ MachineIRBuilder(MI) {
+ setChangeObserver(Observer);
+ }
+
virtual ~MachineIRBuilder() = default;
MachineIRBuilder(const MachineIRBuilderState &BState) : State(BState) {}
@@ -743,7 +743,7 @@ public:
/// depend on bit 0 (for now).
///
/// \return The newly created instruction.
- MachineInstrBuilder buildBrCond(const SrcOp &Tst, MachineBasicBlock &Dest);
+ MachineInstrBuilder buildBrCond(const SrcOp &Tst, MachineBasicBlock &Dest);
/// Build and insert G_BRINDIRECT \p Tgt
///
@@ -827,18 +827,18 @@ public:
///
/// \return a MachineInstrBuilder for the newly created instruction.
MachineInstrBuilder buildLoad(const DstOp &Res, const SrcOp &Addr,
- MachineMemOperand &MMO) {
- return buildLoadInstr(TargetOpcode::G_LOAD, Res, Addr, MMO);
- }
-
- /// Build and insert a G_LOAD instruction, while constructing the
- /// MachineMemOperand.
- MachineInstrBuilder
- buildLoad(const DstOp &Res, const SrcOp &Addr, MachinePointerInfo PtrInfo,
- Align Alignment,
- MachineMemOperand::Flags MMOFlags = MachineMemOperand::MONone,
- const AAMDNodes &AAInfo = AAMDNodes());
-
+ MachineMemOperand &MMO) {
+ return buildLoadInstr(TargetOpcode::G_LOAD, Res, Addr, MMO);
+ }
+
+ /// Build and insert a G_LOAD instruction, while constructing the
+ /// MachineMemOperand.
+ MachineInstrBuilder
+ buildLoad(const DstOp &Res, const SrcOp &Addr, MachinePointerInfo PtrInfo,
+ Align Alignment,
+ MachineMemOperand::Flags MMOFlags = MachineMemOperand::MONone,
+ const AAMDNodes &AAInfo = AAMDNodes());
+
/// Build and insert `Res = <opcode> Addr, MMO`.
///
/// Loads the value stored at \p Addr. Puts the result in \p Res.
@@ -871,14 +871,14 @@ public:
MachineInstrBuilder buildStore(const SrcOp &Val, const SrcOp &Addr,
MachineMemOperand &MMO);
- /// Build and insert a G_STORE instruction, while constructing the
- /// MachineMemOperand.
- MachineInstrBuilder
- buildStore(const SrcOp &Val, const SrcOp &Addr, MachinePointerInfo PtrInfo,
- Align Alignment,
- MachineMemOperand::Flags MMOFlags = MachineMemOperand::MONone,
- const AAMDNodes &AAInfo = AAMDNodes());
-
+ /// Build and insert a G_STORE instruction, while constructing the
+ /// MachineMemOperand.
+ MachineInstrBuilder
+ buildStore(const SrcOp &Val, const SrcOp &Addr, MachinePointerInfo PtrInfo,
+ Align Alignment,
+ MachineMemOperand::Flags MMOFlags = MachineMemOperand::MONone,
+ const AAMDNodes &AAInfo = AAMDNodes());
+
/// Build and insert `Res0, ... = G_EXTRACT Src, Idx0`.
///
/// \pre setBasicBlock or setMI must have been called.
@@ -970,23 +970,23 @@ public:
MachineInstrBuilder buildBuildVectorTrunc(const DstOp &Res,
ArrayRef<Register> Ops);
- /// Build and insert a vector splat of a scalar \p Src using a
- /// G_INSERT_VECTOR_ELT and G_SHUFFLE_VECTOR idiom.
- ///
- /// \pre setBasicBlock or setMI must have been called.
- /// \pre \p Src must have the same type as the element type of \p Dst
- ///
- /// \return a MachineInstrBuilder for the newly created instruction.
- MachineInstrBuilder buildShuffleSplat(const DstOp &Res, const SrcOp &Src);
-
- /// Build and insert \p Res = G_SHUFFLE_VECTOR \p Src1, \p Src2, \p Mask
- ///
- /// \pre setBasicBlock or setMI must have been called.
- ///
- /// \return a MachineInstrBuilder for the newly created instruction.
- MachineInstrBuilder buildShuffleVector(const DstOp &Res, const SrcOp &Src1,
- const SrcOp &Src2, ArrayRef<int> Mask);
-
+ /// Build and insert a vector splat of a scalar \p Src using a
+ /// G_INSERT_VECTOR_ELT and G_SHUFFLE_VECTOR idiom.
+ ///
+ /// \pre setBasicBlock or setMI must have been called.
+ /// \pre \p Src must have the same type as the element type of \p Dst
+ ///
+ /// \return a MachineInstrBuilder for the newly created instruction.
+ MachineInstrBuilder buildShuffleSplat(const DstOp &Res, const SrcOp &Src);
+
+ /// Build and insert \p Res = G_SHUFFLE_VECTOR \p Src1, \p Src2, \p Mask
+ ///
+ /// \pre setBasicBlock or setMI must have been called.
+ ///
+ /// \return a MachineInstrBuilder for the newly created instruction.
+ MachineInstrBuilder buildShuffleVector(const DstOp &Res, const SrcOp &Src1,
+ const SrcOp &Src2, ArrayRef<int> Mask);
+
/// Build and insert \p Res = G_CONCAT_VECTORS \p Op0, ...
///
/// G_CONCAT_VECTORS creates a vector from the concatenation of 2 or more
@@ -1570,13 +1570,13 @@ public:
return buildInstr(TargetOpcode::G_FSUB, {Dst}, {Src0, Src1}, Flags);
}
- /// Build and insert \p Res = G_FDIV \p Op0, \p Op1
- MachineInstrBuilder buildFDiv(const DstOp &Dst, const SrcOp &Src0,
- const SrcOp &Src1,
- Optional<unsigned> Flags = None) {
- return buildInstr(TargetOpcode::G_FDIV, {Dst}, {Src0, Src1}, Flags);
- }
-
+ /// Build and insert \p Res = G_FDIV \p Op0, \p Op1
+ MachineInstrBuilder buildFDiv(const DstOp &Dst, const SrcOp &Src0,
+ const SrcOp &Src1,
+ Optional<unsigned> Flags = None) {
+ return buildInstr(TargetOpcode::G_FDIV, {Dst}, {Src0, Src1}, Flags);
+ }
+
/// Build and insert \p Res = G_FMA \p Op0, \p Op1, \p Op2
MachineInstrBuilder buildFMA(const DstOp &Dst, const SrcOp &Src0,
const SrcOp &Src1, const SrcOp &Src2,
@@ -1639,13 +1639,13 @@ public:
return buildInstr(TargetOpcode::G_FEXP2, {Dst}, {Src}, Flags);
}
- /// Build and insert \p Dst = G_FPOW \p Src0, \p Src1
- MachineInstrBuilder buildFPow(const DstOp &Dst, const SrcOp &Src0,
- const SrcOp &Src1,
- Optional<unsigned> Flags = None) {
- return buildInstr(TargetOpcode::G_FPOW, {Dst}, {Src0, Src1}, Flags);
- }
-
+ /// Build and insert \p Dst = G_FPOW \p Src0, \p Src1
+ MachineInstrBuilder buildFPow(const DstOp &Dst, const SrcOp &Src0,
+ const SrcOp &Src1,
+ Optional<unsigned> Flags = None) {
+ return buildInstr(TargetOpcode::G_FPOW, {Dst}, {Src0, Src1}, Flags);
+ }
+
/// Build and insert \p Res = G_FCOPYSIGN \p Op0, \p Op1
MachineInstrBuilder buildFCopysign(const DstOp &Dst, const SrcOp &Src0,
const SrcOp &Src1) {
@@ -1696,11 +1696,11 @@ public:
return buildInstr(TargetOpcode::G_UMAX, {Dst}, {Src0, Src1});
}
- /// Build and insert \p Dst = G_ABS \p Src
- MachineInstrBuilder buildAbs(const DstOp &Dst, const SrcOp &Src) {
- return buildInstr(TargetOpcode::G_ABS, {Dst}, {Src});
- }
-
+ /// Build and insert \p Dst = G_ABS \p Src
+ MachineInstrBuilder buildAbs(const DstOp &Dst, const SrcOp &Src) {
+ return buildInstr(TargetOpcode::G_ABS, {Dst}, {Src});
+ }
+
/// Build and insert \p Res = G_JUMP_TABLE \p JTI
///
/// G_JUMP_TABLE sets \p Res to the address of the jump table specified by
@@ -1709,101 +1709,101 @@ public:
/// \return a MachineInstrBuilder for the newly created instruction.
MachineInstrBuilder buildJumpTable(const LLT PtrTy, unsigned JTI);
- /// Build and insert \p Res = G_VECREDUCE_SEQ_FADD \p ScalarIn, \p VecIn
- ///
- /// \p ScalarIn is the scalar accumulator input to start the sequential
- /// reduction operation of \p VecIn.
- MachineInstrBuilder buildVecReduceSeqFAdd(const DstOp &Dst,
- const SrcOp &ScalarIn,
- const SrcOp &VecIn) {
- return buildInstr(TargetOpcode::G_VECREDUCE_SEQ_FADD, {Dst},
- {ScalarIn, {VecIn}});
- }
-
- /// Build and insert \p Res = G_VECREDUCE_SEQ_FMUL \p ScalarIn, \p VecIn
- ///
- /// \p ScalarIn is the scalar accumulator input to start the sequential
- /// reduction operation of \p VecIn.
- MachineInstrBuilder buildVecReduceSeqFMul(const DstOp &Dst,
- const SrcOp &ScalarIn,
- const SrcOp &VecIn) {
- return buildInstr(TargetOpcode::G_VECREDUCE_SEQ_FMUL, {Dst},
- {ScalarIn, {VecIn}});
- }
-
- /// Build and insert \p Res = G_VECREDUCE_FADD \p ScalarIn, \p VecIn
- ///
- /// \p ScalarIn is the scalar accumulator input to the reduction operation of
- /// \p VecIn.
- MachineInstrBuilder buildVecReduceFAdd(const DstOp &Dst,
- const SrcOp &ScalarIn,
- const SrcOp &VecIn) {
- return buildInstr(TargetOpcode::G_VECREDUCE_FADD, {Dst}, {ScalarIn, VecIn});
- }
-
- /// Build and insert \p Res = G_VECREDUCE_FMUL \p ScalarIn, \p VecIn
- ///
- /// \p ScalarIn is the scalar accumulator input to the reduction operation of
- /// \p VecIn.
- MachineInstrBuilder buildVecReduceFMul(const DstOp &Dst,
- const SrcOp &ScalarIn,
- const SrcOp &VecIn) {
- return buildInstr(TargetOpcode::G_VECREDUCE_FMUL, {Dst}, {ScalarIn, VecIn});
- }
-
- /// Build and insert \p Res = G_VECREDUCE_FMAX \p Src
- MachineInstrBuilder buildVecReduceFMax(const DstOp &Dst, const SrcOp &Src) {
- return buildInstr(TargetOpcode::G_VECREDUCE_FMAX, {Dst}, {Src});
- }
-
- /// Build and insert \p Res = G_VECREDUCE_FMIN \p Src
- MachineInstrBuilder buildVecReduceFMin(const DstOp &Dst, const SrcOp &Src) {
- return buildInstr(TargetOpcode::G_VECREDUCE_FMIN, {Dst}, {Src});
- }
- /// Build and insert \p Res = G_VECREDUCE_ADD \p Src
- MachineInstrBuilder buildVecReduceAdd(const DstOp &Dst, const SrcOp &Src) {
- return buildInstr(TargetOpcode::G_VECREDUCE_ADD, {Dst}, {Src});
- }
-
- /// Build and insert \p Res = G_VECREDUCE_MUL \p Src
- MachineInstrBuilder buildVecReduceMul(const DstOp &Dst, const SrcOp &Src) {
- return buildInstr(TargetOpcode::G_VECREDUCE_MUL, {Dst}, {Src});
- }
-
- /// Build and insert \p Res = G_VECREDUCE_AND \p Src
- MachineInstrBuilder buildVecReduceAnd(const DstOp &Dst, const SrcOp &Src) {
- return buildInstr(TargetOpcode::G_VECREDUCE_AND, {Dst}, {Src});
- }
-
- /// Build and insert \p Res = G_VECREDUCE_OR \p Src
- MachineInstrBuilder buildVecReduceOr(const DstOp &Dst, const SrcOp &Src) {
- return buildInstr(TargetOpcode::G_VECREDUCE_OR, {Dst}, {Src});
- }
-
- /// Build and insert \p Res = G_VECREDUCE_XOR \p Src
- MachineInstrBuilder buildVecReduceXor(const DstOp &Dst, const SrcOp &Src) {
- return buildInstr(TargetOpcode::G_VECREDUCE_XOR, {Dst}, {Src});
- }
-
- /// Build and insert \p Res = G_VECREDUCE_SMAX \p Src
- MachineInstrBuilder buildVecReduceSMax(const DstOp &Dst, const SrcOp &Src) {
- return buildInstr(TargetOpcode::G_VECREDUCE_SMAX, {Dst}, {Src});
- }
-
- /// Build and insert \p Res = G_VECREDUCE_SMIN \p Src
- MachineInstrBuilder buildVecReduceSMin(const DstOp &Dst, const SrcOp &Src) {
- return buildInstr(TargetOpcode::G_VECREDUCE_SMIN, {Dst}, {Src});
- }
-
- /// Build and insert \p Res = G_VECREDUCE_UMAX \p Src
- MachineInstrBuilder buildVecReduceUMax(const DstOp &Dst, const SrcOp &Src) {
- return buildInstr(TargetOpcode::G_VECREDUCE_UMAX, {Dst}, {Src});
- }
-
- /// Build and insert \p Res = G_VECREDUCE_UMIN \p Src
- MachineInstrBuilder buildVecReduceUMin(const DstOp &Dst, const SrcOp &Src) {
- return buildInstr(TargetOpcode::G_VECREDUCE_UMIN, {Dst}, {Src});
- }
+ /// Build and insert \p Res = G_VECREDUCE_SEQ_FADD \p ScalarIn, \p VecIn
+ ///
+ /// \p ScalarIn is the scalar accumulator input to start the sequential
+ /// reduction operation of \p VecIn.
+ MachineInstrBuilder buildVecReduceSeqFAdd(const DstOp &Dst,
+ const SrcOp &ScalarIn,
+ const SrcOp &VecIn) {
+ return buildInstr(TargetOpcode::G_VECREDUCE_SEQ_FADD, {Dst},
+ {ScalarIn, {VecIn}});
+ }
+
+ /// Build and insert \p Res = G_VECREDUCE_SEQ_FMUL \p ScalarIn, \p VecIn
+ ///
+ /// \p ScalarIn is the scalar accumulator input to start the sequential
+ /// reduction operation of \p VecIn.
+ MachineInstrBuilder buildVecReduceSeqFMul(const DstOp &Dst,
+ const SrcOp &ScalarIn,
+ const SrcOp &VecIn) {
+ return buildInstr(TargetOpcode::G_VECREDUCE_SEQ_FMUL, {Dst},
+ {ScalarIn, {VecIn}});
+ }
+
+  /// Build and insert \p Res = G_VECREDUCE_FADD \p ScalarIn, \p VecIn
+ ///
+ /// \p ScalarIn is the scalar accumulator input to the reduction operation of
+ /// \p VecIn.
+ MachineInstrBuilder buildVecReduceFAdd(const DstOp &Dst,
+ const SrcOp &ScalarIn,
+ const SrcOp &VecIn) {
+ return buildInstr(TargetOpcode::G_VECREDUCE_FADD, {Dst}, {ScalarIn, VecIn});
+ }
+
+  /// Build and insert \p Res = G_VECREDUCE_FMUL \p ScalarIn, \p VecIn
+ ///
+ /// \p ScalarIn is the scalar accumulator input to the reduction operation of
+ /// \p VecIn.
+ MachineInstrBuilder buildVecReduceFMul(const DstOp &Dst,
+ const SrcOp &ScalarIn,
+ const SrcOp &VecIn) {
+ return buildInstr(TargetOpcode::G_VECREDUCE_FMUL, {Dst}, {ScalarIn, VecIn});
+ }
+
+ /// Build and insert \p Res = G_VECREDUCE_FMAX \p Src
+ MachineInstrBuilder buildVecReduceFMax(const DstOp &Dst, const SrcOp &Src) {
+ return buildInstr(TargetOpcode::G_VECREDUCE_FMAX, {Dst}, {Src});
+ }
+
+ /// Build and insert \p Res = G_VECREDUCE_FMIN \p Src
+ MachineInstrBuilder buildVecReduceFMin(const DstOp &Dst, const SrcOp &Src) {
+ return buildInstr(TargetOpcode::G_VECREDUCE_FMIN, {Dst}, {Src});
+  }
+
+  /// Build and insert \p Res = G_VECREDUCE_ADD \p Src
+ MachineInstrBuilder buildVecReduceAdd(const DstOp &Dst, const SrcOp &Src) {
+ return buildInstr(TargetOpcode::G_VECREDUCE_ADD, {Dst}, {Src});
+ }
+
+ /// Build and insert \p Res = G_VECREDUCE_MUL \p Src
+ MachineInstrBuilder buildVecReduceMul(const DstOp &Dst, const SrcOp &Src) {
+ return buildInstr(TargetOpcode::G_VECREDUCE_MUL, {Dst}, {Src});
+ }
+
+ /// Build and insert \p Res = G_VECREDUCE_AND \p Src
+ MachineInstrBuilder buildVecReduceAnd(const DstOp &Dst, const SrcOp &Src) {
+ return buildInstr(TargetOpcode::G_VECREDUCE_AND, {Dst}, {Src});
+ }
+
+ /// Build and insert \p Res = G_VECREDUCE_OR \p Src
+ MachineInstrBuilder buildVecReduceOr(const DstOp &Dst, const SrcOp &Src) {
+ return buildInstr(TargetOpcode::G_VECREDUCE_OR, {Dst}, {Src});
+ }
+
+ /// Build and insert \p Res = G_VECREDUCE_XOR \p Src
+ MachineInstrBuilder buildVecReduceXor(const DstOp &Dst, const SrcOp &Src) {
+ return buildInstr(TargetOpcode::G_VECREDUCE_XOR, {Dst}, {Src});
+ }
+
+ /// Build and insert \p Res = G_VECREDUCE_SMAX \p Src
+ MachineInstrBuilder buildVecReduceSMax(const DstOp &Dst, const SrcOp &Src) {
+ return buildInstr(TargetOpcode::G_VECREDUCE_SMAX, {Dst}, {Src});
+ }
+
+ /// Build and insert \p Res = G_VECREDUCE_SMIN \p Src
+ MachineInstrBuilder buildVecReduceSMin(const DstOp &Dst, const SrcOp &Src) {
+ return buildInstr(TargetOpcode::G_VECREDUCE_SMIN, {Dst}, {Src});
+ }
+
+ /// Build and insert \p Res = G_VECREDUCE_UMAX \p Src
+ MachineInstrBuilder buildVecReduceUMax(const DstOp &Dst, const SrcOp &Src) {
+ return buildInstr(TargetOpcode::G_VECREDUCE_UMAX, {Dst}, {Src});
+ }
+
+ /// Build and insert \p Res = G_VECREDUCE_UMIN \p Src
+ MachineInstrBuilder buildVecReduceUMin(const DstOp &Dst, const SrcOp &Src) {
+ return buildInstr(TargetOpcode::G_VECREDUCE_UMIN, {Dst}, {Src});
+ }
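A short usage sketch for the reduction builders re-added above; the types and the helper name `sumVector` are illustrative assumptions, not code from the patch:

    // Sketch: Res = G_VECREDUCE_ADD Vec, where Vec is assumed <4 x s32>.
    Register sumVector(MachineIRBuilder &MIB, Register Vec) {
      LLT S32 = LLT::scalar(32);
      return MIB.buildVecReduceAdd(S32, Vec).getReg(0);
      // A sequential FP reduction would thread an accumulator instead:
      //   MIB.buildVecReduceSeqFAdd(S32, Acc, Vec);
    }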
virtual MachineInstrBuilder buildInstr(unsigned Opc, ArrayRef<DstOp> DstOps,
ArrayRef<SrcOp> SrcOps,
Optional<unsigned> Flags = None);
diff --git a/contrib/libs/llvm12/include/llvm/CodeGen/GlobalISel/RegisterBankInfo.h b/contrib/libs/llvm12/include/llvm/CodeGen/GlobalISel/RegisterBankInfo.h
index 0dbd1ecffe..baff41e3c3 100644
--- a/contrib/libs/llvm12/include/llvm/CodeGen/GlobalISel/RegisterBankInfo.h
+++ b/contrib/libs/llvm12/include/llvm/CodeGen/GlobalISel/RegisterBankInfo.h
@@ -111,37 +111,37 @@ public:
/// Currently the TableGen-like file would look like:
/// \code
/// PartialMapping[] = {
- /// /*32-bit add*/ {0, 32, GPR}, // Scalar entry repeated for first
- /// // vec elt.
- /// /*2x32-bit add*/ {0, 32, GPR}, {32, 32, GPR},
- /// /*<2x32-bit> vadd*/ {0, 64, VPR}
+ /// /*32-bit add*/ {0, 32, GPR}, // Scalar entry repeated for first
+ /// // vec elt.
+ /// /*2x32-bit add*/ {0, 32, GPR}, {32, 32, GPR},
+ /// /*<2x32-bit> vadd*/ {0, 64, VPR}
/// }; // PartialMapping duplicated.
///
/// ValueMapping[] {
- /// /*plain 32-bit add*/ {&PartialMapping[0], 1},
+ /// /*plain 32-bit add*/ {&PartialMapping[0], 1},
/// /*expanded vadd on 2xadd*/ {&PartialMapping[1], 2},
- /// /*plain <2x32-bit> vadd*/ {&PartialMapping[3], 1}
+ /// /*plain <2x32-bit> vadd*/ {&PartialMapping[3], 1}
/// };
/// \endcode
///
 /// With the array of pointers, we would have:
/// \code
/// PartialMapping[] = {
- /// /*32-bit add lower */ { 0, 32, GPR},
+ /// /*32-bit add lower */ { 0, 32, GPR},
/// /*32-bit add upper */ {32, 32, GPR},
- /// /*<2x32-bit> vadd */ { 0, 64, VPR}
+ /// /*<2x32-bit> vadd */ { 0, 64, VPR}
/// }; // No more duplication.
///
/// BreakDowns[] = {
- /// /*AddBreakDown*/ &PartialMapping[0],
+ /// /*AddBreakDown*/ &PartialMapping[0],
/// /*2xAddBreakDown*/ &PartialMapping[0], &PartialMapping[1],
- /// /*VAddBreakDown*/ &PartialMapping[2]
+ /// /*VAddBreakDown*/ &PartialMapping[2]
/// }; // Addresses of PartialMapping duplicated (smaller).
///
/// ValueMapping[] {
- /// /*plain 32-bit add*/ {&BreakDowns[0], 1},
+ /// /*plain 32-bit add*/ {&BreakDowns[0], 1},
/// /*expanded vadd on 2xadd*/ {&BreakDowns[1], 2},
- /// /*plain <2x32-bit> vadd*/ {&BreakDowns[3], 1}
+ /// /*plain <2x32-bit> vadd*/ {&BreakDowns[3], 1}
/// };
/// \endcode
///
diff --git a/contrib/libs/llvm12/include/llvm/CodeGen/GlobalISel/Utils.h b/contrib/libs/llvm12/include/llvm/CodeGen/GlobalISel/Utils.h
index d07bcae8e7..6059e13234 100644
--- a/contrib/libs/llvm12/include/llvm/CodeGen/GlobalISel/Utils.h
+++ b/contrib/libs/llvm12/include/llvm/CodeGen/GlobalISel/Utils.h
@@ -25,12 +25,12 @@
#include "llvm/CodeGen/Register.h"
#include "llvm/Support/Alignment.h"
#include "llvm/Support/LowLevelTypeImpl.h"
-#include <cstdint>
+#include <cstdint>
namespace llvm {
class AnalysisUsage;
-class GISelKnownBits;
+class GISelKnownBits;
class MachineFunction;
class MachineInstr;
class MachineOperand;
@@ -41,7 +41,7 @@ class MachineRegisterInfo;
class MCInstrDesc;
class RegisterBankInfo;
class TargetInstrInfo;
-class TargetLowering;
+class TargetLowering;
class TargetPassConfig;
class TargetRegisterInfo;
class TargetRegisterClass;
@@ -59,10 +59,10 @@ Register constrainRegToClass(MachineRegisterInfo &MRI,
/// Constrain the Register operand OpIdx, so that it is now constrained to the
/// TargetRegisterClass passed as an argument (RegClass).
-/// If this fails, create a new virtual register in the correct class and insert
-/// a COPY before \p InsertPt if it is a use or after if it is a definition.
-/// In both cases, the function also updates the register of RegMo. The debug
-/// location of \p InsertPt is used for the new copy.
+/// If this fails, create a new virtual register in the correct class and insert
+/// a COPY before \p InsertPt if it is a use or after if it is a definition.
+/// In both cases, the function also updates the register of RegMO. The debug
+/// location of \p InsertPt is used for the new copy.
///
/// \return The virtual register constrained to the right register class.
Register constrainOperandRegClass(const MachineFunction &MF,
@@ -72,13 +72,13 @@ Register constrainOperandRegClass(const MachineFunction &MF,
const RegisterBankInfo &RBI,
MachineInstr &InsertPt,
const TargetRegisterClass &RegClass,
- MachineOperand &RegMO);
+ MachineOperand &RegMO);
-/// Try to constrain Reg so that it is usable by argument OpIdx of the provided
-/// MCInstrDesc \p II. If this fails, create a new virtual register in the
-/// correct class and insert a COPY before \p InsertPt if it is a use or after
-/// if it is a definition. In both cases, the function also updates the register
-/// of RegMo.
+/// Try to constrain Reg so that it is usable by argument OpIdx of the provided
+/// MCInstrDesc \p II. If this fails, create a new virtual register in the
+/// correct class and insert a COPY before \p InsertPt if it is a use or after
+/// if it is a definition. In both cases, the function also updates the register
+/// of RegMo.
/// This is equivalent to constrainOperandRegClass(..., RegClass, ...)
/// with RegClass obtained from the MCInstrDesc. The debug location of \p
/// InsertPt is used for the new copy.
@@ -90,7 +90,7 @@ Register constrainOperandRegClass(const MachineFunction &MF,
const TargetInstrInfo &TII,
const RegisterBankInfo &RBI,
MachineInstr &InsertPt, const MCInstrDesc &II,
- MachineOperand &RegMO, unsigned OpIdx);
+ MachineOperand &RegMO, unsigned OpIdx);
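A condensed sketch of a typical call site for this overload, e.g. in a target's instruction selector; the helper name `constrainDef` and the choice of operand 0 are illustrative:

    // Sketch: constrain the def operand of a newly selected MI to the class
    // its MCInstrDesc requires; on failure a COPY is inserted and the
    // operand is rewritten, as described above.
    Register constrainDef(MachineFunction &MF, const TargetRegisterInfo &TRI,
                          MachineRegisterInfo &MRI, const TargetInstrInfo &TII,
                          const RegisterBankInfo &RBI, MachineInstr &MI) {
      return constrainOperandRegClass(MF, TRI, MRI, TII, RBI, MI, MI.getDesc(),
                                      MI.getOperand(0), /*OpIdx=*/0);
    }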
/// Mutate the newly-selected instruction \p I to constrain its (possibly
/// generic) virtual register operands to the instruction's register class.
@@ -131,19 +131,19 @@ void reportGISelWarning(MachineFunction &MF, const TargetPassConfig &TPC,
MachineOptimizationRemarkEmitter &MORE,
MachineOptimizationRemarkMissed &R);
-/// If \p VReg is defined by a G_CONSTANT, return the corresponding value.
-Optional<APInt> getConstantVRegVal(Register VReg,
- const MachineRegisterInfo &MRI);
-
+/// If \p VReg is defined by a G_CONSTANT, return the corresponding value.
+Optional<APInt> getConstantVRegVal(Register VReg,
+ const MachineRegisterInfo &MRI);
+
 /// If \p VReg is defined by a G_CONSTANT whose value fits in an int64_t,
 /// returns it.
-Optional<int64_t> getConstantVRegSExtVal(Register VReg,
- const MachineRegisterInfo &MRI);
-
+Optional<int64_t> getConstantVRegSExtVal(Register VReg,
+ const MachineRegisterInfo &MRI);
+
/// Simple struct used to hold a constant integer value and a virtual
/// register.
struct ValueAndVReg {
- APInt Value;
+ APInt Value;
Register VReg;
};
/// If \p VReg is defined by a statically evaluable chain of
@@ -153,13 +153,13 @@ struct ValueAndVReg {
/// When \p LookThroughInstrs == false this function behaves like
/// getConstantVRegVal.
/// When \p HandleFConstants == false the function bails on G_FCONSTANTs.
-/// When \p LookThroughAnyExt == true the function treats G_ANYEXT same as
-/// G_SEXT.
+/// When \p LookThroughAnyExt == true the function treats G_ANYEXT same as
+/// G_SEXT.
Optional<ValueAndVReg>
getConstantVRegValWithLookThrough(Register VReg, const MachineRegisterInfo &MRI,
bool LookThroughInstrs = true,
- bool HandleFConstants = true,
- bool LookThroughAnyExt = false);
+ bool HandleFConstants = true,
+ bool LookThroughAnyExt = false);
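To illustrate the look-through variant, a hedged sketch of a combiner-style match; `matchAddConstantRHS` is a hypothetical helper and assumes `MI` is a G_ADD:

    // Sketch: extract a constant RHS of a G_ADD, looking through copies and
    // extension-like instructions per the defaults documented above.
    bool matchAddConstantRHS(MachineInstr &MI, const MachineRegisterInfo &MRI,
                             APInt &Imm) {
      auto Cst = getConstantVRegValWithLookThrough(MI.getOperand(2).getReg(),
                                                   MRI);
      if (!Cst)
        return false;
      Imm = Cst->Value; // Cst->VReg is the defining G_CONSTANT's register
      return true;
    }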
const ConstantFP* getConstantFPVRegVal(Register VReg,
const MachineRegisterInfo &MRI);
@@ -169,20 +169,20 @@ const ConstantFP* getConstantFPVRegVal(Register VReg,
MachineInstr *getOpcodeDef(unsigned Opcode, Register Reg,
const MachineRegisterInfo &MRI);
-/// Simple struct used to hold a Register value and the instruction which
-/// defines it.
-struct DefinitionAndSourceRegister {
- MachineInstr *MI;
- Register Reg;
-};
-
-/// Find the def instruction for \p Reg, and underlying value Register folding
-/// away any copies.
-Optional<DefinitionAndSourceRegister>
-getDefSrcRegIgnoringCopies(Register Reg, const MachineRegisterInfo &MRI);
-
-/// Find the def instruction for \p Reg, folding away any trivial copies. May
-/// return nullptr if \p Reg is not a generic virtual register.
+/// Simple struct used to hold a Register value and the instruction which
+/// defines it.
+struct DefinitionAndSourceRegister {
+ MachineInstr *MI;
+ Register Reg;
+};
+
+/// Find the def instruction for \p Reg and the underlying value Register,
+/// folding away any copies.
+Optional<DefinitionAndSourceRegister>
+getDefSrcRegIgnoringCopies(Register Reg, const MachineRegisterInfo &MRI);
+
+/// Find the def instruction for \p Reg, folding away any trivial copies. May
+/// return nullptr if \p Reg is not a generic virtual register.
MachineInstr *getDefIgnoringCopies(Register Reg,
const MachineRegisterInfo &MRI);
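A small sketch of how these def-chasing helpers are typically used; `isXorResult` is a hypothetical predicate:

    // Sketch: test whether Reg is ultimately produced by a G_XOR, skipping
    // trivial copies between the use and the def.
    bool isXorResult(Register Reg, const MachineRegisterInfo &MRI) {
      Optional<DefinitionAndSourceRegister> DefSrc =
          getDefSrcRegIgnoringCopies(Reg, MRI);
      return DefSrc && DefSrc->MI->getOpcode() == TargetOpcode::G_XOR;
    }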
@@ -207,12 +207,12 @@ Optional<APInt> ConstantFoldBinOp(unsigned Opcode, const Register Op1,
Optional<APInt> ConstantFoldExtOp(unsigned Opcode, const Register Op1,
uint64_t Imm, const MachineRegisterInfo &MRI);
-/// Test if the given value is known to have exactly one bit set. This differs
-/// from computeKnownBits in that it doesn't necessarily determine which bit is
-/// set.
-bool isKnownToBeAPowerOfTwo(Register Val, const MachineRegisterInfo &MRI,
- GISelKnownBits *KnownBits = nullptr);
-
+/// Test if the given value is known to have exactly one bit set. This differs
+/// from computeKnownBits in that it doesn't necessarily determine which bit is
+/// set.
+bool isKnownToBeAPowerOfTwo(Register Val, const MachineRegisterInfo &MRI,
+ GISelKnownBits *KnownBits = nullptr);
+
/// Returns true if \p Val can be assumed to never be a NaN. If \p SNaN is true,
 /// this returns whether \p Val can be assumed to never be a signaling NaN.
bool isKnownNeverNaN(Register Val, const MachineRegisterInfo &MRI,
@@ -225,66 +225,66 @@ inline bool isKnownNeverSNaN(Register Val, const MachineRegisterInfo &MRI) {
Align inferAlignFromPtrInfo(MachineFunction &MF, const MachinePointerInfo &MPO);
-/// Return a virtual register corresponding to the incoming argument register \p
-/// PhysReg. This register is expected to have class \p RC, and optional type \p
-/// RegTy. This assumes all references to the register will use the same type.
-///
-/// If there is an existing live-in argument register, it will be returned.
-/// This will also ensure there is a valid copy
-Register getFunctionLiveInPhysReg(MachineFunction &MF, const TargetInstrInfo &TII,
- MCRegister PhysReg,
- const TargetRegisterClass &RC,
- LLT RegTy = LLT());
-
-/// Return the least common multiple type of \p OrigTy and \p TargetTy, by changing the
-/// number of vector elements or scalar bitwidth. The intent is a
-/// G_MERGE_VALUES, G_BUILD_VECTOR, or G_CONCAT_VECTORS can be constructed from
-/// \p OrigTy elements, and unmerged into \p TargetTy
-LLVM_READNONE
-LLT getLCMType(LLT OrigTy, LLT TargetTy);
-
-/// Return a type where the total size is the greatest common divisor of \p
-/// OrigTy and \p TargetTy. This will try to either change the number of vector
-/// elements, or bitwidth of scalars. The intent is the result type can be used
-/// as the result of a G_UNMERGE_VALUES from \p OrigTy, and then some
-/// combination of G_MERGE_VALUES, G_BUILD_VECTOR and G_CONCAT_VECTORS (possibly
-/// with intermediate casts) can re-form \p TargetTy.
-///
-/// If these are vectors with different element types, this will try to produce
-/// a vector with a compatible total size, but the element type of \p OrigTy. If
-/// this can't be satisfied, this will produce a scalar smaller than the
-/// original vector elements.
-///
-/// In the worst case, this returns LLT::scalar(1)
-LLVM_READNONE
+/// Return a virtual register corresponding to the incoming argument register \p
+/// PhysReg. This register is expected to have class \p RC, and optional type \p
+/// RegTy. This assumes all references to the register will use the same type.
+///
+/// If there is an existing live-in argument register, it will be returned.
+/// This will also ensure there is a valid copy.
+Register getFunctionLiveInPhysReg(MachineFunction &MF, const TargetInstrInfo &TII,
+ MCRegister PhysReg,
+ const TargetRegisterClass &RC,
+ LLT RegTy = LLT());
+
+/// Return the least common multiple type of \p OrigTy and \p TargetTy, by changing the
+/// number of vector elements or scalar bitwidth. The intent is a
+/// G_MERGE_VALUES, G_BUILD_VECTOR, or G_CONCAT_VECTORS can be constructed from
+/// \p OrigTy elements, and unmerged into \p TargetTy.
+LLVM_READNONE
+LLT getLCMType(LLT OrigTy, LLT TargetTy);
+
+/// Return a type where the total size is the greatest common divisor of \p
+/// OrigTy and \p TargetTy. This will try to either change the number of vector
+/// elements, or bitwidth of scalars. The intent is the result type can be used
+/// as the result of a G_UNMERGE_VALUES from \p OrigTy, and then some
+/// combination of G_MERGE_VALUES, G_BUILD_VECTOR and G_CONCAT_VECTORS (possibly
+/// with intermediate casts) can re-form \p TargetTy.
+///
+/// If these are vectors with different element types, this will try to produce
+/// a vector with a compatible total size, but the element type of \p OrigTy. If
+/// this can't be satisfied, this will produce a scalar smaller than the
+/// original vector elements.
+///
+/// In the worst case, this returns LLT::scalar(1).
+LLVM_READNONE
LLT getGCDType(LLT OrigTy, LLT TargetTy);
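A worked example of the two type helpers, under the total-size reading described above (the expected results follow from the lcm/gcd of the bit widths):

    // Sketch: <3 x s32> is 96 bits, <4 x s32> is 128 bits.
    LLT V3S32 = LLT::vector(3, 32);
    LLT V4S32 = LLT::vector(4, 32);
    LLT Lcm = getLCMType(V3S32, V4S32); // lcm(96, 128) = 384 -> <12 x s32>
    LLT Gcd = getGCDType(V3S32, V4S32); // gcd(96, 128) = 32  -> s32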
-/// \returns The splat index of a G_SHUFFLE_VECTOR \p MI when \p MI is a splat.
-/// If \p MI is not a splat, returns None.
-Optional<int> getSplatIndex(MachineInstr &MI);
-
-/// Returns a scalar constant of a G_BUILD_VECTOR splat if it exists.
-Optional<int64_t> getBuildVectorConstantSplat(const MachineInstr &MI,
- const MachineRegisterInfo &MRI);
-
-/// Return true if the specified instruction is a G_BUILD_VECTOR or
-/// G_BUILD_VECTOR_TRUNC where all of the elements are 0 or undef.
-bool isBuildVectorAllZeros(const MachineInstr &MI,
- const MachineRegisterInfo &MRI);
-
-/// Return true if the specified instruction is a G_BUILD_VECTOR or
-/// G_BUILD_VECTOR_TRUNC where all of the elements are ~0 or undef.
-bool isBuildVectorAllOnes(const MachineInstr &MI,
- const MachineRegisterInfo &MRI);
-
-/// Returns true if given the TargetLowering's boolean contents information,
-/// the value \p Val contains a true value.
-bool isConstTrueVal(const TargetLowering &TLI, int64_t Val, bool IsVector,
- bool IsFP);
-
-/// Returns an integer representing true, as defined by the
-/// TargetBooleanContents.
-int64_t getICmpTrueVal(const TargetLowering &TLI, bool IsVector, bool IsFP);
+/// \returns The splat index of a G_SHUFFLE_VECTOR \p MI when \p MI is a splat.
+/// If \p MI is not a splat, returns None.
+Optional<int> getSplatIndex(MachineInstr &MI);
+
+/// Returns a scalar constant of a G_BUILD_VECTOR splat if it exists.
+Optional<int64_t> getBuildVectorConstantSplat(const MachineInstr &MI,
+ const MachineRegisterInfo &MRI);
+
+/// Return true if the specified instruction is a G_BUILD_VECTOR or
+/// G_BUILD_VECTOR_TRUNC where all of the elements are 0 or undef.
+bool isBuildVectorAllZeros(const MachineInstr &MI,
+ const MachineRegisterInfo &MRI);
+
+/// Return true if the specified instruction is a G_BUILD_VECTOR or
+/// G_BUILD_VECTOR_TRUNC where all of the elements are ~0 or undef.
+bool isBuildVectorAllOnes(const MachineInstr &MI,
+ const MachineRegisterInfo &MRI);
+
+/// Returns true if given the TargetLowering's boolean contents information,
+/// the value \p Val contains a true value.
+bool isConstTrueVal(const TargetLowering &TLI, int64_t Val, bool IsVector,
+ bool IsFP);
+
+/// Returns an integer representing true, as defined by the
+/// TargetBooleanContents.
+int64_t getICmpTrueVal(const TargetLowering &TLI, bool IsVector, bool IsFP);
} // End namespace llvm.
#endif
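As a usage note for the boolean-contents helpers at the end of this file, a minimal sketch; `buildICmpTrue` is a hypothetical helper and assumes a MachineIRBuilder is available at the call site:

    // Sketch: materialize the canonical integer "true" for a compare result,
    // respecting the target's boolean contents (e.g. 1 vs. all-ones).
    Register buildICmpTrue(MachineIRBuilder &MIB, const TargetLowering &TLI,
                           LLT ResTy, bool IsVector) {
      int64_t TrueVal = getICmpTrueVal(TLI, IsVector, /*IsFP=*/false);
      return MIB.buildConstant(ResTy, TrueVal).getReg(0);
    }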
diff --git a/contrib/libs/llvm12/include/llvm/CodeGen/ISDOpcodes.h b/contrib/libs/llvm12/include/llvm/CodeGen/ISDOpcodes.h
index 8dae18f3e2..d944d3b4df 100644
--- a/contrib/libs/llvm12/include/llvm/CodeGen/ISDOpcodes.h
+++ b/contrib/libs/llvm12/include/llvm/CodeGen/ISDOpcodes.h
@@ -93,16 +93,16 @@ enum NodeType {
/// the parent's frame or return address, and so on.
FRAMEADDR,
RETURNADDR,
-
- /// ADDROFRETURNADDR - Represents the llvm.addressofreturnaddress intrinsic.
- /// This node takes no operand, returns a target-specific pointer to the
- /// place in the stack frame where the return address of the current
- /// function is stored.
+
+ /// ADDROFRETURNADDR - Represents the llvm.addressofreturnaddress intrinsic.
+  /// This node takes no operand and returns a target-specific pointer to the
+ /// place in the stack frame where the return address of the current
+ /// function is stored.
ADDROFRETURNADDR,
-
- /// SPONENTRY - Represents the llvm.sponentry intrinsic. Takes no argument
- /// and returns the stack pointer value at the entry of the current
- /// function calling this intrinsic.
+
+ /// SPONENTRY - Represents the llvm.sponentry intrinsic. Takes no argument
+ /// and returns the stack pointer value at the entry of the current
+ /// function calling this intrinsic.
SPONENTRY,
/// LOCAL_RECOVER - Represents the llvm.localrecover intrinsic.
@@ -290,16 +290,16 @@ enum NodeType {
ADDCARRY,
SUBCARRY,
- /// Carry-using overflow-aware nodes for multiple precision addition and
- /// subtraction. These nodes take three operands: The first two are normal lhs
- /// and rhs to the add or sub, and the third is a boolean indicating if there
- /// is an incoming carry. They produce two results: the normal result of the
- /// add or sub, and a boolean that indicates if an overflow occured (*not*
- /// flag, because it may be a store to memory, etc.). If the type of the
- /// boolean is not i1 then the high bits conform to getBooleanContents.
- SADDO_CARRY,
- SSUBO_CARRY,
-
+ /// Carry-using overflow-aware nodes for multiple precision addition and
+ /// subtraction. These nodes take three operands: The first two are normal lhs
+ /// and rhs to the add or sub, and the third is a boolean indicating if there
+ /// is an incoming carry. They produce two results: the normal result of the
+  /// add or sub, and a boolean that indicates if an overflow occurred (*not*
+ /// flag, because it may be a store to memory, etc.). If the type of the
+ /// boolean is not i1 then the high bits conform to getBooleanContents.
+ SADDO_CARRY,
+ SSUBO_CARRY,
+
/// RESULT, BOOL = [SU]ADDO(LHS, RHS) - Overflow-aware nodes for addition.
/// These nodes take two operands: the normal LHS and RHS to the add. They
/// produce two results: the normal result of the add, and a boolean that
@@ -336,16 +336,16 @@ enum NodeType {
SSUBSAT,
USUBSAT,
- /// RESULT = [US]SHLSAT(LHS, RHS) - Perform saturation left shift. The first
- /// operand is the value to be shifted, and the second argument is the amount
- /// to shift by. Both must be integers of the same bit width (W). If the true
- /// value of LHS << RHS exceeds the largest value that can be represented by
- /// W bits, the resulting value is this maximum value, Otherwise, if this
- /// value is less than the smallest value that can be represented by W bits,
- /// the resulting value is this minimum value.
- SSHLSAT,
- USHLSAT,
-
+ /// RESULT = [US]SHLSAT(LHS, RHS) - Perform saturation left shift. The first
+ /// operand is the value to be shifted, and the second argument is the amount
+ /// to shift by. Both must be integers of the same bit width (W). If the true
+ /// value of LHS << RHS exceeds the largest value that can be represented by
+  /// W bits, the resulting value is this maximum value. Otherwise, if this
+ /// value is less than the smallest value that can be represented by W bits,
+ /// the resulting value is this minimum value.
+ SSHLSAT,
+ USHLSAT,
+
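The saturation rule above can be modeled on plain integers; this is a scalar sketch of the documented semantics for W = 8, not the node's implementation:

    #include <cstdint>
    // Sketch: SSHLSAT for W = 8; assumes RHS < 8 as required for shifts.
    int8_t sshlsat8(int8_t LHS, uint8_t RHS) {
      int32_t Wide = int32_t(LHS) * (1 << RHS); // exact value of LHS << RHS
      if (Wide > INT8_MAX) return INT8_MAX;     // clamp to the maximum
      if (Wide < INT8_MIN) return INT8_MIN;     // clamp to the minimum
      return int8_t(Wide);
    }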
/// RESULT = [US]MULFIX(LHS, RHS, SCALE) - Perform fixed point multiplication
/// on
/// 2 integers with the same width and scale. SCALE represents the scale of
@@ -540,8 +540,8 @@ enum NodeType {
/// IDX is first scaled by the runtime scaling factor of T. Elements IDX
/// through (IDX + num_elements(T) - 1) must be valid VECTOR indices. If this
/// condition cannot be determined statically but is false at runtime, then
- /// the result vector is undefined. The IDX parameter must be a vector index
- /// constant type, which for most targets will be an integer pointer type.
+ /// the result vector is undefined. The IDX parameter must be a vector index
+ /// constant type, which for most targets will be an integer pointer type.
///
/// This operation supports extracting a fixed-width vector from a scalable
/// vector, but not the other way around.
@@ -624,7 +624,7 @@ enum NodeType {
CTLZ,
CTPOP,
BITREVERSE,
- PARITY,
+ PARITY,
/// Bit counting operators with an undefined result for zero inputs.
CTTZ_ZERO_UNDEF,
@@ -741,21 +741,21 @@ enum NodeType {
FP_TO_SINT,
FP_TO_UINT,
- /// FP_TO_[US]INT_SAT - Convert floating point value in operand 0 to a
- /// signed or unsigned integer type with the bit width given in operand 1 with
- /// the following semantics:
- ///
- /// * If the value is NaN, zero is returned.
- /// * If the value is larger/smaller than the largest/smallest integer,
- /// the largest/smallest integer is returned (saturation).
- /// * Otherwise the result of rounding the value towards zero is returned.
- ///
- /// The width given in operand 1 must be equal to, or smaller than, the scalar
- /// result type width. It may end up being smaller than the result witdh as a
- /// result of integer type legalization.
- FP_TO_SINT_SAT,
- FP_TO_UINT_SAT,
-
+ /// FP_TO_[US]INT_SAT - Convert floating point value in operand 0 to a
+ /// signed or unsigned integer type with the bit width given in operand 1 with
+ /// the following semantics:
+ ///
+ /// * If the value is NaN, zero is returned.
+ /// * If the value is larger/smaller than the largest/smallest integer,
+ /// the largest/smallest integer is returned (saturation).
+ /// * Otherwise the result of rounding the value towards zero is returned.
+ ///
+ /// The width given in operand 1 must be equal to, or smaller than, the scalar
+  /// result type width. It may end up being smaller than the result width as a
+ /// result of integer type legalization.
+ FP_TO_SINT_SAT,
+ FP_TO_UINT_SAT,
+
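The three documented cases translate directly into a scalar model; this sketch mirrors the FP_TO_SINT_SAT rules for a 32-bit result and is illustrative only:

    #include <cmath>
    #include <cstdint>
    // Sketch: NaN -> 0, out-of-range -> saturate, otherwise round toward zero.
    int32_t fptosi_sat32(double V) {
      if (std::isnan(V)) return 0;
      if (V >= 2147483647.0) return INT32_MAX;  // saturate high
      if (V <= -2147483648.0) return INT32_MIN; // saturate low
      return int32_t(std::trunc(V));
    }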
/// X = FP_ROUND(Y, TRUNC) - Rounding 'Y' from a larger floating point type
/// down to the precision of the destination VT. TRUNC is a flag, which is
/// always an integer that is zero or one. If TRUNC is 0, this is a
@@ -897,18 +897,18 @@ enum NodeType {
/// BRCOND - Conditional branch. The first operand is the chain, the
/// second is the condition, the third is the block to branch to if the
/// condition is true. If the type of the condition is not i1, then the
- /// high bits must conform to getBooleanContents. If the condition is undef,
- /// it nondeterministically jumps to the block.
- /// TODO: Its semantics w.r.t undef requires further discussion; we need to
- /// make it sure that it is consistent with optimizations in MIR & the
- /// meaning of IMPLICIT_DEF. See https://reviews.llvm.org/D92015
+ /// high bits must conform to getBooleanContents. If the condition is undef,
+ /// it nondeterministically jumps to the block.
+  /// TODO: Its semantics w.r.t. undef require further discussion; we need to
+  /// make sure that it is consistent with optimizations in MIR & the
+ /// meaning of IMPLICIT_DEF. See https://reviews.llvm.org/D92015
BRCOND,
/// BR_CC - Conditional branch. The behavior is like that of SELECT_CC, in
/// that the condition is represented as condition code, and two nodes to
/// compare, rather than as a combined SetCC node. The operands in order
- /// are chain, cc, lhs, rhs, block to branch to if condition is true. If
- /// condition is undef, it nondeterministically jumps to the block.
+ /// are chain, cc, lhs, rhs, block to branch to if condition is true. If
+ /// condition is undef, it nondeterministically jumps to the block.
BR_CC,
/// INLINEASM - Represents an inline asm block. This node always has two
@@ -1039,9 +1039,9 @@ enum NodeType {
/// DEBUGTRAP - Trap intended to get the attention of a debugger.
DEBUGTRAP,
- /// UBSANTRAP - Trap with an immediate describing the kind of sanitizer failure.
- UBSANTRAP,
-
+ /// UBSANTRAP - Trap with an immediate describing the kind of sanitizer failure.
+ UBSANTRAP,
+
/// PREFETCH - This corresponds to a prefetch intrinsic. The first operand
/// is the chain. The other operands are the address to prefetch,
/// read / write specifier, locality specifier and instruction / data cache
@@ -1136,10 +1136,10 @@ enum NodeType {
/// known nonzero constant. The only operand here is the chain.
GET_DYNAMIC_AREA_OFFSET,
- /// Pseudo probe for AutoFDO, as a place holder in a basic block to improve
- /// the sample counts quality.
- PSEUDO_PROBE,
-
+  /// Pseudo probe for AutoFDO, as a placeholder in a basic block to improve
+ /// the sample counts quality.
+ PSEUDO_PROBE,
+
/// VSCALE(IMM) - Returns the runtime scaling factor used to calculate the
/// number of elements within a scalable vector. IMM is a constant integer
/// multiplier that is applied to the runtime value.
@@ -1147,25 +1147,25 @@ enum NodeType {
/// Generic reduction nodes. These nodes represent horizontal vector
/// reduction operations, producing a scalar result.
- /// The SEQ variants perform reductions in sequential order. The first
+ /// The SEQ variants perform reductions in sequential order. The first
/// operand is an initial scalar accumulator value, and the second operand
/// is the vector to reduce.
- /// E.g. RES = VECREDUCE_SEQ_FADD f32 ACC, <4 x f32> SRC_VEC
- /// ... is equivalent to
- /// RES = (((ACC + SRC_VEC[0]) + SRC_VEC[1]) + SRC_VEC[2]) + SRC_VEC[3]
- VECREDUCE_SEQ_FADD,
- VECREDUCE_SEQ_FMUL,
-
- /// These reductions have relaxed evaluation order semantics, and have a
- /// single vector operand. The order of evaluation is unspecified. For
- /// pow-of-2 vectors, one valid legalizer expansion is to use a tree
- /// reduction, i.e.:
- /// For RES = VECREDUCE_FADD <8 x f16> SRC_VEC
- /// PART_RDX = FADD SRC_VEC[0:3], SRC_VEC[4:7]
- /// PART_RDX2 = FADD PART_RDX[0:1], PART_RDX[2:3]
- /// RES = FADD PART_RDX2[0], PART_RDX2[1]
- /// For non-pow-2 vectors, this can be computed by extracting each element
- /// and performing the operation as if it were scalarized.
+ /// E.g. RES = VECREDUCE_SEQ_FADD f32 ACC, <4 x f32> SRC_VEC
+ /// ... is equivalent to
+ /// RES = (((ACC + SRC_VEC[0]) + SRC_VEC[1]) + SRC_VEC[2]) + SRC_VEC[3]
+ VECREDUCE_SEQ_FADD,
+ VECREDUCE_SEQ_FMUL,
+
+ /// These reductions have relaxed evaluation order semantics, and have a
+ /// single vector operand. The order of evaluation is unspecified. For
+ /// pow-of-2 vectors, one valid legalizer expansion is to use a tree
+ /// reduction, i.e.:
+ /// For RES = VECREDUCE_FADD <8 x f16> SRC_VEC
+ /// PART_RDX = FADD SRC_VEC[0:3], SRC_VEC[4:7]
+ /// PART_RDX2 = FADD PART_RDX[0:1], PART_RDX[2:3]
+ /// RES = FADD PART_RDX2[0], PART_RDX2[1]
+ /// For non-pow-2 vectors, this can be computed by extracting each element
+ /// and performing the operation as if it were scalarized.
VECREDUCE_FADD,
VECREDUCE_FMUL,
/// FMIN/FMAX nodes can have flags, for NaN/NoNaN variants.
@@ -1184,10 +1184,10 @@ enum NodeType {
VECREDUCE_UMAX,
VECREDUCE_UMIN,
-// Vector Predication
-#define BEGIN_REGISTER_VP_SDNODE(VPSDID, ...) VPSDID,
-#include "llvm/IR/VPIntrinsics.def"
-
+// Vector Predication
+#define BEGIN_REGISTER_VP_SDNODE(VPSDID, ...) VPSDID,
+#include "llvm/IR/VPIntrinsics.def"
+
/// BUILTIN_OP_END - This must be the last enum value in this list.
/// The target-specific pre-isel opcode values start here.
BUILTIN_OP_END
@@ -1204,19 +1204,19 @@ static const int FIRST_TARGET_STRICTFP_OPCODE = BUILTIN_OP_END + 400;
/// be used with SelectionDAG::getMemIntrinsicNode.
static const int FIRST_TARGET_MEMORY_OPCODE = BUILTIN_OP_END + 500;
-/// Get underlying scalar opcode for VECREDUCE opcode.
-/// For example ISD::AND for ISD::VECREDUCE_AND.
-NodeType getVecReduceBaseOpcode(unsigned VecReduceOpcode);
-
-/// Whether this is a vector-predicated Opcode.
-bool isVPOpcode(unsigned Opcode);
-
-/// The operand position of the vector mask.
-Optional<unsigned> getVPMaskIdx(unsigned Opcode);
-
-/// The operand position of the explicit vector length parameter.
-Optional<unsigned> getVPExplicitVectorLengthIdx(unsigned Opcode);
-
+/// Get underlying scalar opcode for VECREDUCE opcode.
+/// For example ISD::AND for ISD::VECREDUCE_AND.
+NodeType getVecReduceBaseOpcode(unsigned VecReduceOpcode);
+
+/// Whether this is a vector-predicated Opcode.
+bool isVPOpcode(unsigned Opcode);
+
+/// The operand position of the vector mask.
+Optional<unsigned> getVPMaskIdx(unsigned Opcode);
+
+/// The operand position of the explicit vector length parameter.
+Optional<unsigned> getVPExplicitVectorLengthIdx(unsigned Opcode);
+
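A brief sketch of how the helpers just declared might be queried; the surrounding function is hypothetical:

    // Sketch: classify an opcode using the helpers above (inside namespace
    // llvm). getVecReduceBaseOpcode(VECREDUCE_AND) is expected to be ISD::AND.
    void classify(unsigned Opc) {
      if (ISD::isVPOpcode(Opc)) {
        if (Optional<unsigned> MaskIdx = ISD::getVPMaskIdx(Opc))
          (void)*MaskIdx; // position of the vector mask operand
      } else if (Opc == ISD::VECREDUCE_AND) {
        ISD::NodeType Base = ISD::getVecReduceBaseOpcode(Opc);
        (void)Base;
      }
    }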
//===--------------------------------------------------------------------===//
/// MemIndexedMode enum - This enum defines the load / store indexed
/// addressing modes.
@@ -1339,12 +1339,12 @@ inline bool isUnsignedIntSetCC(CondCode Code) {
return Code == SETUGT || Code == SETUGE || Code == SETULT || Code == SETULE;
}
-/// Return true if this is a setcc instruction that performs an equality
-/// comparison when used with integer operands.
-inline bool isIntEqualitySetCC(CondCode Code) {
- return Code == SETEQ || Code == SETNE;
-}
-
+/// Return true if this is a setcc instruction that performs an equality
+/// comparison when used with integer operands.
+inline bool isIntEqualitySetCC(CondCode Code) {
+ return Code == SETEQ || Code == SETNE;
+}
+
/// Return true if the specified condition returns true if the two operands to
/// the condition are equal. Note that if one of the two operands is a NaN,
/// this value is meaningless.
diff --git a/contrib/libs/llvm12/include/llvm/CodeGen/LiveInterval.h b/contrib/libs/llvm12/include/llvm/CodeGen/LiveInterval.h
index 0ee69222af..e4a03e9598 100644
--- a/contrib/libs/llvm12/include/llvm/CodeGen/LiveInterval.h
+++ b/contrib/libs/llvm12/include/llvm/CodeGen/LiveInterval.h
@@ -32,7 +32,7 @@
#include "llvm/ADT/STLExtras.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/ADT/iterator_range.h"
-#include "llvm/CodeGen/Register.h"
+#include "llvm/CodeGen/Register.h"
#include "llvm/CodeGen/SlotIndexes.h"
#include "llvm/MC/LaneBitmask.h"
#include "llvm/Support/Allocator.h"
@@ -605,9 +605,9 @@ namespace llvm {
/// @p End.
bool isUndefIn(ArrayRef<SlotIndex> Undefs, SlotIndex Begin,
SlotIndex End) const {
- return llvm::any_of(Undefs, [Begin, End](SlotIndex Idx) -> bool {
- return Begin <= Idx && Idx < End;
- });
+ return llvm::any_of(Undefs, [Begin, End](SlotIndex Idx) -> bool {
+ return Begin <= Idx && Idx < End;
+ });
}
/// Flush segment set into the regular segment vector.
@@ -711,16 +711,16 @@ namespace llvm {
private:
SubRange *SubRanges = nullptr; ///< Single linked list of subregister live
/// ranges.
- const Register Reg; // the register or stack slot of this interval.
- float Weight = 0.0; // weight of this interval
+ const Register Reg; // the register or stack slot of this interval.
+ float Weight = 0.0; // weight of this interval
public:
- Register reg() const { return Reg; }
- float weight() const { return Weight; }
- void incrementWeight(float Inc) { Weight += Inc; }
- void setWeight(float Value) { Weight = Value; }
+ Register reg() const { return Reg; }
+ float weight() const { return Weight; }
+ void incrementWeight(float Inc) { Weight += Inc; }
+ void setWeight(float Value) { Weight = Value; }
- LiveInterval(unsigned Reg, float Weight) : Reg(Reg), Weight(Weight) {}
+ LiveInterval(unsigned Reg, float Weight) : Reg(Reg), Weight(Weight) {}
~LiveInterval() {
clearSubRanges();
@@ -817,10 +817,10 @@ namespace llvm {
unsigned getSize() const;
/// isSpillable - Can this interval be spilled?
- bool isSpillable() const { return Weight != huge_valf; }
+ bool isSpillable() const { return Weight != huge_valf; }
/// markNotSpillable - Mark interval as not spillable
- void markNotSpillable() { Weight = huge_valf; }
+ void markNotSpillable() { Weight = huge_valf; }
/// For a given lane mask @p LaneMask, compute indexes at which the
/// lane is marked undefined by subregister <def,read-undef> definitions.
@@ -841,7 +841,7 @@ namespace llvm {
/// function will be applied to the L0010 and L0008 subranges.
///
/// \p Indexes and \p TRI are required to clean up the VNIs that
- /// don't define the related lane masks after they get shrunk. E.g.,
+ /// don't define the related lane masks after they get shrunk. E.g.,
/// when L000F gets split into L0007 and L0008 maybe only a subset
/// of the VNIs that defined L000F defines L0007.
///
@@ -877,7 +877,7 @@ namespace llvm {
bool operator<(const LiveInterval& other) const {
const SlotIndex &thisIndex = beginIndex();
const SlotIndex &otherIndex = other.beginIndex();
- return std::tie(thisIndex, Reg) < std::tie(otherIndex, other.Reg);
+ return std::tie(thisIndex, Reg) < std::tie(otherIndex, other.Reg);
}
void print(raw_ostream &OS) const;
diff --git a/contrib/libs/llvm12/include/llvm/CodeGen/LiveIntervalUnion.h b/contrib/libs/llvm12/include/llvm/CodeGen/LiveIntervalUnion.h
index 6ac8cebb14..d05ab9c533 100644
--- a/contrib/libs/llvm12/include/llvm/CodeGen/LiveIntervalUnion.h
+++ b/contrib/libs/llvm12/include/llvm/CodeGen/LiveIntervalUnion.h
@@ -111,9 +111,9 @@ public:
void verify(LiveVirtRegBitSet& VisitedVRegs);
#endif
- // Get any virtual register that is assign to this physical unit
- LiveInterval *getOneVReg() const;
-
+  // Get any virtual register that is assigned to this physical unit
+ LiveInterval *getOneVReg() const;
+
/// Query interferences between a single live virtual register and a live
/// interval union.
class Query {
diff --git a/contrib/libs/llvm12/include/llvm/CodeGen/LiveIntervals.h b/contrib/libs/llvm12/include/llvm/CodeGen/LiveIntervals.h
index c4abe43a75..bf630ba784 100644
--- a/contrib/libs/llvm12/include/llvm/CodeGen/LiveIntervals.h
+++ b/contrib/libs/llvm12/include/llvm/CodeGen/LiveIntervals.h
@@ -121,8 +121,8 @@ class VirtRegMap;
LiveInterval &getInterval(Register Reg) {
if (hasInterval(Reg))
return *VirtRegIntervals[Reg.id()];
-
- return createAndComputeVirtRegInterval(Reg);
+
+ return createAndComputeVirtRegInterval(Reg);
}
const LiveInterval &getInterval(Register Reg) const {
@@ -149,14 +149,14 @@ class VirtRegMap;
}
/// Interval removal.
- void removeInterval(Register Reg) {
+ void removeInterval(Register Reg) {
delete VirtRegIntervals[Reg];
VirtRegIntervals[Reg] = nullptr;
}
/// Given a register and an instruction, adds a live segment from that
/// instruction to the end of its MBB.
- LiveInterval::Segment addSegmentToEndOfBlock(Register Reg,
+ LiveInterval::Segment addSegmentToEndOfBlock(Register Reg,
MachineInstr &startInst);
/// After removing some uses of a register, shrink its live range to just
@@ -174,7 +174,7 @@ class VirtRegMap;
/// the lane mask of the subregister range.
/// This may leave the subrange empty which needs to be cleaned up with
/// LiveInterval::removeEmptySubranges() afterwards.
- void shrinkToUses(LiveInterval::SubRange &SR, Register Reg);
+ void shrinkToUses(LiveInterval::SubRange &SR, Register Reg);
/// Extend the live range \p LR to reach all points in \p Indices. The
/// points in the \p Indices array must be jointly dominated by the union
@@ -263,8 +263,8 @@ class VirtRegMap;
return Indexes->getMBBFromIndex(index);
}
- void insertMBBInMaps(MachineBasicBlock *MBB) {
- Indexes->insertMBBInMaps(MBB);
+ void insertMBBInMaps(MachineBasicBlock *MBB) {
+ Indexes->insertMBBInMaps(MBB);
assert(unsigned(MBB->getNumber()) == RegMaskBlocks.size() &&
"Blocks must be added in order.");
RegMaskBlocks.push_back(std::make_pair(RegMaskSlots.size(), 0));
@@ -429,7 +429,7 @@ class VirtRegMap;
/// Reg. Subsequent uses should rely on on-demand recomputation. \note This
   /// method can result in inconsistent liveness tracking if multiple physical
/// registers share a regunit, and should be used cautiously.
- void removeAllRegUnitsForPhysReg(MCRegister Reg) {
+ void removeAllRegUnitsForPhysReg(MCRegister Reg) {
for (MCRegUnitIterator Units(Reg, TRI); Units.isValid(); ++Units)
removeRegUnit(*Units);
}
@@ -437,7 +437,7 @@ class VirtRegMap;
/// Remove value numbers and related live segments starting at position
/// \p Pos that are part of any liverange of physical register \p Reg or one
/// of its subregisters.
- void removePhysRegDefAt(MCRegister Reg, SlotIndex Pos);
+ void removePhysRegDefAt(MCRegister Reg, SlotIndex Pos);
/// Remove value number and related live segments of \p LI and its subranges
/// that start at position \p Pos.
@@ -469,7 +469,7 @@ class VirtRegMap;
bool computeDeadValues(LiveInterval &LI,
SmallVectorImpl<MachineInstr*> *dead);
- static LiveInterval *createInterval(Register Reg);
+ static LiveInterval *createInterval(Register Reg);
void printInstrs(raw_ostream &O) const;
void dumpInstrs() const;
@@ -480,7 +480,7 @@ class VirtRegMap;
using ShrinkToUsesWorkList = SmallVector<std::pair<SlotIndex, VNInfo*>, 16>;
void extendSegmentsToUses(LiveRange &Segments,
- ShrinkToUsesWorkList &WorkList, Register Reg,
+ ShrinkToUsesWorkList &WorkList, Register Reg,
LaneBitmask LaneMask);
/// Helper function for repairIntervalsInRange(), walks backwards and
@@ -490,7 +490,7 @@ class VirtRegMap;
void repairOldRegInRange(MachineBasicBlock::iterator Begin,
MachineBasicBlock::iterator End,
const SlotIndex endIdx, LiveRange &LR,
- Register Reg,
+ Register Reg,
LaneBitmask LaneMask = LaneBitmask::getAll());
class HMEditor;
diff --git a/contrib/libs/llvm12/include/llvm/CodeGen/LiveRangeEdit.h b/contrib/libs/llvm12/include/llvm/CodeGen/LiveRangeEdit.h
index cf6b28aebe..f16f8dea4a 100644
--- a/contrib/libs/llvm12/include/llvm/CodeGen/LiveRangeEdit.h
+++ b/contrib/libs/llvm12/include/llvm/CodeGen/LiveRangeEdit.h
@@ -63,14 +63,14 @@ public:
/// Called when a virtual register is no longer used. Return false to defer
/// its deletion from LiveIntervals.
- virtual bool LRE_CanEraseVirtReg(Register) { return true; }
+ virtual bool LRE_CanEraseVirtReg(Register) { return true; }
/// Called before shrinking the live range of a virtual register.
- virtual void LRE_WillShrinkVirtReg(Register) {}
+ virtual void LRE_WillShrinkVirtReg(Register) {}
/// Called after cloning a virtual register.
/// This is used for new registers representing connected components of Old.
- virtual void LRE_DidCloneVirtReg(Register New, Register Old) {}
+ virtual void LRE_DidCloneVirtReg(Register New, Register Old) {}
};
private:
@@ -159,7 +159,7 @@ public:
return *Parent;
}
- Register getReg() const { return getParent().reg(); }
+ Register getReg() const { return getParent().reg(); }
/// Iterator for accessing the new registers added by this edit.
using iterator = SmallVectorImpl<Register>::const_iterator;
diff --git a/contrib/libs/llvm12/include/llvm/CodeGen/LiveRegMatrix.h b/contrib/libs/llvm12/include/llvm/CodeGen/LiveRegMatrix.h
index 4d6d31e2b6..4bea80c868 100644
--- a/contrib/libs/llvm12/include/llvm/CodeGen/LiveRegMatrix.h
+++ b/contrib/libs/llvm12/include/llvm/CodeGen/LiveRegMatrix.h
@@ -111,19 +111,19 @@ public:
/// If this function returns IK_Free, it is legal to assign(VirtReg, PhysReg).
/// When there is more than one kind of interference, the InterferenceKind
/// with the highest enum value is returned.
- InterferenceKind checkInterference(LiveInterval &VirtReg, MCRegister PhysReg);
+ InterferenceKind checkInterference(LiveInterval &VirtReg, MCRegister PhysReg);
/// Check for interference in the segment [Start, End) that may prevent
/// assignment to PhysReg. If this function returns true, there is
/// interference in the segment [Start, End) of some other interval already
/// assigned to PhysReg. If this function returns false, PhysReg is free at
/// the segment [Start, End).
- bool checkInterference(SlotIndex Start, SlotIndex End, MCRegister PhysReg);
+ bool checkInterference(SlotIndex Start, SlotIndex End, MCRegister PhysReg);
/// Assign VirtReg to PhysReg.
/// This will mark VirtReg's live range as occupied in the LiveRegMatrix and
/// update VirtRegMap. The live range is expected to be available in PhysReg.
- void assign(LiveInterval &VirtReg, MCRegister PhysReg);
+ void assign(LiveInterval &VirtReg, MCRegister PhysReg);
/// Unassign VirtReg from its PhysReg.
/// Assuming that VirtReg was previously assigned to a PhysReg, this undoes
@@ -131,7 +131,7 @@ public:
void unassign(LiveInterval &VirtReg);
/// Returns true if the given \p PhysReg has any live intervals assigned.
- bool isPhysRegUsed(MCRegister PhysReg) const;
+ bool isPhysRegUsed(MCRegister PhysReg) const;
//===--------------------------------------------------------------------===//
// Low-level interface.
@@ -143,25 +143,25 @@ public:
/// Check for regmask interference only.
/// Return true if VirtReg crosses a regmask operand that clobbers PhysReg.
/// If PhysReg is null, check if VirtReg crosses any regmask operands.
- bool checkRegMaskInterference(LiveInterval &VirtReg,
- MCRegister PhysReg = MCRegister::NoRegister);
+ bool checkRegMaskInterference(LiveInterval &VirtReg,
+ MCRegister PhysReg = MCRegister::NoRegister);
/// Check for regunit interference only.
/// Return true if VirtReg overlaps a fixed assignment of one of PhysRegs's
/// register units.
- bool checkRegUnitInterference(LiveInterval &VirtReg, MCRegister PhysReg);
+ bool checkRegUnitInterference(LiveInterval &VirtReg, MCRegister PhysReg);
/// Query a line of the assigned virtual register matrix directly.
/// Use MCRegUnitIterator to enumerate all regunits in the desired PhysReg.
/// This returns a reference to an internal Query data structure that is only
/// valid until the next query() call.
- LiveIntervalUnion::Query &query(const LiveRange &LR, MCRegister RegUnit);
+ LiveIntervalUnion::Query &query(const LiveRange &LR, MCRegister RegUnit);
/// Directly access the live interval unions per regunit.
/// This returns an array indexed by the regunit number.
LiveIntervalUnion *getLiveUnions() { return &Matrix[0]; }
-
- Register getOneVReg(unsigned PhysReg) const;
+
+ Register getOneVReg(unsigned PhysReg) const;
};
} // end namespace llvm
diff --git a/contrib/libs/llvm12/include/llvm/CodeGen/LiveRegUnits.h b/contrib/libs/llvm12/include/llvm/CodeGen/LiveRegUnits.h
index 7382263262..e4fd7ebe92 100644
--- a/contrib/libs/llvm12/include/llvm/CodeGen/LiveRegUnits.h
+++ b/contrib/libs/llvm12/include/llvm/CodeGen/LiveRegUnits.h
@@ -22,7 +22,7 @@
#define LLVM_CODEGEN_LIVEREGUNITS_H
#include "llvm/ADT/BitVector.h"
-#include "llvm/CodeGen/MachineInstrBundle.h"
+#include "llvm/CodeGen/MachineInstrBundle.h"
#include "llvm/CodeGen/TargetRegisterInfo.h"
#include "llvm/MC/LaneBitmask.h"
#include "llvm/MC/MCRegisterInfo.h"
diff --git a/contrib/libs/llvm12/include/llvm/CodeGen/LiveVariables.h b/contrib/libs/llvm12/include/llvm/CodeGen/LiveVariables.h
index 8ab04c8214..3924e2f5a4 100644
--- a/contrib/libs/llvm12/include/llvm/CodeGen/LiveVariables.h
+++ b/contrib/libs/llvm12/include/llvm/CodeGen/LiveVariables.h
@@ -112,7 +112,7 @@ public:
/// isLiveIn - Is Reg live in to MBB? This means that Reg is live through
/// MBB, or it is killed in MBB. If Reg is only used by PHI instructions in
/// MBB, it is not considered live in.
- bool isLiveIn(const MachineBasicBlock &MBB, Register Reg,
+ bool isLiveIn(const MachineBasicBlock &MBB, Register Reg,
MachineRegisterInfo &MRI);
void dump() const;
@@ -155,25 +155,25 @@ private: // Intermediate data structures
/// HandlePhysRegKill - Add kills of Reg and its sub-registers to the
/// uses. Pay special attention to the sub-register uses which may come below
/// the last use of the whole register.
- bool HandlePhysRegKill(Register Reg, MachineInstr *MI);
+ bool HandlePhysRegKill(Register Reg, MachineInstr *MI);
/// HandleRegMask - Call HandlePhysRegKill for all registers clobbered by Mask.
void HandleRegMask(const MachineOperand&);
- void HandlePhysRegUse(Register Reg, MachineInstr &MI);
- void HandlePhysRegDef(Register Reg, MachineInstr *MI,
+ void HandlePhysRegUse(Register Reg, MachineInstr &MI);
+ void HandlePhysRegDef(Register Reg, MachineInstr *MI,
SmallVectorImpl<unsigned> &Defs);
void UpdatePhysRegDefs(MachineInstr &MI, SmallVectorImpl<unsigned> &Defs);
/// FindLastRefOrPartRef - Return the last reference or partial reference of
/// the specified register.
- MachineInstr *FindLastRefOrPartRef(Register Reg);
+ MachineInstr *FindLastRefOrPartRef(Register Reg);
/// FindLastPartialDef - Return the last partial def of the specified
   /// register. Also returns the sub-registers that are defined by the
/// instruction.
- MachineInstr *FindLastPartialDef(Register Reg,
- SmallSet<unsigned, 4> &PartDefRegs);
+ MachineInstr *FindLastPartialDef(Register Reg,
+ SmallSet<unsigned, 4> &PartDefRegs);
/// analyzePHINodes - Gather information about the PHI nodes in here. In
/// particular, we want to map the variable information of a virtual
@@ -190,21 +190,21 @@ public:
/// RegisterDefIsDead - Return true if the specified instruction defines the
/// specified register, but that definition is dead.
- bool RegisterDefIsDead(MachineInstr &MI, Register Reg) const;
+ bool RegisterDefIsDead(MachineInstr &MI, Register Reg) const;
//===--------------------------------------------------------------------===//
// API to update live variable information
/// replaceKillInstruction - Update register kill info by replacing a kill
/// instruction with a new one.
- void replaceKillInstruction(Register Reg, MachineInstr &OldMI,
+ void replaceKillInstruction(Register Reg, MachineInstr &OldMI,
MachineInstr &NewMI);
/// addVirtualRegisterKilled - Add information about the fact that the
/// specified register is killed after being used by the specified
   /// instruction. If AddIfNotFound is true, add an implicit operand if it's
/// not found.
- void addVirtualRegisterKilled(Register IncomingReg, MachineInstr &MI,
+ void addVirtualRegisterKilled(Register IncomingReg, MachineInstr &MI,
bool AddIfNotFound = false) {
if (MI.addRegisterKilled(IncomingReg, TRI, AddIfNotFound))
getVarInfo(IncomingReg).Kills.push_back(&MI);
@@ -214,14 +214,14 @@ public:
/// register from the live variable information. Returns true if the
/// variable was marked as killed by the specified instruction,
/// false otherwise.
- bool removeVirtualRegisterKilled(Register Reg, MachineInstr &MI) {
- if (!getVarInfo(Reg).removeKill(MI))
+ bool removeVirtualRegisterKilled(Register Reg, MachineInstr &MI) {
+ if (!getVarInfo(Reg).removeKill(MI))
return false;
bool Removed = false;
for (unsigned i = 0, e = MI.getNumOperands(); i != e; ++i) {
MachineOperand &MO = MI.getOperand(i);
- if (MO.isReg() && MO.isKill() && MO.getReg() == Reg) {
+ if (MO.isReg() && MO.isKill() && MO.getReg() == Reg) {
MO.setIsKill(false);
Removed = true;
break;
@@ -240,7 +240,7 @@ public:
/// addVirtualRegisterDead - Add information about the fact that the specified
/// register is dead after being used by the specified instruction. If
   /// AddIfNotFound is true, add an implicit operand if it's not found.
- void addVirtualRegisterDead(Register IncomingReg, MachineInstr &MI,
+ void addVirtualRegisterDead(Register IncomingReg, MachineInstr &MI,
bool AddIfNotFound = false) {
if (MI.addRegisterDead(IncomingReg, TRI, AddIfNotFound))
getVarInfo(IncomingReg).Kills.push_back(&MI);
@@ -250,14 +250,14 @@ public:
/// register from the live variable information. Returns true if the
/// variable was marked dead at the specified instruction, false
/// otherwise.
- bool removeVirtualRegisterDead(Register Reg, MachineInstr &MI) {
- if (!getVarInfo(Reg).removeKill(MI))
+ bool removeVirtualRegisterDead(Register Reg, MachineInstr &MI) {
+ if (!getVarInfo(Reg).removeKill(MI))
return false;
bool Removed = false;
for (unsigned i = 0, e = MI.getNumOperands(); i != e; ++i) {
MachineOperand &MO = MI.getOperand(i);
- if (MO.isReg() && MO.isDef() && MO.getReg() == Reg) {
+ if (MO.isReg() && MO.isDef() && MO.getReg() == Reg) {
MO.setIsDead(false);
Removed = true;
break;
@@ -276,25 +276,25 @@ public:
/// getVarInfo - Return the VarInfo structure for the specified VIRTUAL
/// register.
- VarInfo &getVarInfo(Register Reg);
+ VarInfo &getVarInfo(Register Reg);
void MarkVirtRegAliveInBlock(VarInfo& VRInfo, MachineBasicBlock* DefBlock,
MachineBasicBlock *BB);
- void MarkVirtRegAliveInBlock(VarInfo &VRInfo, MachineBasicBlock *DefBlock,
+ void MarkVirtRegAliveInBlock(VarInfo &VRInfo, MachineBasicBlock *DefBlock,
MachineBasicBlock *BB,
- SmallVectorImpl<MachineBasicBlock *> &WorkList);
+ SmallVectorImpl<MachineBasicBlock *> &WorkList);
- void HandleVirtRegDef(Register reg, MachineInstr &MI);
- void HandleVirtRegUse(Register reg, MachineBasicBlock *MBB, MachineInstr &MI);
-
- bool isLiveIn(Register Reg, const MachineBasicBlock &MBB) {
+ void HandleVirtRegDef(Register reg, MachineInstr &MI);
+ void HandleVirtRegUse(Register reg, MachineBasicBlock *MBB, MachineInstr &MI);
+
+ bool isLiveIn(Register Reg, const MachineBasicBlock &MBB) {
return getVarInfo(Reg).isLiveIn(MBB, Reg, *MRI);
}
/// isLiveOut - Determine if Reg is live out from MBB, when not considering
/// PHI nodes. This means that Reg is either killed by a successor block or
/// passed through one.
- bool isLiveOut(Register Reg, const MachineBasicBlock &MBB);
+ bool isLiveOut(Register Reg, const MachineBasicBlock &MBB);
/// addNewBlock - Add a new basic block BB between DomBB and SuccBB. All
/// variables that are live out of DomBB and live into SuccBB will be marked
@@ -310,10 +310,10 @@ public:
std::vector<SparseBitVector<>> &LiveInSets);
/// isPHIJoin - Return true if Reg is a phi join register.
- bool isPHIJoin(Register Reg) { return PHIJoins.test(Reg.id()); }
+ bool isPHIJoin(Register Reg) { return PHIJoins.test(Reg.id()); }
/// setPHIJoin - Mark Reg as a phi join register.
- void setPHIJoin(Register Reg) { PHIJoins.set(Reg.id()); }
+ void setPHIJoin(Register Reg) { PHIJoins.set(Reg.id()); }
};
} // End llvm namespace
diff --git a/contrib/libs/llvm12/include/llvm/CodeGen/LowLevelType.h b/contrib/libs/llvm12/include/llvm/CodeGen/LowLevelType.h
index 4c0fa175cc..d6a9cde70d 100644
--- a/contrib/libs/llvm12/include/llvm/CodeGen/LowLevelType.h
+++ b/contrib/libs/llvm12/include/llvm/CodeGen/LowLevelType.h
@@ -30,7 +30,7 @@ namespace llvm {
class DataLayout;
class Type;
-struct fltSemantics;
+struct fltSemantics;
/// Construct a low-level type based on an LLVM type.
LLT getLLTForType(Type &Ty, const DataLayout &DL);
@@ -43,9 +43,9 @@ MVT getMVTForLLT(LLT Ty);
 /// scalable vector types, and will assert if used.
LLT getLLTForMVT(MVT Ty);
-/// Get the appropriate floating point arithmetic semantic based on the bit size
-/// of the given scalar LLT.
-const llvm::fltSemantics &getFltSemanticForLLT(LLT Ty);
+/// Get the appropriate floating point arithmetic semantic based on the bit size
+/// of the given scalar LLT.
+const llvm::fltSemantics &getFltSemanticForLLT(LLT Ty);
}
#endif // LLVM_CODEGEN_LOWLEVELTYPE_H
diff --git a/contrib/libs/llvm12/include/llvm/CodeGen/MBFIWrapper.h b/contrib/libs/llvm12/include/llvm/CodeGen/MBFIWrapper.h
index aff14a6a8b..5024dd83cd 100644
--- a/contrib/libs/llvm12/include/llvm/CodeGen/MBFIWrapper.h
+++ b/contrib/libs/llvm12/include/llvm/CodeGen/MBFIWrapper.h
@@ -35,8 +35,8 @@ class MBFIWrapper {
BlockFrequency getBlockFreq(const MachineBasicBlock *MBB) const;
void setBlockFreq(const MachineBasicBlock *MBB, BlockFrequency F);
- Optional<uint64_t> getBlockProfileCount(const MachineBasicBlock *MBB) const;
-
+ Optional<uint64_t> getBlockProfileCount(const MachineBasicBlock *MBB) const;
+
raw_ostream &printBlockFreq(raw_ostream &OS,
const MachineBasicBlock *MBB) const;
raw_ostream &printBlockFreq(raw_ostream &OS,
diff --git a/contrib/libs/llvm12/include/llvm/CodeGen/MIRFormatter.h b/contrib/libs/llvm12/include/llvm/CodeGen/MIRFormatter.h
index b6d11b3ffa..0a8329c9a9 100644
--- a/contrib/libs/llvm12/include/llvm/CodeGen/MIRFormatter.h
+++ b/contrib/libs/llvm12/include/llvm/CodeGen/MIRFormatter.h
@@ -21,15 +21,15 @@
#ifndef LLVM_CODEGEN_MIRFORMATTER_H
#define LLVM_CODEGEN_MIRFORMATTER_H
-#include "llvm/ADT/Optional.h"
+#include "llvm/ADT/Optional.h"
#include "llvm/CodeGen/PseudoSourceValue.h"
-#include "llvm/Support/raw_ostream.h"
-#include <cstdint>
+#include "llvm/Support/raw_ostream.h"
+#include <cstdint>
namespace llvm {
-class MachineFunction;
-class MachineInstr;
+class MachineFunction;
+class MachineInstr;
struct PerFunctionMIParsingState;
struct SlotMapping;
diff --git a/contrib/libs/llvm12/include/llvm/CodeGen/MIRYamlMapping.h b/contrib/libs/llvm12/include/llvm/CodeGen/MIRYamlMapping.h
index 3cfd2837d6..d774dec719 100644
--- a/contrib/libs/llvm12/include/llvm/CodeGen/MIRYamlMapping.h
+++ b/contrib/libs/llvm12/include/llvm/CodeGen/MIRYamlMapping.h
@@ -166,22 +166,22 @@ template <> struct ScalarTraits<MaybeAlign> {
static QuotingType mustQuote(StringRef) { return QuotingType::None; }
};
-template <> struct ScalarTraits<Align> {
- static void output(const Align &Alignment, void *, llvm::raw_ostream &OS) {
- OS << Alignment.value();
- }
- static StringRef input(StringRef Scalar, void *, Align &Alignment) {
- unsigned long long N;
- if (getAsUnsignedInteger(Scalar, 10, N))
- return "invalid number";
- if (!isPowerOf2_64(N))
- return "must be a power of two";
- Alignment = Align(N);
- return StringRef();
- }
- static QuotingType mustQuote(StringRef) { return QuotingType::None; }
-};
-
+template <> struct ScalarTraits<Align> {
+ static void output(const Align &Alignment, void *, llvm::raw_ostream &OS) {
+ OS << Alignment.value();
+ }
+ static StringRef input(StringRef Scalar, void *, Align &Alignment) {
+ unsigned long long N;
+ if (getAsUnsignedInteger(Scalar, 10, N))
+ return "invalid number";
+ if (!isPowerOf2_64(N))
+ return "must be a power of two";
+ Alignment = Align(N);
+ return StringRef();
+ }
+ static QuotingType mustQuote(StringRef) { return QuotingType::None; }
+};
+
} // end namespace yaml
} // end namespace llvm
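As a hedged illustration of how the ScalarTraits<Align> specialization above is exercised, consider a small mapping that routes an alignment field through it. StackSlotExample and its YAML field name are invented for this sketch; the yaml::IO machinery is the real LLVM API:

#include "llvm/Support/Alignment.h"
#include "llvm/Support/YAMLTraits.h"

struct StackSlotExample { // hypothetical example type, not part of LLVM
  llvm::Align Alignment{1};
};

namespace llvm {
namespace yaml {
template <> struct MappingTraits<StackSlotExample> {
  static void mapping(IO &YamlIO, StackSlotExample &S) {
    // Dispatched through ScalarTraits<Align>::input/output defined above:
    // "alignment: 16" parses to Align(16); "alignment: 3" is rejected with
    // the "must be a power of two" error.
    YamlIO.mapRequired("alignment", S.Alignment);
  }
};
} // end namespace yaml
} // end namespace llvm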
@@ -354,7 +354,7 @@ struct ScalarEnumerationTraits<TargetStackID::Value> {
static void enumeration(yaml::IO &IO, TargetStackID::Value &ID) {
IO.enumCase(ID, "default", TargetStackID::Default);
IO.enumCase(ID, "sgpr-spill", TargetStackID::SGPRSpill);
- IO.enumCase(ID, "scalable-vector", TargetStackID::ScalableVector);
+ IO.enumCase(ID, "scalable-vector", TargetStackID::ScalableVector);
IO.enumCase(ID, "noalloc", TargetStackID::NoAlloc);
}
};
@@ -448,36 +448,36 @@ template <> struct MappingTraits<CallSiteInfo> {
static const bool flow = true;
};
-/// Serializable representation of debug value substitutions.
-struct DebugValueSubstitution {
- unsigned SrcInst;
- unsigned SrcOp;
- unsigned DstInst;
- unsigned DstOp;
-
- bool operator==(const DebugValueSubstitution &Other) const {
- return std::tie(SrcInst, SrcOp, DstInst, DstOp) ==
- std::tie(Other.SrcInst, Other.SrcOp, Other.DstInst, Other.DstOp);
- }
-};
-
-template <> struct MappingTraits<DebugValueSubstitution> {
- static void mapping(IO &YamlIO, DebugValueSubstitution &Sub) {
- YamlIO.mapRequired("srcinst", Sub.SrcInst);
- YamlIO.mapRequired("srcop", Sub.SrcOp);
- YamlIO.mapRequired("dstinst", Sub.DstInst);
- YamlIO.mapRequired("dstop", Sub.DstOp);
- }
-
- static const bool flow = true;
-};
-} // namespace yaml
-} // namespace llvm
-
-LLVM_YAML_IS_SEQUENCE_VECTOR(llvm::yaml::DebugValueSubstitution)
-
-namespace llvm {
-namespace yaml {
+/// Serializable representation of debug value substitutions.
+struct DebugValueSubstitution {
+ unsigned SrcInst;
+ unsigned SrcOp;
+ unsigned DstInst;
+ unsigned DstOp;
+
+ bool operator==(const DebugValueSubstitution &Other) const {
+ return std::tie(SrcInst, SrcOp, DstInst, DstOp) ==
+ std::tie(Other.SrcInst, Other.SrcOp, Other.DstInst, Other.DstOp);
+ }
+};
+
+template <> struct MappingTraits<DebugValueSubstitution> {
+ static void mapping(IO &YamlIO, DebugValueSubstitution &Sub) {
+ YamlIO.mapRequired("srcinst", Sub.SrcInst);
+ YamlIO.mapRequired("srcop", Sub.SrcOp);
+ YamlIO.mapRequired("dstinst", Sub.DstInst);
+ YamlIO.mapRequired("dstop", Sub.DstOp);
+ }
+
+ static const bool flow = true;
+};
+} // namespace yaml
+} // namespace llvm
+
+LLVM_YAML_IS_SEQUENCE_VECTOR(llvm::yaml::DebugValueSubstitution)
+
+namespace llvm {
+namespace yaml {
struct MachineConstantPoolValue {
UnsignedValue ID;
StringValue Value;
@@ -662,7 +662,7 @@ struct MachineFunction {
std::vector<MachineConstantPoolValue> Constants; /// Constant pool.
std::unique_ptr<MachineFunctionInfo> MachineFuncInfo;
std::vector<CallSiteInfo> CallSitesInfo;
- std::vector<DebugValueSubstitution> DebugValueSubstitutions;
+ std::vector<DebugValueSubstitution> DebugValueSubstitutions;
MachineJumpTable JumpTableInfo;
BlockStringValue Body;
};
@@ -691,8 +691,8 @@ template <> struct MappingTraits<MachineFunction> {
std::vector<MachineStackObject>());
YamlIO.mapOptional("callSites", MF.CallSitesInfo,
std::vector<CallSiteInfo>());
- YamlIO.mapOptional("debugValueSubstitutions", MF.DebugValueSubstitutions,
- std::vector<DebugValueSubstitution>());
+ YamlIO.mapOptional("debugValueSubstitutions", MF.DebugValueSubstitutions,
+ std::vector<DebugValueSubstitution>());
YamlIO.mapOptional("constants", MF.Constants,
std::vector<MachineConstantPoolValue>());
YamlIO.mapOptional("machineFunctionInfo", MF.MachineFuncInfo);
diff --git a/contrib/libs/llvm12/include/llvm/CodeGen/MachineBasicBlock.h b/contrib/libs/llvm12/include/llvm/CodeGen/MachineBasicBlock.h
index fb36c30de8..c67ba05321 100644
--- a/contrib/libs/llvm12/include/llvm/CodeGen/MachineBasicBlock.h
+++ b/contrib/libs/llvm12/include/llvm/CodeGen/MachineBasicBlock.h
@@ -47,7 +47,7 @@ class Printable;
class SlotIndexes;
class StringRef;
class raw_ostream;
-class LiveIntervals;
+class LiveIntervals;
class TargetRegisterClass;
class TargetRegisterInfo;
@@ -182,9 +182,9 @@ private:
/// is only computed once and is cached.
mutable MCSymbol *CachedMCSymbol = nullptr;
- /// Marks the end of the basic block. Used during basic block sections to
- /// calculate the size of the basic block, or the BB section ending with it.
- mutable MCSymbol *CachedEndMCSymbol = nullptr;
+ /// Marks the end of the basic block. Used during basic block sections to
+ /// calculate the size of the basic block, or the BB section ending with it.
+ mutable MCSymbol *CachedEndMCSymbol = nullptr;
// Intrusive list support
MachineBasicBlock() = default;
@@ -441,9 +441,9 @@ public:
bool hasEHPadSuccessor() const;
- /// Returns true if this is the entry block of the function.
- bool isEntryBlock() const;
-
+ /// Returns true if this is the entry block of the function.
+ bool isEntryBlock() const;
+
/// Returns true if this is the entry block of an EH scope, i.e., the block
/// that used to have a catchpad or cleanuppad instruction in the LLVM IR.
bool isEHScopeEntry() const { return IsEHScopeEntry; }
@@ -486,9 +486,9 @@ public:
/// Sets the section ID for this basic block.
void setSectionID(MBBSectionID V) { SectionID = V; }
- /// Returns the MCSymbol marking the end of this basic block.
- MCSymbol *getEndSymbol() const;
-
+ /// Returns the MCSymbol marking the end of this basic block.
+ MCSymbol *getEndSymbol() const;
+
/// Returns true if this block may have an INLINEASM_BR (overestimate, by
/// checking if any of the successors are indirect targets of any inlineasm_br
/// in the function).
@@ -686,17 +686,17 @@ public:
return !empty() && back().isEHScopeReturn();
}
- /// Split a basic block into 2 pieces at \p SplitInst. A new block will be
- /// inserted after this block, and all instructions after \p SplitInst moved
- /// to it (\p SplitInst will be in the original block). If \p LIS is provided,
- /// LiveIntervals will be appropriately updated. \return the newly inserted
- /// block.
- ///
- /// If \p UpdateLiveIns is true, this will ensure the live ins list is
- /// accurate, including for physreg uses/defs in the original block.
- MachineBasicBlock *splitAt(MachineInstr &SplitInst, bool UpdateLiveIns = true,
- LiveIntervals *LIS = nullptr);
-
+ /// Split a basic block into 2 pieces at \p SplitInst. A new block will be
+ /// inserted after this block, and all instructions after \p SplitInst moved
+ /// to it (\p SplitInst will be in the original block). If \p LIS is provided,
+ /// LiveIntervals will be appropriately updated. \return the newly inserted
+ /// block.
+ ///
+ /// If \p UpdateLiveIns is true, this will ensure the live ins list is
+ /// accurate, including for physreg uses/defs in the original block.
+ MachineBasicBlock *splitAt(MachineInstr &SplitInst, bool UpdateLiveIns = true,
+ LiveIntervals *LIS = nullptr);
+
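A minimal usage sketch for splitAt (the helper name is invented; LiveIntervals is omitted here, so liveness is refreshed via UpdateLiveIns as documented above):

static llvm::MachineBasicBlock *splitBlockAfter(llvm::MachineInstr &MI) {
  // Everything after MI moves to the returned block; MI stays in the
  // original block, per the doc comment above.
  return MI.getParent()->splitAt(MI, /*UpdateLiveIns=*/true);
}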
/// Split the critical edge from this block to the given successor block, and
/// return the newly created block, or null if splitting is not possible.
///
@@ -898,14 +898,14 @@ public:
void print(raw_ostream &OS, ModuleSlotTracker &MST,
const SlotIndexes * = nullptr, bool IsStandalone = true) const;
- enum PrintNameFlag {
- PrintNameIr = (1 << 0), ///< Add IR name where available
- PrintNameAttributes = (1 << 1), ///< Print attributes
- };
-
- void printName(raw_ostream &os, unsigned printNameFlags = PrintNameIr,
- ModuleSlotTracker *moduleSlotTracker = nullptr) const;
-
+ enum PrintNameFlag {
+ PrintNameIr = (1 << 0), ///< Add IR name where available
+ PrintNameAttributes = (1 << 1), ///< Print attributes
+ };
+
+ void printName(raw_ostream &os, unsigned printNameFlags = PrintNameIr,
+ ModuleSlotTracker *moduleSlotTracker = nullptr) const;
+
// Printing method used by LoopInfo.
void printAsOperand(raw_ostream &OS, bool PrintType = true) const;
diff --git a/contrib/libs/llvm12/include/llvm/CodeGen/MachineBlockFrequencyInfo.h b/contrib/libs/llvm12/include/llvm/CodeGen/MachineBlockFrequencyInfo.h
index d6a36181f6..251f53ae45 100644
--- a/contrib/libs/llvm12/include/llvm/CodeGen/MachineBlockFrequencyInfo.h
+++ b/contrib/libs/llvm12/include/llvm/CodeGen/MachineBlockFrequencyInfo.h
@@ -65,33 +65,33 @@ public:
/// information. Note that the initial frequency is equal to 1024. This means
/// that we should not rely on the value itself, but only on comparisons to
/// other block frequencies. We do this to avoid using floating point.
- /// For example, to get the frequency of a block relative to the entry block,
- /// divide the integral value returned by this function (the
- /// BlockFrequency::getFrequency() value) by getEntryFreq().
+ /// For example, to get the frequency of a block relative to the entry block,
+ /// divide the integral value returned by this function (the
+ /// BlockFrequency::getFrequency() value) by getEntryFreq().
BlockFrequency getBlockFreq(const MachineBasicBlock *MBB) const;
- /// Compute the frequency of the block, relative to the entry block.
- /// This API assumes getEntryFreq() is non-zero.
- float getBlockFreqRelativeToEntryBlock(const MachineBasicBlock *MBB) const {
- return getBlockFreq(MBB).getFrequency() * (1.0f / getEntryFreq());
- }
-
+ /// Compute the frequency of the block, relative to the entry block.
+ /// This API assumes getEntryFreq() is non-zero.
+ float getBlockFreqRelativeToEntryBlock(const MachineBasicBlock *MBB) const {
+ return getBlockFreq(MBB).getFrequency() * (1.0f / getEntryFreq());
+ }
+
Optional<uint64_t> getBlockProfileCount(const MachineBasicBlock *MBB) const;
Optional<uint64_t> getProfileCountFromFreq(uint64_t Freq) const;
- bool isIrrLoopHeader(const MachineBasicBlock *MBB) const;
+ bool isIrrLoopHeader(const MachineBasicBlock *MBB) const;
- /// incrementally calculate block frequencies when we split edges, to avoid
- /// full CFG traversal.
- void onEdgeSplit(const MachineBasicBlock &NewPredecessor,
- const MachineBasicBlock &NewSuccessor,
- const MachineBranchProbabilityInfo &MBPI);
+ /// incrementally calculate block frequencies when we split edges, to avoid
+ /// full CFG traversal.
+ void onEdgeSplit(const MachineBasicBlock &NewPredecessor,
+ const MachineBasicBlock &NewSuccessor,
+ const MachineBranchProbabilityInfo &MBPI);
const MachineFunction *getFunction() const;
const MachineBranchProbabilityInfo *getMBPI() const;
-
- /// Pop up a ghostview window with the current block frequency propagation
- /// rendered using dot.
+
+ /// Pop up a ghostview window with the current block frequency propagation
+ /// rendered using dot.
void view(const Twine &Name, bool isSimple = true) const;
// Print the block frequency Freq to OS using the current functions entry
@@ -103,8 +103,8 @@ public:
raw_ostream &printBlockFreq(raw_ostream &OS,
const MachineBasicBlock *MBB) const;
- /// Divide a block's BlockFrequency::getFrequency() value by this value to
- /// obtain the entry-block-relative frequency of that block.
+ /// Divide a block's BlockFrequency::getFrequency() value by this value to
+ /// obtain the entry-block-relative frequency of that block.
uint64_t getEntryFreq() const;
};
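A short sketch of the entry-relative computation both comments above describe (assumes getEntryFreq() is non-zero, as getBlockFreqRelativeToEntryBlock also does):

static double entryRelativeFreq(const llvm::MachineBlockFrequencyInfo &MBFI,
                                const llvm::MachineBasicBlock *MBB) {
  // getFrequency() is fixed-point with getEntryFreq() as the scale
  // (initially 1024), so the quotient reads as "executions per entry
  // into the function".
  return double(MBFI.getBlockFreq(MBB).getFrequency()) /
         double(MBFI.getEntryFreq());
}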
diff --git a/contrib/libs/llvm12/include/llvm/CodeGen/MachineCombinerPattern.h b/contrib/libs/llvm12/include/llvm/CodeGen/MachineCombinerPattern.h
index add0aaad21..7557deeb15 100644
--- a/contrib/libs/llvm12/include/llvm/CodeGen/MachineCombinerPattern.h
+++ b/contrib/libs/llvm12/include/llvm/CodeGen/MachineCombinerPattern.h
@@ -36,11 +36,11 @@ enum class MachineCombinerPattern {
REASSOC_XY_AMM_BMM,
REASSOC_XMM_AMM_BMM,
- // These are patterns matched by the PowerPC to reassociate FMA and FSUB to
- // reduce register pressure.
- REASSOC_XY_BCA,
- REASSOC_XY_BAC,
-
+ // These are patterns matched by the PowerPC to reassociate FMA and FSUB to
+ // reduce register pressure.
+ REASSOC_XY_BCA,
+ REASSOC_XY_BAC,
+
// These are multiply-add patterns matched by the AArch64 machine combiner.
MULADDW_OP1,
MULADDW_OP2,
diff --git a/contrib/libs/llvm12/include/llvm/CodeGen/MachineConstantPool.h b/contrib/libs/llvm12/include/llvm/CodeGen/MachineConstantPool.h
index 6cd70f8609..fe4cb90b6b 100644
--- a/contrib/libs/llvm12/include/llvm/CodeGen/MachineConstantPool.h
+++ b/contrib/libs/llvm12/include/llvm/CodeGen/MachineConstantPool.h
@@ -50,8 +50,8 @@ public:
Type *getType() const { return Ty; }
- virtual unsigned getSizeInBytes(const DataLayout &DL) const;
-
+ virtual unsigned getSizeInBytes(const DataLayout &DL) const;
+
virtual int getExistingMachineCPValue(MachineConstantPool *CP,
Align Alignment) = 0;
@@ -101,7 +101,7 @@ public:
Align getAlign() const { return Alignment; }
- unsigned getSizeInBytes(const DataLayout &DL) const;
+ unsigned getSizeInBytes(const DataLayout &DL) const;
/// This method classifies the entry according to whether or not it may
/// generate a relocation entry. This must be conservative, so if it might
diff --git a/contrib/libs/llvm12/include/llvm/CodeGen/MachineFrameInfo.h b/contrib/libs/llvm12/include/llvm/CodeGen/MachineFrameInfo.h
index 6a9f911f96..c31e8ccfd2 100644
--- a/contrib/libs/llvm12/include/llvm/CodeGen/MachineFrameInfo.h
+++ b/contrib/libs/llvm12/include/llvm/CodeGen/MachineFrameInfo.h
@@ -21,7 +21,7 @@
#define LLVM_CODEGEN_MACHINEFRAMEINFO_H
#include "llvm/ADT/SmallVector.h"
-#include "llvm/CodeGen/Register.h"
+#include "llvm/CodeGen/Register.h"
#include "llvm/Support/Alignment.h"
#include "llvm/Support/DataTypes.h"
#include <cassert>
@@ -39,7 +39,7 @@ class AllocaInst;
/// A callee-saved register can also be saved to another register, rather
/// than to the stack, by setting DstReg instead of FrameIdx.
class CalleeSavedInfo {
- Register Reg;
+ Register Reg;
union {
int FrameIdx;
unsigned DstReg;
@@ -66,14 +66,14 @@ public:
: Reg(R), FrameIdx(FI), Restored(true), SpilledToReg(false) {}
// Accessors.
- Register getReg() const { return Reg; }
+ Register getReg() const { return Reg; }
int getFrameIdx() const { return FrameIdx; }
unsigned getDstReg() const { return DstReg; }
void setFrameIdx(int FI) {
FrameIdx = FI;
SpilledToReg = false;
}
- void setDstReg(Register SpillReg) {
+ void setDstReg(Register SpillReg) {
DstReg = SpillReg;
SpilledToReg = true;
}
diff --git a/contrib/libs/llvm12/include/llvm/CodeGen/MachineFunction.h b/contrib/libs/llvm12/include/llvm/CodeGen/MachineFunction.h
index 8b89caedf6..ff1e7dc302 100644
--- a/contrib/libs/llvm12/include/llvm/CodeGen/MachineFunction.h
+++ b/contrib/libs/llvm12/include/llvm/CodeGen/MachineFunction.h
@@ -438,39 +438,39 @@ public:
using VariableDbgInfoMapTy = SmallVector<VariableDbgInfo, 4>;
VariableDbgInfoMapTy VariableDbgInfos;
- /// A count of how many instructions in the function have had numbers
- /// assigned to them. Used for debug value tracking, to determine the
- /// next instruction number.
- unsigned DebugInstrNumberingCount = 0;
-
- /// Set value of DebugInstrNumberingCount field. Avoid using this unless
- /// you're deserializing this data.
- void setDebugInstrNumberingCount(unsigned Num);
-
- /// Pair of instruction number and operand number.
- using DebugInstrOperandPair = std::pair<unsigned, unsigned>;
-
- /// Substitution map: from one <inst,operand> pair to another. Used to
- /// record changes in where a value is defined, so that debug variable
- /// locations can find it later.
- std::map<DebugInstrOperandPair, DebugInstrOperandPair>
- DebugValueSubstitutions;
-
- /// Create a substitution between one <instr,operand> value to a different,
- /// new value.
- void makeDebugValueSubstitution(DebugInstrOperandPair, DebugInstrOperandPair);
-
- /// Create substitutions for any tracked values in \p Old, to point at
- /// \p New. Needed when we re-create an instruction during optimization,
- /// which has the same signature (i.e., def operands in the same place) but
- /// a modified instruction type, flags, or otherwise. An example: X86 moves
- /// are sometimes transformed into equivalent LEAs.
- /// If the two instructions are not the same opcode, limit which operands to
- /// examine for substitutions to the first N operands by setting
- /// \p MaxOperand.
- void substituteDebugValuesForInst(const MachineInstr &Old, MachineInstr &New,
- unsigned MaxOperand = UINT_MAX);
-
+ /// A count of how many instructions in the function have had numbers
+ /// assigned to them. Used for debug value tracking, to determine the
+ /// next instruction number.
+ unsigned DebugInstrNumberingCount = 0;
+
+ /// Set value of DebugInstrNumberingCount field. Avoid using this unless
+ /// you're deserializing this data.
+ void setDebugInstrNumberingCount(unsigned Num);
+
+ /// Pair of instruction number and operand number.
+ using DebugInstrOperandPair = std::pair<unsigned, unsigned>;
+
+ /// Substitution map: from one <inst,operand> pair to another. Used to
+ /// record changes in where a value is defined, so that debug variable
+ /// locations can find it later.
+ std::map<DebugInstrOperandPair, DebugInstrOperandPair>
+ DebugValueSubstitutions;
+
+ /// Create a substitution between one <instr,operand> value to a different,
+ /// new value.
+ void makeDebugValueSubstitution(DebugInstrOperandPair, DebugInstrOperandPair);
+
+ /// Create substitutions for any tracked values in \p Old, to point at
+ /// \p New. Needed when we re-create an instruction during optimization,
+ /// which has the same signature (i.e., def operands in the same place) but
+ /// a modified instruction type, flags, or otherwise. An example: X86 moves
+ /// are sometimes transformed into equivalent LEAs.
+ /// If the two instructions are not the same opcode, limit which operands to
+ /// examine for substitutions to the first N operands by setting
+ /// \p MaxOperand.
+ void substituteDebugValuesForInst(const MachineInstr &Old, MachineInstr &New,
+ unsigned MaxOperand = UINT_MAX);
+
MachineFunction(Function &F, const LLVMTargetMachine &Target,
const TargetSubtargetInfo &STI, unsigned FunctionNum,
MachineModuleInfo &MMI);
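A hedged sketch of the intended use of the substitution API above when an optimization rebuilds an instruction (replaceAndTrack is an invented helper):

static void replaceAndTrack(llvm::MachineFunction &MF,
                            llvm::MachineInstr &Old,
                            llvm::MachineInstr &New) {
  // Point any DBG_INSTR_REFs tracking Old's numbered defs at the
  // corresponding operands of New, then drop the old instruction.
  MF.substituteDebugValuesForInst(Old, New);
  Old.eraseFromParent();
}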
@@ -534,8 +534,8 @@ public:
/// Returns true if this function has basic block sections enabled.
bool hasBBSections() const {
return (BBSectionsType == BasicBlockSection::All ||
- BBSectionsType == BasicBlockSection::List ||
- BBSectionsType == BasicBlockSection::Preset);
+ BBSectionsType == BasicBlockSection::List ||
+ BBSectionsType == BasicBlockSection::Preset);
}
/// Returns true if basic block labels are to be generated for this function.
@@ -807,7 +807,7 @@ public:
/// CreateMachineInstr - Allocate a new MachineInstr. Use this instead
/// of `new MachineInstr'.
MachineInstr *CreateMachineInstr(const MCInstrDesc &MCID, const DebugLoc &DL,
- bool NoImplicit = false);
+ bool NoImplicit = false);
/// Create a new MachineInstr which is a copy of \p Orig, identical in all
/// ways except the instruction has no parent, prev, or next. Bundling flags
@@ -853,14 +853,14 @@ public:
MachineMemOperand *getMachineMemOperand(const MachineMemOperand *MMO,
int64_t Offset, uint64_t Size);
- /// getMachineMemOperand - Allocate a new MachineMemOperand by copying
- /// an existing one, replacing only the MachinePointerInfo and size.
- /// MachineMemOperands are owned by the MachineFunction and need not be
- /// explicitly deallocated.
- MachineMemOperand *getMachineMemOperand(const MachineMemOperand *MMO,
- MachinePointerInfo &PtrInfo,
- uint64_t Size);
-
+ /// getMachineMemOperand - Allocate a new MachineMemOperand by copying
+ /// an existing one, replacing only the MachinePointerInfo and size.
+ /// MachineMemOperands are owned by the MachineFunction and need not be
+ /// explicitly deallocated.
+ MachineMemOperand *getMachineMemOperand(const MachineMemOperand *MMO,
+ MachinePointerInfo &PtrInfo,
+ uint64_t Size);
+
/// Allocate a new MachineMemOperand by copying an existing one,
/// replacing only AliasAnalysis information. MachineMemOperands are owned
/// by the MachineFunction and need not be explicitly deallocated.
@@ -1113,10 +1113,10 @@ public:
/// the same callee.
void moveCallSiteInfo(const MachineInstr *Old,
const MachineInstr *New);
-
- unsigned getNewDebugInstrNum() {
- return ++DebugInstrNumberingCount;
- }
+
+ unsigned getNewDebugInstrNum() {
+ return ++DebugInstrNumberingCount;
+ }
};
//===--------------------------------------------------------------------===//
@@ -1183,11 +1183,11 @@ template <> struct GraphTraits<Inverse<const MachineFunction*>> :
}
};
-class MachineFunctionAnalysisManager;
-void verifyMachineFunction(MachineFunctionAnalysisManager *,
- const std::string &Banner,
- const MachineFunction &MF);
-
+class MachineFunctionAnalysisManager;
+void verifyMachineFunction(MachineFunctionAnalysisManager *,
+ const std::string &Banner,
+ const MachineFunction &MF);
+
} // end namespace llvm
#endif // LLVM_CODEGEN_MACHINEFUNCTION_H
diff --git a/contrib/libs/llvm12/include/llvm/CodeGen/MachineInstr.h b/contrib/libs/llvm12/include/llvm/CodeGen/MachineInstr.h
index f0f53003c4..7dbbda8971 100644
--- a/contrib/libs/llvm12/include/llvm/CodeGen/MachineInstr.h
+++ b/contrib/libs/llvm12/include/llvm/CodeGen/MachineInstr.h
@@ -256,10 +256,10 @@ private:
DebugLoc debugLoc; // Source line information.
- /// Unique instruction number. Used by DBG_INSTR_REFs to refer to the values
- /// defined by this instruction.
- unsigned DebugInstrNum;
-
+ /// Unique instruction number. Used by DBG_INSTR_REFs to refer to the values
+ /// defined by this instruction.
+ unsigned DebugInstrNum;
+
// Intrusive list support
friend struct ilist_traits<MachineInstr>;
friend struct ilist_callback_traits<MachineBasicBlock>;
@@ -291,9 +291,9 @@ public:
const MachineBasicBlock* getParent() const { return Parent; }
MachineBasicBlock* getParent() { return Parent; }
- /// Move the instruction before \p MovePos.
- void moveBefore(MachineInstr *MovePos);
-
+ /// Move the instruction before \p MovePos.
+ void moveBefore(MachineInstr *MovePos);
+
/// Return the function that contains the basic block that this instruction
/// belongs to.
///
@@ -455,18 +455,18 @@ public:
/// this DBG_LABEL instruction.
const DILabel *getDebugLabel() const;
- /// Fetch the instruction number of this MachineInstr. If it does not have
- /// one already, a new and unique number will be assigned.
- unsigned getDebugInstrNum();
-
- /// Examine the instruction number of this MachineInstr. May be zero if
- /// it hasn't been assigned a number yet.
- unsigned peekDebugInstrNum() const { return DebugInstrNum; }
-
- /// Set instruction number of this MachineInstr. Avoid using unless you're
- /// deserializing this information.
- void setDebugInstrNum(unsigned Num) { DebugInstrNum = Num; }
-
+ /// Fetch the instruction number of this MachineInstr. If it does not have
+ /// one already, a new and unique number will be assigned.
+ unsigned getDebugInstrNum();
+
+ /// Examine the instruction number of this MachineInstr. May be zero if
+ /// it hasn't been assigned a number yet.
+ unsigned peekDebugInstrNum() const { return DebugInstrNum; }
+
+ /// Set instruction number of this MachineInstr. Avoid using unless you're
+ /// deserializing this information.
+ void setDebugInstrNum(unsigned Num) { DebugInstrNum = Num; }
+
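A short sketch tying these accessors to the MachineFunction-level substitution map shown earlier (helper name and operand indices invented):

static void recordSalvage(llvm::MachineFunction &MF, llvm::MachineInstr &Old,
                          unsigned OldOp, llvm::MachineInstr &New,
                          unsigned NewOp) {
  // Instruction numbers are assigned lazily by getDebugInstrNum on first use.
  MF.makeDebugValueSubstitution({Old.getDebugInstrNum(), OldOp},
                                {New.getDebugInstrNum(), NewOp});
}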
/// Emit an error referring to the source location of this instruction.
/// This should only be used for inline assembly that is somehow
/// impossible to compile. Other errors should have been handled much
@@ -1163,22 +1163,22 @@ public:
return getOpcode() == TargetOpcode::CFI_INSTRUCTION;
}
- bool isPseudoProbe() const {
- return getOpcode() == TargetOpcode::PSEUDO_PROBE;
- }
-
+ bool isPseudoProbe() const {
+ return getOpcode() == TargetOpcode::PSEUDO_PROBE;
+ }
+
// True if the instruction represents a position in the function.
bool isPosition() const { return isLabel() || isCFIInstruction(); }
bool isDebugValue() const { return getOpcode() == TargetOpcode::DBG_VALUE; }
bool isDebugLabel() const { return getOpcode() == TargetOpcode::DBG_LABEL; }
- bool isDebugRef() const { return getOpcode() == TargetOpcode::DBG_INSTR_REF; }
- bool isDebugInstr() const {
- return isDebugValue() || isDebugLabel() || isDebugRef();
- }
- bool isDebugOrPseudoInstr() const {
- return isDebugInstr() || isPseudoProbe();
- }
+ bool isDebugRef() const { return getOpcode() == TargetOpcode::DBG_INSTR_REF; }
+ bool isDebugInstr() const {
+ return isDebugValue() || isDebugLabel() || isDebugRef();
+ }
+ bool isDebugOrPseudoInstr() const {
+ return isDebugInstr() || isPseudoProbe();
+ }
bool isDebugOffsetImm() const { return getDebugOffset().isImm(); }
@@ -1271,11 +1271,11 @@ public:
case TargetOpcode::EH_LABEL:
case TargetOpcode::GC_LABEL:
case TargetOpcode::DBG_VALUE:
- case TargetOpcode::DBG_INSTR_REF:
+ case TargetOpcode::DBG_INSTR_REF:
case TargetOpcode::DBG_LABEL:
case TargetOpcode::LIFETIME_START:
case TargetOpcode::LIFETIME_END:
- case TargetOpcode::PSEUDO_PROBE:
+ case TargetOpcode::PSEUDO_PROBE:
return true;
}
}
@@ -1348,8 +1348,8 @@ public:
/// Return true if the MachineInstr modifies (fully define or partially
/// define) the specified register.
/// NOTE: It's ignoring subreg indices on virtual registers.
- bool modifiesRegister(Register Reg,
- const TargetRegisterInfo *TRI = nullptr) const {
+ bool modifiesRegister(Register Reg,
+ const TargetRegisterInfo *TRI = nullptr) const {
return findRegisterDefOperandIdx(Reg, false, true, TRI) != -1;
}
@@ -1800,10 +1800,10 @@ public:
void setDebugValueUndef() {
assert(isDebugValue() && "Must be a debug value instruction.");
for (MachineOperand &MO : debug_operands()) {
- if (MO.isReg()) {
+ if (MO.isReg()) {
MO.setReg(0);
- MO.setSubReg(0);
- }
+ MO.setSubReg(0);
+ }
}
}
diff --git a/contrib/libs/llvm12/include/llvm/CodeGen/MachineInstrBuilder.h b/contrib/libs/llvm12/include/llvm/CodeGen/MachineInstrBuilder.h
index 61c44f8a01..0635b48655 100644
--- a/contrib/libs/llvm12/include/llvm/CodeGen/MachineInstrBuilder.h
+++ b/contrib/libs/llvm12/include/llvm/CodeGen/MachineInstrBuilder.h
@@ -47,30 +47,30 @@ class MDNode;
namespace RegState {
-enum {
- /// Register definition.
- Define = 0x2,
- /// Not emitted register (e.g. carry, or temporary result).
- Implicit = 0x4,
- /// The last use of a register.
- Kill = 0x8,
- /// Unused definition.
- Dead = 0x10,
- /// Value of the register doesn't matter.
- Undef = 0x20,
- /// Register definition happens before uses.
- EarlyClobber = 0x40,
- /// Register 'use' is for debugging purposes.
- Debug = 0x80,
- /// Register reads a value that is defined inside the same instruction or
- /// bundle.
- InternalRead = 0x100,
- /// Register that may be renamed.
- Renamable = 0x200,
- DefineNoRead = Define | Undef,
- ImplicitDefine = Implicit | Define,
- ImplicitKill = Implicit | Kill
-};
+enum {
+ /// Register definition.
+ Define = 0x2,
+ /// Not emitted register (e.g. carry, or temporary result).
+ Implicit = 0x4,
+ /// The last use of a register.
+ Kill = 0x8,
+ /// Unused definition.
+ Dead = 0x10,
+ /// Value of the register doesn't matter.
+ Undef = 0x20,
+ /// Register definition happens before uses.
+ EarlyClobber = 0x40,
+ /// Register 'use' is for debugging purposes.
+ Debug = 0x80,
+ /// Register reads a value that is defined inside the same instruction or
+ /// bundle.
+ InternalRead = 0x100,
+ /// Register that may be renamed.
+ Renamable = 0x200,
+ DefineNoRead = Define | Undef,
+ ImplicitDefine = Implicit | Define,
+ ImplicitKill = Implicit | Kill
+};
} // end namespace RegState
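These flags are typically OR-ed together when adding register operands through MachineInstrBuilder; a small sketch (the opcode descriptor and registers are assumed to be supplied by the caller):

static void buildDefKill(llvm::MachineBasicBlock &MBB,
                         llvm::MachineBasicBlock::iterator InsertPt,
                         const llvm::DebugLoc &DL,
                         const llvm::MCInstrDesc &Desc,
                         llvm::Register Dst, llvm::Register Src) {
  llvm::BuildMI(MBB, InsertPt, DL, Desc)
      .addReg(Dst, llvm::RegState::Define) // Dst is written by this instruction
      .addReg(Src, llvm::RegState::Kill);  // last use of Src
}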
@@ -312,9 +312,9 @@ public:
case MachineOperand::MO_BlockAddress:
return addBlockAddress(Disp.getBlockAddress(), Disp.getOffset() + off,
TargetFlags);
- case MachineOperand::MO_JumpTableIndex:
- assert(off == 0 && "cannot create offset into jump tables");
- return addJumpTableIndex(Disp.getIndex(), TargetFlags);
+ case MachineOperand::MO_JumpTableIndex:
+ assert(off == 0 && "cannot create offset into jump tables");
+ return addJumpTableIndex(Disp.getIndex(), TargetFlags);
}
}
diff --git a/contrib/libs/llvm12/include/llvm/CodeGen/MachineJumpTableInfo.h b/contrib/libs/llvm12/include/llvm/CodeGen/MachineJumpTableInfo.h
index 0b13ed4891..3aa5f2c3c3 100644
--- a/contrib/libs/llvm12/include/llvm/CodeGen/MachineJumpTableInfo.h
+++ b/contrib/libs/llvm12/include/llvm/CodeGen/MachineJumpTableInfo.h
@@ -113,9 +113,9 @@ public:
JumpTables[Idx].MBBs.clear();
}
- /// RemoveMBBFromJumpTables - If MBB is present in any jump tables, remove it.
- bool RemoveMBBFromJumpTables(MachineBasicBlock *MBB);
-
+ /// RemoveMBBFromJumpTables - If MBB is present in any jump tables, remove it.
+ bool RemoveMBBFromJumpTables(MachineBasicBlock *MBB);
+
/// ReplaceMBBInJumpTables - If Old is the target of any jump tables, update
/// the jump tables to branch to New instead.
bool ReplaceMBBInJumpTables(MachineBasicBlock *Old, MachineBasicBlock *New);
diff --git a/contrib/libs/llvm12/include/llvm/CodeGen/MachineLoopInfo.h b/contrib/libs/llvm12/include/llvm/CodeGen/MachineLoopInfo.h
index fbd203328c..5d5b3ec63c 100644
--- a/contrib/libs/llvm12/include/llvm/CodeGen/MachineLoopInfo.h
+++ b/contrib/libs/llvm12/include/llvm/CodeGen/MachineLoopInfo.h
@@ -74,12 +74,12 @@ public:
/// it returns an unknown location.
DebugLoc getStartLoc() const;
- /// Returns true if the instruction is loop invariant.
- /// I.e., all virtual register operands are defined outside of the loop,
- /// physical registers aren't accessed explicitly, and there are no side
- /// effects that aren't captured by the operands or other flags.
- bool isLoopInvariant(MachineInstr &I) const;
-
+ /// Returns true if the instruction is loop invariant.
+ /// I.e., all virtual register operands are defined outside of the loop,
+ /// physical registers aren't accessed explicitly, and there are no side
+ /// effects that aren't captured by the operands or other flags.
+ bool isLoopInvariant(MachineInstr &I) const;
+
void dump() const;
private:
diff --git a/contrib/libs/llvm12/include/llvm/CodeGen/MachineModuleInfo.h b/contrib/libs/llvm12/include/llvm/CodeGen/MachineModuleInfo.h
index b791e255b7..585d9adfbc 100644
--- a/contrib/libs/llvm12/include/llvm/CodeGen/MachineModuleInfo.h
+++ b/contrib/libs/llvm12/include/llvm/CodeGen/MachineModuleInfo.h
@@ -61,8 +61,8 @@ class Module;
//===----------------------------------------------------------------------===//
/// This class can be derived from and used by targets to hold private
/// target-specific information for each Module. Objects of this type are
-/// accessed/created with MachineModuleInfo::getObjFileInfo and destroyed when
-/// the MachineModuleInfo is destroyed.
+/// accessed/created with MachineModuleInfo::getObjFileInfo and destroyed when
+/// the MachineModuleInfo is destroyed.
///
class MachineModuleInfoImpl {
public:
@@ -90,9 +90,9 @@ class MachineModuleInfo {
/// This is the MCContext used for the entire code generator.
MCContext Context;
- // This is an external context that, if assigned, will be used instead of the
- // internal context.
- MCContext *ExternalContext = nullptr;
+ // This is an external context that, if assigned, will be used instead of the
+ // internal context.
+ MCContext *ExternalContext = nullptr;
/// This is the LLVM Module being worked on.
const Module *TheModule;
@@ -159,9 +159,9 @@ class MachineModuleInfo {
public:
explicit MachineModuleInfo(const LLVMTargetMachine *TM = nullptr);
- explicit MachineModuleInfo(const LLVMTargetMachine *TM,
- MCContext *ExtContext);
-
+ explicit MachineModuleInfo(const LLVMTargetMachine *TM,
+ MCContext *ExtContext);
+
MachineModuleInfo(MachineModuleInfo &&MMII);
~MachineModuleInfo();
@@ -171,12 +171,12 @@ public:
const LLVMTargetMachine &getTarget() const { return TM; }
- const MCContext &getContext() const {
- return ExternalContext ? *ExternalContext : Context;
- }
- MCContext &getContext() {
- return ExternalContext ? *ExternalContext : Context;
- }
+ const MCContext &getContext() const {
+ return ExternalContext ? *ExternalContext : Context;
+ }
+ MCContext &getContext() {
+ return ExternalContext ? *ExternalContext : Context;
+ }
const Module *getModule() const { return TheModule; }
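A minimal sketch of the external-context constructor shown above: the caller keeps ownership of the MCContext, and getContext() then forwards to it (the helper name is invented):

static llvm::MachineModuleInfo
makeSharedContextMMI(const llvm::LLVMTargetMachine *TM,
                     llvm::MCContext &SharedCtx) {
  // getContext() will now return SharedCtx instead of the internal context.
  return llvm::MachineModuleInfo(TM, &SharedCtx);
}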
@@ -268,12 +268,12 @@ public:
return Personalities;
}
/// \}
-
- // MMI owns the MCContext. It should never be invalidated.
- bool invalidate(Module &, const PreservedAnalyses &,
- ModuleAnalysisManager::Invalidator &) {
- return false;
- }
+
+ // MMI owns the MCContext. It should never be invalidated.
+ bool invalidate(Module &, const PreservedAnalyses &,
+ ModuleAnalysisManager::Invalidator &) {
+ return false;
+ }
}; // End class MachineModuleInfo
class MachineModuleInfoWrapperPass : public ImmutablePass {
@@ -283,9 +283,9 @@ public:
static char ID; // Pass identification, replacement for typeid
explicit MachineModuleInfoWrapperPass(const LLVMTargetMachine *TM = nullptr);
- explicit MachineModuleInfoWrapperPass(const LLVMTargetMachine *TM,
- MCContext *ExtContext);
-
+ explicit MachineModuleInfoWrapperPass(const LLVMTargetMachine *TM,
+ MCContext *ExtContext);
+
// Initialization and Finalization
bool doInitialization(Module &) override;
bool doFinalization(Module &) override;
diff --git a/contrib/libs/llvm12/include/llvm/CodeGen/MachineOperand.h b/contrib/libs/llvm12/include/llvm/CodeGen/MachineOperand.h
index d5ea31a6cd..ad3c11b1f1 100644
--- a/contrib/libs/llvm12/include/llvm/CodeGen/MachineOperand.h
+++ b/contrib/libs/llvm12/include/llvm/CodeGen/MachineOperand.h
@@ -734,12 +734,12 @@ public:
/// ChangeToImmediate - Replace this operand with a new immediate operand of
/// the specified value. If an operand is known to be an immediate already,
/// the setImm method should be used.
- void ChangeToImmediate(int64_t ImmVal, unsigned TargetFlags = 0);
+ void ChangeToImmediate(int64_t ImmVal, unsigned TargetFlags = 0);
/// ChangeToFPImmediate - Replace this operand with a new FP immediate operand
/// of the specified value. If an operand is known to be an FP immediate
/// already, the setFPImm method should be used.
- void ChangeToFPImmediate(const ConstantFP *FPImm, unsigned TargetFlags = 0);
+ void ChangeToFPImmediate(const ConstantFP *FPImm, unsigned TargetFlags = 0);
/// ChangeToES - Replace this operand with a new external symbol operand.
void ChangeToES(const char *SymName, unsigned TargetFlags = 0);
@@ -749,10 +749,10 @@ public:
unsigned TargetFlags = 0);
/// ChangeToMCSymbol - Replace this operand with a new MC symbol operand.
- void ChangeToMCSymbol(MCSymbol *Sym, unsigned TargetFlags = 0);
+ void ChangeToMCSymbol(MCSymbol *Sym, unsigned TargetFlags = 0);
/// Replace this operand with a frame index.
- void ChangeToFrameIndex(int Idx, unsigned TargetFlags = 0);
+ void ChangeToFrameIndex(int Idx, unsigned TargetFlags = 0);
/// Replace this operand with a target index.
void ChangeToTargetIndex(unsigned Idx, int64_t Offset,
@@ -765,11 +765,11 @@ public:
bool isKill = false, bool isDead = false,
bool isUndef = false, bool isDebug = false);
- /// getTargetIndexName - If this MachineOperand is a TargetIndex that has a
- /// name, attempt to get the name. Returns nullptr if the TargetIndex does not
- /// have a name. Asserts if MO is not a TargetIndex.
- const char *getTargetIndexName() const;
-
+ /// getTargetIndexName - If this MachineOperand is a TargetIndex that has a
+ /// name, attempt to get the name. Returns nullptr if the TargetIndex does not
+ /// have a name. Asserts if MO is not a TargetIndex.
+ const char *getTargetIndexName() const;
+
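Taken together, the ChangeTo* methods above rewrite an operand in place; a hedged sketch of a constant-folding use (invented helper, with Val supplied by the caller):

static void foldUseToImmediate(llvm::MachineOperand &MO, int64_t Val) {
  // ChangeToImmediate converts a non-immediate operand; if MO were already
  // an immediate, setImm would be the right call per the comment above.
  if (MO.isReg() && !MO.isDef())
    MO.ChangeToImmediate(Val);
}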
//===--------------------------------------------------------------------===//
// Construction methods.
//===--------------------------------------------------------------------===//
diff --git a/contrib/libs/llvm12/include/llvm/CodeGen/MachineOutliner.h b/contrib/libs/llvm12/include/llvm/CodeGen/MachineOutliner.h
index ebe91415af..e39090728f 100644
--- a/contrib/libs/llvm12/include/llvm/CodeGen/MachineOutliner.h
+++ b/contrib/libs/llvm12/include/llvm/CodeGen/MachineOutliner.h
@@ -22,10 +22,10 @@
#ifndef LLVM_MACHINEOUTLINER_H
#define LLVM_MACHINEOUTLINER_H
-#include "llvm/CodeGen/LivePhysRegs.h"
+#include "llvm/CodeGen/LivePhysRegs.h"
#include "llvm/CodeGen/LiveRegUnits.h"
#include "llvm/CodeGen/MachineFunction.h"
-#include "llvm/CodeGen/MachineRegisterInfo.h"
+#include "llvm/CodeGen/MachineRegisterInfo.h"
#include "llvm/CodeGen/TargetRegisterInfo.h"
namespace llvm {
diff --git a/contrib/libs/llvm12/include/llvm/CodeGen/MachinePassManager.h b/contrib/libs/llvm12/include/llvm/CodeGen/MachinePassManager.h
index d9cb20d0de..efb51b1d77 100644
--- a/contrib/libs/llvm12/include/llvm/CodeGen/MachinePassManager.h
+++ b/contrib/libs/llvm12/include/llvm/CodeGen/MachinePassManager.h
@@ -1,267 +1,267 @@
-#pragma once
-
-#ifdef __GNUC__
-#pragma GCC diagnostic push
-#pragma GCC diagnostic ignored "-Wunused-parameter"
-#endif
-
-//===- PassManager.h --- Pass management for CodeGen ------------*- C++ -*-===//
-//
-// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
-// See https://llvm.org/LICENSE.txt for license information.
-// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
-//
-//===----------------------------------------------------------------------===//
-//
-// This header defines the pass manager interface for codegen. The codegen
-// pipeline consists of only machine function passes. There is no container
-// relationship between IR module/function and machine function in terms of pass
-// manager organization. So there is no need for adaptor classes (for example
-// ModuleToMachineFunctionAdaptor). Since invalidation could only happen among
-// machine function passes, there are no proxy classes to handle cross-IR-unit
-// invalidation. IR analysis results are provided for machine function passes by
-// their respective analysis managers such as ModuleAnalysisManager and
-// FunctionAnalysisManager.
-//
-// TODO: Add MachineFunctionProperties support.
-//
-//===----------------------------------------------------------------------===//
-
-#ifndef LLVM_CODEGEN_MACHINEPASSMANAGER_H
-#define LLVM_CODEGEN_MACHINEPASSMANAGER_H
-
-#include "llvm/ADT/FunctionExtras.h"
-#include "llvm/ADT/SmallVector.h"
-#include "llvm/CodeGen/MachineFunction.h"
-#include "llvm/IR/PassManager.h"
-#include "llvm/Support/Error.h"
-#include "llvm/Support/type_traits.h"
-
-namespace llvm {
-class Module;
-
-extern template class AnalysisManager<MachineFunction>;
-
-/// An AnalysisManager<MachineFunction> that also exposes IR analysis results.
-class MachineFunctionAnalysisManager : public AnalysisManager<MachineFunction> {
-public:
- using Base = AnalysisManager<MachineFunction>;
-
- MachineFunctionAnalysisManager() : Base(false), FAM(nullptr), MAM(nullptr) {}
- MachineFunctionAnalysisManager(FunctionAnalysisManager &FAM,
- ModuleAnalysisManager &MAM,
- bool DebugLogging = false)
- : Base(DebugLogging), FAM(&FAM), MAM(&MAM) {}
- MachineFunctionAnalysisManager(MachineFunctionAnalysisManager &&) = default;
- MachineFunctionAnalysisManager &
- operator=(MachineFunctionAnalysisManager &&) = default;
-
- /// Get the result of an analysis pass for a Function.
- ///
- /// Runs the analysis if a cached result is not available.
- template <typename PassT> typename PassT::Result &getResult(Function &F) {
- return FAM->getResult<PassT>(F);
- }
-
- /// Get the cached result of an analysis pass for a Function.
- ///
- /// This method never runs the analysis.
- ///
- /// \returns null if there is no cached result.
- template <typename PassT>
- typename PassT::Result *getCachedResult(Function &F) {
- return FAM->getCachedResult<PassT>(F);
- }
-
- /// Get the result of an analysis pass for a Module.
- ///
- /// Runs the analysis if a cached result is not available.
- template <typename PassT> typename PassT::Result &getResult(Module &M) {
- return MAM->getResult<PassT>(M);
- }
-
- /// Get the cached result of an analysis pass for a Module.
- ///
- /// This method never runs the analysis.
- ///
- /// \returns null if there is no cached result.
- template <typename PassT> typename PassT::Result *getCachedResult(Module &M) {
- return MAM->getCachedResult<PassT>(M);
- }
-
- /// Get the result of an analysis pass for a MachineFunction.
- ///
- /// Runs the analysis if a cached result is not available.
- using Base::getResult;
-
- /// Get the cached result of an analysis pass for a MachineFunction.
- ///
- /// This method never runs the analysis.
- ///
- /// \returns null if there is no cached result.
- using Base::getCachedResult;
-
- // FIXME: Add LoopAnalysisManager or CGSCCAnalysisManager if needed.
- FunctionAnalysisManager *FAM;
- ModuleAnalysisManager *MAM;
-};
-
-extern template class PassManager<MachineFunction>;
-
-/// MachineFunctionPassManager adds/removes the following features to/from the
-/// base PassManager template instantiation.
-///
-/// - Support passes that implement doInitialization/doFinalization. This is for
-/// machine function passes to work on module level constructs. One such pass
-/// is AsmPrinter.
-///
-/// - Support machine module pass which runs over the module (for example,
-/// MachineOutliner). A machine module pass needs to define the method:
-///
-/// ```Error run(Module &, MachineFunctionAnalysisManager &)```
-///
-/// FIXME: machine module passes still need to define the usual machine
-/// function pass interface, namely,
-/// `PreservedAnalyses run(MachineFunction &,
-/// MachineFunctionAnalysisManager &)`
-/// But this interface wouldn't be executed. It is just a placeholder
-/// to satisfy the pass manager type-erased interface. This
-/// special-casing of machine module pass is due to its limited use
-/// cases and the unnecessary complexity it may bring to the machine
-/// pass manager.
-///
-/// - The base class `run` method is replaced by an alternative `run` method.
-/// See details below.
-///
-/// - Support code generation in SCC order. Users include interprocedural
-/// register allocation (IPRA).
-class MachineFunctionPassManager
- : public PassManager<MachineFunction, MachineFunctionAnalysisManager> {
- using Base = PassManager<MachineFunction, MachineFunctionAnalysisManager>;
-
-public:
- MachineFunctionPassManager(bool DebugLogging = false,
- bool RequireCodeGenSCCOrder = false,
- bool VerifyMachineFunction = false)
- : Base(DebugLogging), RequireCodeGenSCCOrder(RequireCodeGenSCCOrder),
- VerifyMachineFunction(VerifyMachineFunction) {}
- MachineFunctionPassManager(MachineFunctionPassManager &&) = default;
- MachineFunctionPassManager &
- operator=(MachineFunctionPassManager &&) = default;
-
- /// Run machine passes for a Module.
- ///
- /// The intended use is to start the codegen pipeline for a Module. The base
- /// class's `run` method is deliberately hidden by this one, because we do
- /// not yet have use cases for composing two instances of machine pass
- /// managers, or for composing machine pass managers with other types of
- /// pass managers.
- Error run(Module &M, MachineFunctionAnalysisManager &MFAM);
-
- template <typename PassT> void addPass(PassT &&Pass) {
- Base::addPass(std::forward<PassT>(Pass));
- PassConceptT *P = Passes.back().get();
- addDoInitialization<PassT>(P);
- addDoFinalization<PassT>(P);
-
- // Add machine module pass.
- addRunOnModule<PassT>(P);
- }
-
-private:
- template <typename PassT>
- using has_init_t = decltype(std::declval<PassT &>().doInitialization(
- std::declval<Module &>(),
- std::declval<MachineFunctionAnalysisManager &>()));
-
- template <typename PassT>
- std::enable_if_t<!is_detected<has_init_t, PassT>::value>
- addDoInitialization(PassConceptT *Pass) {}
-
- template <typename PassT>
- std::enable_if_t<is_detected<has_init_t, PassT>::value>
- addDoInitialization(PassConceptT *Pass) {
- using PassModelT =
- detail::PassModel<MachineFunction, PassT, PreservedAnalyses,
- MachineFunctionAnalysisManager>;
- auto *P = static_cast<PassModelT *>(Pass);
- InitializationFuncs.emplace_back(
- [=](Module &M, MachineFunctionAnalysisManager &MFAM) {
- return P->Pass.doInitialization(M, MFAM);
- });
- }
-
- template <typename PassT>
- using has_fini_t = decltype(std::declval<PassT &>().doFinalization(
- std::declval<Module &>(),
- std::declval<MachineFunctionAnalysisManager &>()));
-
- template <typename PassT>
- std::enable_if_t<!is_detected<has_fini_t, PassT>::value>
- addDoFinalization(PassConceptT *Pass) {}
-
- template <typename PassT>
- std::enable_if_t<is_detected<has_fini_t, PassT>::value>
- addDoFinalization(PassConceptT *Pass) {
- using PassModelT =
- detail::PassModel<MachineFunction, PassT, PreservedAnalyses,
- MachineFunctionAnalysisManager>;
- auto *P = static_cast<PassModelT *>(Pass);
- FinalizationFuncs.emplace_back(
- [=](Module &M, MachineFunctionAnalysisManager &MFAM) {
- return P->Pass.doFinalization(M, MFAM);
- });
- }
-
- template <typename PassT>
- using is_machine_module_pass_t = decltype(std::declval<PassT &>().run(
- std::declval<Module &>(),
- std::declval<MachineFunctionAnalysisManager &>()));
-
- template <typename PassT>
- using is_machine_function_pass_t = decltype(std::declval<PassT &>().run(
- std::declval<MachineFunction &>(),
- std::declval<MachineFunctionAnalysisManager &>()));
-
- template <typename PassT>
- std::enable_if_t<!is_detected<is_machine_module_pass_t, PassT>::value>
- addRunOnModule(PassConceptT *Pass) {}
-
- template <typename PassT>
- std::enable_if_t<is_detected<is_machine_module_pass_t, PassT>::value>
- addRunOnModule(PassConceptT *Pass) {
- static_assert(is_detected<is_machine_function_pass_t, PassT>::value,
- "machine module pass needs to define machine function pass "
- "api. sorry.");
-
- using PassModelT =
- detail::PassModel<MachineFunction, PassT, PreservedAnalyses,
- MachineFunctionAnalysisManager>;
- auto *P = static_cast<PassModelT *>(Pass);
- MachineModulePasses.emplace(
- Passes.size() - 1,
- [=](Module &M, MachineFunctionAnalysisManager &MFAM) {
- return P->Pass.run(M, MFAM);
- });
- }
-
- using FuncTy = Error(Module &, MachineFunctionAnalysisManager &);
- SmallVector<llvm::unique_function<FuncTy>, 4> InitializationFuncs;
- SmallVector<llvm::unique_function<FuncTy>, 4> FinalizationFuncs;
-
- using PassIndex = decltype(Passes)::size_type;
- std::map<PassIndex, llvm::unique_function<FuncTy>> MachineModulePasses;
-
- // Run codegen in the SCC order.
- bool RequireCodeGenSCCOrder;
-
- bool VerifyMachineFunction;
-};
-
-} // end namespace llvm
-
-#endif // LLVM_CODEGEN_MACHINEPASSMANAGER_H
-
-#ifdef __GNUC__
-#pragma GCC diagnostic pop
-#endif
+#pragma once
+
+#ifdef __GNUC__
+#pragma GCC diagnostic push
+#pragma GCC diagnostic ignored "-Wunused-parameter"
+#endif
+
+//===- PassManager.h --- Pass management for CodeGen ------------*- C++ -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// This header defines the pass manager interface for codegen. The codegen
+// pipeline consists of only machine function passes. There is no container
+// relationship between IR module/function and machine function in terms of pass
+// manager organization. So there is no need for adaptor classes (for example
+// ModuleToMachineFunctionAdaptor). Since invalidation could only happen among
+// machine function passes, there are no proxy classes to handle cross-IR-unit
+// invalidation. IR analysis results are provided for machine function passes by
+// their respective analysis managers such as ModuleAnalysisManager and
+// FunctionAnalysisManager.
+//
+// TODO: Add MachineFunctionProperties support.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_CODEGEN_MACHINEPASSMANAGER_H
+#define LLVM_CODEGEN_MACHINEPASSMANAGER_H
+
+#include "llvm/ADT/FunctionExtras.h"
+#include "llvm/ADT/SmallVector.h"
+#include "llvm/CodeGen/MachineFunction.h"
+#include "llvm/IR/PassManager.h"
+#include "llvm/Support/Error.h"
+#include "llvm/Support/type_traits.h"
+
+namespace llvm {
+class Module;
+
+extern template class AnalysisManager<MachineFunction>;
+
+/// An AnalysisManager<MachineFunction> that also exposes IR analysis results.
+class MachineFunctionAnalysisManager : public AnalysisManager<MachineFunction> {
+public:
+ using Base = AnalysisManager<MachineFunction>;
+
+ MachineFunctionAnalysisManager() : Base(false), FAM(nullptr), MAM(nullptr) {}
+ MachineFunctionAnalysisManager(FunctionAnalysisManager &FAM,
+ ModuleAnalysisManager &MAM,
+ bool DebugLogging = false)
+ : Base(DebugLogging), FAM(&FAM), MAM(&MAM) {}
+ MachineFunctionAnalysisManager(MachineFunctionAnalysisManager &&) = default;
+ MachineFunctionAnalysisManager &
+ operator=(MachineFunctionAnalysisManager &&) = default;
+
+ /// Get the result of an analysis pass for a Function.
+ ///
+ /// Runs the analysis if a cached result is not available.
+ template <typename PassT> typename PassT::Result &getResult(Function &F) {
+ return FAM->getResult<PassT>(F);
+ }
+
+ /// Get the cached result of an analysis pass for a Function.
+ ///
+ /// This method never runs the analysis.
+ ///
+ /// \returns null if there is no cached result.
+ template <typename PassT>
+ typename PassT::Result *getCachedResult(Function &F) {
+ return FAM->getCachedResult<PassT>(F);
+ }
+
+ /// Get the result of an analysis pass for a Module.
+ ///
+ /// Runs the analysis if a cached result is not available.
+ template <typename PassT> typename PassT::Result &getResult(Module &M) {
+ return MAM->getResult<PassT>(M);
+ }
+
+ /// Get the cached result of an analysis pass for a Module.
+ ///
+ /// This method never runs the analysis.
+ ///
+ /// \returns null if there is no cached result.
+ template <typename PassT> typename PassT::Result *getCachedResult(Module &M) {
+ return MAM->getCachedResult<PassT>(M);
+ }
+
+ /// Get the result of an analysis pass for a MachineFunction.
+ ///
+ /// Runs the analysis if a cached result is not available.
+ using Base::getResult;
+
+ /// Get the cached result of an analysis pass for a MachineFunction.
+ ///
+ /// This method never runs the analysis.
+ ///
+ /// \returns null if there is no cached result.
+ using Base::getCachedResult;
+
+ // FIXME: Add LoopAnalysisManager or CGSCCAnalysisManager if needed.
+ FunctionAnalysisManager *FAM;
+ ModuleAnalysisManager *MAM;
+};
+
+extern template class PassManager<MachineFunction>;
+
+/// MachineFunctionPassManager adds/removes the following features to/from the
+/// base PassManager template instantiation.
+///
+/// - Support passes that implement doInitialization/doFinalization. This is for
+/// machine function passes to work on module level constructs. One such pass
+/// is AsmPrinter.
+///
+/// - Support machine module pass which runs over the module (for example,
+/// MachineOutliner). A machine module pass needs to define the method:
+///
+/// ```Error run(Module &, MachineFunctionAnalysisManager &)```
+///
+/// FIXME: machine module passes still need to define the usual machine
+/// function pass interface, namely,
+/// `PreservedAnalyses run(MachineFunction &,
+/// MachineFunctionAnalysisManager &)`
+/// But this interface wouldn't be executed. It is just a placeholder
+/// to satisfy the pass manager type-erased interface. This
+/// special-casing of machine module pass is due to its limited use
+/// cases and the unnecessary complexity it may bring to the machine
+/// pass manager.
+///
+/// - The base class `run` method is replaced by an alternative `run` method.
+/// See details below.
+///
+/// - Support code generation in SCC order. Users include interprocedural
+/// register allocation (IPRA).
+class MachineFunctionPassManager
+ : public PassManager<MachineFunction, MachineFunctionAnalysisManager> {
+ using Base = PassManager<MachineFunction, MachineFunctionAnalysisManager>;
+
+public:
+ MachineFunctionPassManager(bool DebugLogging = false,
+ bool RequireCodeGenSCCOrder = false,
+ bool VerifyMachineFunction = false)
+ : Base(DebugLogging), RequireCodeGenSCCOrder(RequireCodeGenSCCOrder),
+ VerifyMachineFunction(VerifyMachineFunction) {}
+ MachineFunctionPassManager(MachineFunctionPassManager &&) = default;
+ MachineFunctionPassManager &
+ operator=(MachineFunctionPassManager &&) = default;
+
+ /// Run machine passes for a Module.
+ ///
+ /// The intended use is to start the codegen pipeline for a Module. The base
+ /// class's `run` method is deliberately hidden by this one, because we do
+ /// not yet have use cases for composing two instances of machine pass
+ /// managers, or for composing machine pass managers with other types of
+ /// pass managers.
+ Error run(Module &M, MachineFunctionAnalysisManager &MFAM);
+
+ template <typename PassT> void addPass(PassT &&Pass) {
+ Base::addPass(std::forward<PassT>(Pass));
+ PassConceptT *P = Passes.back().get();
+ addDoInitialization<PassT>(P);
+ addDoFinalization<PassT>(P);
+
+ // Add machine module pass.
+ addRunOnModule<PassT>(P);
+ }
+
+private:
+ template <typename PassT>
+ using has_init_t = decltype(std::declval<PassT &>().doInitialization(
+ std::declval<Module &>(),
+ std::declval<MachineFunctionAnalysisManager &>()));
+
+ template <typename PassT>
+ std::enable_if_t<!is_detected<has_init_t, PassT>::value>
+ addDoInitialization(PassConceptT *Pass) {}
+
+ template <typename PassT>
+ std::enable_if_t<is_detected<has_init_t, PassT>::value>
+ addDoInitialization(PassConceptT *Pass) {
+ using PassModelT =
+ detail::PassModel<MachineFunction, PassT, PreservedAnalyses,
+ MachineFunctionAnalysisManager>;
+ auto *P = static_cast<PassModelT *>(Pass);
+ InitializationFuncs.emplace_back(
+ [=](Module &M, MachineFunctionAnalysisManager &MFAM) {
+ return P->Pass.doInitialization(M, MFAM);
+ });
+ }
+
+ template <typename PassT>
+ using has_fini_t = decltype(std::declval<PassT &>().doFinalization(
+ std::declval<Module &>(),
+ std::declval<MachineFunctionAnalysisManager &>()));
+
+ template <typename PassT>
+ std::enable_if_t<!is_detected<has_fini_t, PassT>::value>
+ addDoFinalization(PassConceptT *Pass) {}
+
+ template <typename PassT>
+ std::enable_if_t<is_detected<has_fini_t, PassT>::value>
+ addDoFinalization(PassConceptT *Pass) {
+ using PassModelT =
+ detail::PassModel<MachineFunction, PassT, PreservedAnalyses,
+ MachineFunctionAnalysisManager>;
+ auto *P = static_cast<PassModelT *>(Pass);
+ FinalizationFuncs.emplace_back(
+ [=](Module &M, MachineFunctionAnalysisManager &MFAM) {
+ return P->Pass.doFinalization(M, MFAM);
+ });
+ }
+
+ template <typename PassT>
+ using is_machine_module_pass_t = decltype(std::declval<PassT &>().run(
+ std::declval<Module &>(),
+ std::declval<MachineFunctionAnalysisManager &>()));
+
+ template <typename PassT>
+ using is_machine_function_pass_t = decltype(std::declval<PassT &>().run(
+ std::declval<MachineFunction &>(),
+ std::declval<MachineFunctionAnalysisManager &>()));
+
+ template <typename PassT>
+ std::enable_if_t<!is_detected<is_machine_module_pass_t, PassT>::value>
+ addRunOnModule(PassConceptT *Pass) {}
+
+ template <typename PassT>
+ std::enable_if_t<is_detected<is_machine_module_pass_t, PassT>::value>
+ addRunOnModule(PassConceptT *Pass) {
+ static_assert(is_detected<is_machine_function_pass_t, PassT>::value,
+ "machine module pass needs to define machine function pass "
+ "api. sorry.");
+
+ using PassModelT =
+ detail::PassModel<MachineFunction, PassT, PreservedAnalyses,
+ MachineFunctionAnalysisManager>;
+ auto *P = static_cast<PassModelT *>(Pass);
+ MachineModulePasses.emplace(
+ Passes.size() - 1,
+ [=](Module &M, MachineFunctionAnalysisManager &MFAM) {
+ return P->Pass.run(M, MFAM);
+ });
+ }
+
+ using FuncTy = Error(Module &, MachineFunctionAnalysisManager &);
+ SmallVector<llvm::unique_function<FuncTy>, 4> InitializationFuncs;
+ SmallVector<llvm::unique_function<FuncTy>, 4> FinalizationFuncs;
+
+ using PassIndex = decltype(Passes)::size_type;
+ std::map<PassIndex, llvm::unique_function<FuncTy>> MachineModulePasses;
+
+ // Run codegen in the SCC order.
+ bool RequireCodeGenSCCOrder;
+
+ bool VerifyMachineFunction;
+};
+
+} // end namespace llvm
+
+#endif // LLVM_CODEGEN_MACHINEPASSMANAGER_H
+
+#ifdef __GNUC__
+#pragma GCC diagnostic pop
+#endif
diff --git a/contrib/libs/llvm12/include/llvm/CodeGen/MachinePassRegistry.def b/contrib/libs/llvm12/include/llvm/CodeGen/MachinePassRegistry.def
index e9eaa5f770..da969ba6be 100644
--- a/contrib/libs/llvm12/include/llvm/CodeGen/MachinePassRegistry.def
+++ b/contrib/libs/llvm12/include/llvm/CodeGen/MachinePassRegistry.def
@@ -1,197 +1,197 @@
-//===- MachinePassRegistry.def - Registry of passes -------------*- C++ -*-===//
-//
-// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
-// See https://llvm.org/LICENSE.txt for license information.
-// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
-//
-//===----------------------------------------------------------------------===//
-//
-// This file is used as the registry of passes that are part of the
-// target-independent code generator.
-//
-//===----------------------------------------------------------------------===//
-
-// NOTE: NO INCLUDE GUARD DESIRED!
-
-#ifndef MODULE_ANALYSIS
-#define MODULE_ANALYSIS(NAME, PASS_NAME, CONSTRUCTOR)
-#endif
-MODULE_ANALYSIS("pass-instrumentation", PassInstrumentationAnalysis, (PIC))
-#undef MODULE_ANALYSIS
-
-#ifndef MODULE_PASS
-#define MODULE_PASS(NAME, PASS_NAME, CONSTRUCTOR)
-#endif
-MODULE_PASS("pre-isel-intrinsic-lowering", PreISelIntrinsicLoweringPass, ())
-#undef MODULE_PASS
-
-#ifndef FUNCTION_ANALYSIS
-#define FUNCTION_ANALYSIS(NAME, PASS_NAME, CONSTRUCTOR)
-#endif
-FUNCTION_ANALYSIS("pass-instrumentation", PassInstrumentationAnalysis, (PIC))
-FUNCTION_ANALYSIS("targetir", TargetIRAnalysis, (std::move(TM.getTargetIRAnalysis())))
-#undef FUNCTION_ANALYSIS
-
-#ifndef FUNCTION_PASS
-#define FUNCTION_PASS(NAME, PASS_NAME, CONSTRUCTOR)
-#endif
-FUNCTION_PASS("mergeicmps", MergeICmpsPass, ())
-FUNCTION_PASS("lower-constant-intrinsics", LowerConstantIntrinsicsPass, ())
-FUNCTION_PASS("unreachableblockelim", UnreachableBlockElimPass, ())
-FUNCTION_PASS("consthoist", ConstantHoistingPass, ())
-FUNCTION_PASS("partially-inline-libcalls", PartiallyInlineLibCallsPass, ())
-FUNCTION_PASS("ee-instrument", EntryExitInstrumenterPass, (false))
-FUNCTION_PASS("post-inline-ee-instrument", EntryExitInstrumenterPass, (true))
-FUNCTION_PASS("expand-reductions", ExpandReductionsPass, ())
-FUNCTION_PASS("lowerinvoke", LowerInvokePass, ())
-FUNCTION_PASS("scalarize-masked-mem-intrin", ScalarizeMaskedMemIntrinPass, ())
-FUNCTION_PASS("verify", VerifierPass, ())
-#undef FUNCTION_PASS
-
-#ifndef LOOP_PASS
-#define LOOP_PASS(NAME, PASS_NAME, CONSTRUCTOR)
-#endif
-LOOP_PASS("loop-reduce", LoopStrengthReducePass, ())
-#undef LOOP_PASS
-
-#ifndef MACHINE_MODULE_PASS
-#define MACHINE_MODULE_PASS(NAME, PASS_NAME, CONSTRUCTOR)
-#endif
-#undef MACHINE_MODULE_PASS
-
-#ifndef MACHINE_FUNCTION_ANALYSIS
-#define MACHINE_FUNCTION_ANALYSIS(NAME, PASS_NAME, CONSTRUCTOR)
-#endif
-MACHINE_FUNCTION_ANALYSIS("pass-instrumentation", PassInstrumentationAnalysis, (PIC))
-// LiveVariables currently requires pure SSA form.
-// FIXME: Once TwoAddressInstruction pass no longer uses kill flags,
-// LiveVariables can be removed completely, and LiveIntervals can be directly
-// computed. (We still either need to regenerate kill flags after regalloc, or
-// preferably fix the scavenger to not depend on them).
-// MACHINE_FUNCTION_ANALYSIS("live-vars", LiveVariablesAnalysis())
-
-// MACHINE_FUNCTION_ANALYSIS("live-stacks", LiveStacksPass())
-// MACHINE_FUNCTION_ANALYSIS("slot-indexes", SlotIndexesAnalysis())
-// MACHINE_FUNCTION_ANALYSIS("edge-bundles", EdgeBundlesAnalysis())
-// MACHINE_FUNCTION_ANALYSIS("lazy-machine-bfi", LazyMachineBlockFrequencyInfoAnalysis())
-// MACHINE_FUNCTION_ANALYSIS("machine-bfi", MachineBlockFrequencyInfoAnalysis())
-// MACHINE_FUNCTION_ANALYSIS("machine-loops", MachineLoopInfoAnalysis())
-// MACHINE_FUNCTION_ANALYSIS("machine-dom-frontier", MachineDominanceFrontierAnalysis())
-// MACHINE_FUNCTION_ANALYSIS("machine-dom-tree", MachineDominatorTreeAnalysis())
-// MACHINE_FUNCTION_ANALYSIS("machine-ore", MachineOptimizationRemarkEmitterPassAnalysis())
-// MACHINE_FUNCTION_ANALYSIS("machine-post-dom-tree", MachinePostDominatorTreeAnalysis())
-// MACHINE_FUNCTION_ANALYSIS("machine-region-info", MachineRegionInfoPassAnalysis())
-// MACHINE_FUNCTION_ANALYSIS("machine-trace-metrics", MachineTraceMetricsAnalysis())
-// MACHINE_FUNCTION_ANALYSIS("reaching-def", ReachingDefAnalysisAnalysis())
-// MACHINE_FUNCTION_ANALYSIS("live-reg-matrix", LiveRegMatrixAnalysis())
-// MACHINE_FUNCTION_ANALYSIS("gc-analysis", GCMachineCodeAnalysisPass())
-#undef MACHINE_FUNCTION_ANALYSIS
-
-#ifndef MACHINE_FUNCTION_PASS
-#define MACHINE_FUNCTION_PASS(NAME, PASS_NAME, CONSTRUCTOR)
-#endif
-// MACHINE_FUNCTION_PASS("mir-printer", PrintMIRPass, ())
-// MACHINE_FUNCTION_PASS("free-machine-function", FreeMachineFunctionPass, ())
-#undef MACHINE_FUNCTION_PASS
-
-// After a pass is converted to the new pass manager, its entry should be
-// moved from the dummy table to the normal one. For example, a machine
-// function pass moves from DUMMY_MACHINE_FUNCTION_PASS to MACHINE_FUNCTION_PASS.
-
-#ifndef DUMMY_FUNCTION_PASS
-#define DUMMY_FUNCTION_PASS(NAME, PASS_NAME, CONSTRUCTOR)
-#endif
-DUMMY_FUNCTION_PASS("expandmemcmp", ExpandMemCmpPass, ())
-DUMMY_FUNCTION_PASS("gc-lowering", GCLoweringPass, ())
-DUMMY_FUNCTION_PASS("shadow-stack-gc-lowering", ShadowStackGCLoweringPass, ())
-DUMMY_FUNCTION_PASS("sjljehprepare", SjLjEHPreparePass, ())
-DUMMY_FUNCTION_PASS("dwarfehprepare", DwarfEHPass, ())
-DUMMY_FUNCTION_PASS("winehprepare", WinEHPass, ())
-DUMMY_FUNCTION_PASS("wasmehprepare", WasmEHPass, ())
-DUMMY_FUNCTION_PASS("codegenprepare", CodeGenPreparePass, ())
-DUMMY_FUNCTION_PASS("safe-stack", SafeStackPass, ())
-DUMMY_FUNCTION_PASS("stack-protector", StackProtectorPass, ())
-DUMMY_FUNCTION_PASS("atomic-expand", AtomicExpandPass, ())
-DUMMY_FUNCTION_PASS("interleaved-access", InterleavedAccessPass, ())
-DUMMY_FUNCTION_PASS("indirectbr-expand", IndirectBrExpandPass, ())
-DUMMY_FUNCTION_PASS("cfguard-dispatch", CFGuardDispatchPass, ())
-DUMMY_FUNCTION_PASS("cfguard-check", CFGuardCheckPass, ())
-DUMMY_FUNCTION_PASS("gc-info-printer", GCInfoPrinterPass, ())
-#undef DUMMY_FUNCTION_PASS
-
-#ifndef DUMMY_MODULE_PASS
-#define DUMMY_MODULE_PASS(NAME, PASS_NAME, CONSTRUCTOR)
-#endif
-DUMMY_MODULE_PASS("lower-emutls", LowerEmuTLSPass, ())
-#undef DUMMY_MODULE_PASS
-
-#ifndef DUMMY_MACHINE_MODULE_PASS
-#define DUMMY_MACHINE_MODULE_PASS(NAME, PASS_NAME, CONSTRUCTOR)
-#endif
-DUMMY_MACHINE_MODULE_PASS("machine-outliner", MachineOutlinerPass, ())
-#undef DUMMY_MACHINE_MODULE_PASS
-
-#ifndef DUMMY_MACHINE_FUNCTION_PASS
-#define DUMMY_MACHINE_FUNCTION_PASS(NAME, PASS_NAME, CONSTRUCTOR)
-#endif
-DUMMY_MACHINE_FUNCTION_PASS("mir-printer", PrintMIRPass, ())
-DUMMY_MACHINE_FUNCTION_PASS("free-machine-function", FreeMachineFunctionPass, ())
-DUMMY_MACHINE_FUNCTION_PASS("finalize-isel", FinalizeISelPass, ())
-DUMMY_MACHINE_FUNCTION_PASS("localstackalloc", LocalStackSlotPass, ())
-DUMMY_MACHINE_FUNCTION_PASS("shrink-wrap", ShrinkWrapPass, ())
-DUMMY_MACHINE_FUNCTION_PASS("prologepilog", PrologEpilogInserterPass, ())
-DUMMY_MACHINE_FUNCTION_PASS("postrapseudos", ExpandPostRAPseudosPass, ())
-DUMMY_MACHINE_FUNCTION_PASS("implicit-null-checks", ImplicitNullChecksPass, ())
-DUMMY_MACHINE_FUNCTION_PASS("postmisched", PostMachineSchedulerPass, ())
-DUMMY_MACHINE_FUNCTION_PASS("machine-scheduler", MachineSchedulerPass, ())
-DUMMY_MACHINE_FUNCTION_PASS("machine-cp", MachineCopyPropagationPass, ())
-DUMMY_MACHINE_FUNCTION_PASS("post-RA-sched", PostRASchedulerPass, ())
-DUMMY_MACHINE_FUNCTION_PASS("fentry-insert", FEntryInserterPass, ())
-DUMMY_MACHINE_FUNCTION_PASS("xray-instrumentation", XRayInstrumentationPass, ())
-DUMMY_MACHINE_FUNCTION_PASS("patchable-function", PatchableFunctionPass, ())
-DUMMY_MACHINE_FUNCTION_PASS("reg-usage-propagation", RegUsageInfoPropagationPass, ())
-DUMMY_MACHINE_FUNCTION_PASS("reg-usage-collector", RegUsageInfoCollectorPass, ())
-DUMMY_MACHINE_FUNCTION_PASS("funclet-layout", FuncletLayoutPass, ())
-DUMMY_MACHINE_FUNCTION_PASS("stackmap-liveness", StackMapLivenessPass, ())
-DUMMY_MACHINE_FUNCTION_PASS("livedebugvalues", LiveDebugValuesPass, ())
-DUMMY_MACHINE_FUNCTION_PASS("early-tailduplication", EarlyTailDuplicatePass, ())
-DUMMY_MACHINE_FUNCTION_PASS("opt-phis", OptimizePHIsPass, ())
-DUMMY_MACHINE_FUNCTION_PASS("stack-coloring", StackColoringPass, ())
-DUMMY_MACHINE_FUNCTION_PASS("dead-mi-elimination", DeadMachineInstructionElimPass, ())
-DUMMY_MACHINE_FUNCTION_PASS("early-machinelicm", EarlyMachineLICMPass, ())
-DUMMY_MACHINE_FUNCTION_PASS("machinelicm", MachineLICMPass, ())
-DUMMY_MACHINE_FUNCTION_PASS("machine-cse", MachineCSEPass, ())
-DUMMY_MACHINE_FUNCTION_PASS("machine-sink", MachineSinkingPass, ())
-DUMMY_MACHINE_FUNCTION_PASS("postra-machine-sink", PostRAMachineSinkingPass, ())
-DUMMY_MACHINE_FUNCTION_PASS("peephole-opt", PeepholeOptimizerPass, ())
-DUMMY_MACHINE_FUNCTION_PASS("regalloc", RegAllocPass, ())
-DUMMY_MACHINE_FUNCTION_PASS("virtregrewriter", VirtRegRewriterPass, ())
-DUMMY_MACHINE_FUNCTION_PASS("stack-slot-coloring", StackSlotColoringPass, ())
-DUMMY_MACHINE_FUNCTION_PASS("phi-node-elimination", PHIEliminationPass, ())
-DUMMY_MACHINE_FUNCTION_PASS("twoaddressinstruction", TwoAddressInstructionPass, ())
-DUMMY_MACHINE_FUNCTION_PASS("detect-dead-lanes", DetectDeadLanesPass, ())
-DUMMY_MACHINE_FUNCTION_PASS("processimpdefs", ProcessImplicitDefsPass, ())
-DUMMY_MACHINE_FUNCTION_PASS("liveintervals", LiveIntervalsPass, ())
-DUMMY_MACHINE_FUNCTION_PASS("simple-register-coalescing", RegisterCoalescerPass, ())
-DUMMY_MACHINE_FUNCTION_PASS("rename-independent-subregs", RenameIndependentSubregsPass, ())
-DUMMY_MACHINE_FUNCTION_PASS("branch-folder", BranchFolderPass, ())
-DUMMY_MACHINE_FUNCTION_PASS("tailduplication", TailDuplicatePass, ())
-DUMMY_MACHINE_FUNCTION_PASS("block-placement", MachineBlockPlacementPass, ())
-DUMMY_MACHINE_FUNCTION_PASS("block-placement-stats", MachineBlockPlacementStatsPass, ())
-DUMMY_MACHINE_FUNCTION_PASS("early-ifcvt", EarlyIfConverterPass, ())
-DUMMY_MACHINE_FUNCTION_PASS("machine-combiner", MachineCombinerPass, ())
-DUMMY_MACHINE_FUNCTION_PASS("lrshrink", LiveRangeShrinkPass, ())
-DUMMY_MACHINE_FUNCTION_PASS("break-false-deps", BreakFalseDepsPass, ())
-DUMMY_MACHINE_FUNCTION_PASS("cfi-instr-inserter", CFIInstrInserterPass, ())
-DUMMY_MACHINE_FUNCTION_PASS("cfguard-longjmp", CFGuardLongjmpPass, ())
-DUMMY_MACHINE_FUNCTION_PASS("ra-basic", RABasicPass, ())
-DUMMY_MACHINE_FUNCTION_PASS("ra-fast", RAFastPass, ())
-DUMMY_MACHINE_FUNCTION_PASS("ra-greedy", RAGreedyPass, ())
-DUMMY_MACHINE_FUNCTION_PASS("ra-pbqp", RAPBQPPass, ())
-DUMMY_MACHINE_FUNCTION_PASS("legalizer", LegalizerPass, ())
-DUMMY_MACHINE_FUNCTION_PASS("irtranslator", IRTranslatorPass, ())
-DUMMY_MACHINE_FUNCTION_PASS("regbankselect", RegBankSelectPass, ())
-DUMMY_MACHINE_FUNCTION_PASS("instruction-select", InstructionSelectPass, ())
-DUMMY_MACHINE_FUNCTION_PASS("reset-machine-function", ResetMachineFunctionPass, ())
-DUMMY_MACHINE_FUNCTION_PASS("machineverifier", MachineVerifierPass, ())
-#undef DUMMY_MACHINE_FUNCTION_PASS
+//===- MachinePassRegistry.def - Registry of passes -------------*- C++ -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// This file is used as the registry of passes that are part of the
+// target-independent code generator.
+//
+//===----------------------------------------------------------------------===//
+
+// NOTE: NO INCLUDE GUARD DESIRED!
+
+#ifndef MODULE_ANALYSIS
+#define MODULE_ANALYSIS(NAME, PASS_NAME, CONSTRUCTOR)
+#endif
+MODULE_ANALYSIS("pass-instrumentation", PassInstrumentationAnalysis, (PIC))
+#undef MODULE_ANALYSIS
+
+#ifndef MODULE_PASS
+#define MODULE_PASS(NAME, PASS_NAME, CONSTRUCTOR)
+#endif
+MODULE_PASS("pre-isel-intrinsic-lowering", PreISelIntrinsicLoweringPass, ())
+#undef MODULE_PASS
+
+#ifndef FUNCTION_ANALYSIS
+#define FUNCTION_ANALYSIS(NAME, PASS_NAME, CONSTRUCTOR)
+#endif
+FUNCTION_ANALYSIS("pass-instrumentation", PassInstrumentationAnalysis, (PIC))
+FUNCTION_ANALYSIS("targetir", TargetIRAnalysis, (std::move(TM.getTargetIRAnalysis())))
+#undef FUNCTION_ANALYSIS
+
+#ifndef FUNCTION_PASS
+#define FUNCTION_PASS(NAME, PASS_NAME, CONSTRUCTOR)
+#endif
+FUNCTION_PASS("mergeicmps", MergeICmpsPass, ())
+FUNCTION_PASS("lower-constant-intrinsics", LowerConstantIntrinsicsPass, ())
+FUNCTION_PASS("unreachableblockelim", UnreachableBlockElimPass, ())
+FUNCTION_PASS("consthoist", ConstantHoistingPass, ())
+FUNCTION_PASS("partially-inline-libcalls", PartiallyInlineLibCallsPass, ())
+FUNCTION_PASS("ee-instrument", EntryExitInstrumenterPass, (false))
+FUNCTION_PASS("post-inline-ee-instrument", EntryExitInstrumenterPass, (true))
+FUNCTION_PASS("expand-reductions", ExpandReductionsPass, ())
+FUNCTION_PASS("lowerinvoke", LowerInvokePass, ())
+FUNCTION_PASS("scalarize-masked-mem-intrin", ScalarizeMaskedMemIntrinPass, ())
+FUNCTION_PASS("verify", VerifierPass, ())
+#undef FUNCTION_PASS
+
+#ifndef LOOP_PASS
+#define LOOP_PASS(NAME, PASS_NAME, CONSTRUCTOR)
+#endif
+LOOP_PASS("loop-reduce", LoopStrengthReducePass, ())
+#undef LOOP_PASS
+
+#ifndef MACHINE_MODULE_PASS
+#define MACHINE_MODULE_PASS(NAME, PASS_NAME, CONSTRUCTOR)
+#endif
+#undef MACHINE_MODULE_PASS
+
+#ifndef MACHINE_FUNCTION_ANALYSIS
+#define MACHINE_FUNCTION_ANALYSIS(NAME, PASS_NAME, CONSTRUCTOR)
+#endif
+MACHINE_FUNCTION_ANALYSIS("pass-instrumentation", PassInstrumentationAnalysis, (PIC))
+// LiveVariables currently requires pure SSA form.
+// FIXME: Once TwoAddressInstruction pass no longer uses kill flags,
+// LiveVariables can be removed completely, and LiveIntervals can be directly
+// computed. (We still either need to regenerate kill flags after regalloc, or
+// preferably fix the scavenger to not depend on them).
+// MACHINE_FUNCTION_ANALYSIS("live-vars", LiveVariablesAnalysis())
+
+// MACHINE_FUNCTION_ANALYSIS("live-stacks", LiveStacksPass())
+// MACHINE_FUNCTION_ANALYSIS("slot-indexes", SlotIndexesAnalysis())
+// MACHINE_FUNCTION_ANALYSIS("edge-bundles", EdgeBundlesAnalysis())
+// MACHINE_FUNCTION_ANALYSIS("lazy-machine-bfi", LazyMachineBlockFrequencyInfoAnalysis())
+// MACHINE_FUNCTION_ANALYSIS("machine-bfi", MachineBlockFrequencyInfoAnalysis())
+// MACHINE_FUNCTION_ANALYSIS("machine-loops", MachineLoopInfoAnalysis())
+// MACHINE_FUNCTION_ANALYSIS("machine-dom-frontier", MachineDominanceFrontierAnalysis())
+// MACHINE_FUNCTION_ANALYSIS("machine-dom-tree", MachineDominatorTreeAnalysis())
+// MACHINE_FUNCTION_ANALYSIS("machine-ore", MachineOptimizationRemarkEmitterPassAnalysis())
+// MACHINE_FUNCTION_ANALYSIS("machine-post-dom-tree", MachinePostDominatorTreeAnalysis())
+// MACHINE_FUNCTION_ANALYSIS("machine-region-info", MachineRegionInfoPassAnalysis())
+// MACHINE_FUNCTION_ANALYSIS("machine-trace-metrics", MachineTraceMetricsAnalysis())
+// MACHINE_FUNCTION_ANALYSIS("reaching-def", ReachingDefAnalysisAnalysis())
+// MACHINE_FUNCTION_ANALYSIS("live-reg-matrix", LiveRegMatrixAnalysis())
+// MACHINE_FUNCTION_ANALYSIS("gc-analysis", GCMachineCodeAnalysisPass())
+#undef MACHINE_FUNCTION_ANALYSIS
+
+#ifndef MACHINE_FUNCTION_PASS
+#define MACHINE_FUNCTION_PASS(NAME, PASS_NAME, CONSTRUCTOR)
+#endif
+// MACHINE_FUNCTION_PASS("mir-printer", PrintMIRPass, ())
+// MACHINE_FUNCTION_PASS("free-machine-function", FreeMachineFunctionPass, ())
+#undef MACHINE_FUNCTION_PASS
+
+// After a pass is converted to the new pass manager, its entry should be
+// moved from the dummy table to the normal one. For example, a machine
+// function pass moves from DUMMY_MACHINE_FUNCTION_PASS to MACHINE_FUNCTION_PASS.
+
+#ifndef DUMMY_FUNCTION_PASS
+#define DUMMY_FUNCTION_PASS(NAME, PASS_NAME, CONSTRUCTOR)
+#endif
+DUMMY_FUNCTION_PASS("expandmemcmp", ExpandMemCmpPass, ())
+DUMMY_FUNCTION_PASS("gc-lowering", GCLoweringPass, ())
+DUMMY_FUNCTION_PASS("shadow-stack-gc-lowering", ShadowStackGCLoweringPass, ())
+DUMMY_FUNCTION_PASS("sjljehprepare", SjLjEHPreparePass, ())
+DUMMY_FUNCTION_PASS("dwarfehprepare", DwarfEHPass, ())
+DUMMY_FUNCTION_PASS("winehprepare", WinEHPass, ())
+DUMMY_FUNCTION_PASS("wasmehprepare", WasmEHPass, ())
+DUMMY_FUNCTION_PASS("codegenprepare", CodeGenPreparePass, ())
+DUMMY_FUNCTION_PASS("safe-stack", SafeStackPass, ())
+DUMMY_FUNCTION_PASS("stack-protector", StackProtectorPass, ())
+DUMMY_FUNCTION_PASS("atomic-expand", AtomicExpandPass, ())
+DUMMY_FUNCTION_PASS("interleaved-access", InterleavedAccessPass, ())
+DUMMY_FUNCTION_PASS("indirectbr-expand", IndirectBrExpandPass, ())
+DUMMY_FUNCTION_PASS("cfguard-dispatch", CFGuardDispatchPass, ())
+DUMMY_FUNCTION_PASS("cfguard-check", CFGuardCheckPass, ())
+DUMMY_FUNCTION_PASS("gc-info-printer", GCInfoPrinterPass, ())
+#undef DUMMY_FUNCTION_PASS
+
+#ifndef DUMMY_MODULE_PASS
+#define DUMMY_MODULE_PASS(NAME, PASS_NAME, CONSTRUCTOR)
+#endif
+DUMMY_MODULE_PASS("lower-emutls", LowerEmuTLSPass, ())
+#undef DUMMY_MODULE_PASS
+
+#ifndef DUMMY_MACHINE_MODULE_PASS
+#define DUMMY_MACHINE_MODULE_PASS(NAME, PASS_NAME, CONSTRUCTOR)
+#endif
+DUMMY_MACHINE_MODULE_PASS("machine-outliner", MachineOutlinerPass, ())
+#undef DUMMY_MACHINE_MODULE_PASS
+
+#ifndef DUMMY_MACHINE_FUNCTION_PASS
+#define DUMMY_MACHINE_FUNCTION_PASS(NAME, PASS_NAME, CONSTRUCTOR)
+#endif
+DUMMY_MACHINE_FUNCTION_PASS("mir-printer", PrintMIRPass, ())
+DUMMY_MACHINE_FUNCTION_PASS("free-machine-function", FreeMachineFunctionPass, ())
+DUMMY_MACHINE_FUNCTION_PASS("finalize-isel", FinalizeISelPass, ())
+DUMMY_MACHINE_FUNCTION_PASS("localstackalloc", LocalStackSlotPass, ())
+DUMMY_MACHINE_FUNCTION_PASS("shrink-wrap", ShrinkWrapPass, ())
+DUMMY_MACHINE_FUNCTION_PASS("prologepilog", PrologEpilogInserterPass, ())
+DUMMY_MACHINE_FUNCTION_PASS("postrapseudos", ExpandPostRAPseudosPass, ())
+DUMMY_MACHINE_FUNCTION_PASS("implicit-null-checks", ImplicitNullChecksPass, ())
+DUMMY_MACHINE_FUNCTION_PASS("postmisched", PostMachineSchedulerPass, ())
+DUMMY_MACHINE_FUNCTION_PASS("machine-scheduler", MachineSchedulerPass, ())
+DUMMY_MACHINE_FUNCTION_PASS("machine-cp", MachineCopyPropagationPass, ())
+DUMMY_MACHINE_FUNCTION_PASS("post-RA-sched", PostRASchedulerPass, ())
+DUMMY_MACHINE_FUNCTION_PASS("fentry-insert", FEntryInserterPass, ())
+DUMMY_MACHINE_FUNCTION_PASS("xray-instrumentation", XRayInstrumentationPass, ())
+DUMMY_MACHINE_FUNCTION_PASS("patchable-function", PatchableFunctionPass, ())
+DUMMY_MACHINE_FUNCTION_PASS("reg-usage-propagation", RegUsageInfoPropagationPass, ())
+DUMMY_MACHINE_FUNCTION_PASS("reg-usage-collector", RegUsageInfoCollectorPass, ())
+DUMMY_MACHINE_FUNCTION_PASS("funclet-layout", FuncletLayoutPass, ())
+DUMMY_MACHINE_FUNCTION_PASS("stackmap-liveness", StackMapLivenessPass, ())
+DUMMY_MACHINE_FUNCTION_PASS("livedebugvalues", LiveDebugValuesPass, ())
+DUMMY_MACHINE_FUNCTION_PASS("early-tailduplication", EarlyTailDuplicatePass, ())
+DUMMY_MACHINE_FUNCTION_PASS("opt-phis", OptimizePHIsPass, ())
+DUMMY_MACHINE_FUNCTION_PASS("stack-coloring", StackColoringPass, ())
+DUMMY_MACHINE_FUNCTION_PASS("dead-mi-elimination", DeadMachineInstructionElimPass, ())
+DUMMY_MACHINE_FUNCTION_PASS("early-machinelicm", EarlyMachineLICMPass, ())
+DUMMY_MACHINE_FUNCTION_PASS("machinelicm", MachineLICMPass, ())
+DUMMY_MACHINE_FUNCTION_PASS("machine-cse", MachineCSEPass, ())
+DUMMY_MACHINE_FUNCTION_PASS("machine-sink", MachineSinkingPass, ())
+DUMMY_MACHINE_FUNCTION_PASS("postra-machine-sink", PostRAMachineSinkingPass, ())
+DUMMY_MACHINE_FUNCTION_PASS("peephole-opt", PeepholeOptimizerPass, ())
+DUMMY_MACHINE_FUNCTION_PASS("regalloc", RegAllocPass, ())
+DUMMY_MACHINE_FUNCTION_PASS("virtregrewriter", VirtRegRewriterPass, ())
+DUMMY_MACHINE_FUNCTION_PASS("stack-slot-coloring", StackSlotColoringPass, ())
+DUMMY_MACHINE_FUNCTION_PASS("phi-node-elimination", PHIEliminationPass, ())
+DUMMY_MACHINE_FUNCTION_PASS("twoaddressinstruction", TwoAddressInstructionPass, ())
+DUMMY_MACHINE_FUNCTION_PASS("detect-dead-lanes", DetectDeadLanesPass, ())
+DUMMY_MACHINE_FUNCTION_PASS("processimpdefs", ProcessImplicitDefsPass, ())
+DUMMY_MACHINE_FUNCTION_PASS("liveintervals", LiveIntervalsPass, ())
+DUMMY_MACHINE_FUNCTION_PASS("simple-register-coalescing", RegisterCoalescerPass, ())
+DUMMY_MACHINE_FUNCTION_PASS("rename-independent-subregs", RenameIndependentSubregsPass, ())
+DUMMY_MACHINE_FUNCTION_PASS("branch-folder", BranchFolderPass, ())
+DUMMY_MACHINE_FUNCTION_PASS("tailduplication", TailDuplicatePass, ())
+DUMMY_MACHINE_FUNCTION_PASS("block-placement", MachineBlockPlacementPass, ())
+DUMMY_MACHINE_FUNCTION_PASS("block-placement-stats", MachineBlockPlacementStatsPass, ())
+DUMMY_MACHINE_FUNCTION_PASS("early-ifcvt", EarlyIfConverterPass, ())
+DUMMY_MACHINE_FUNCTION_PASS("machine-combiner", MachineCombinerPass, ())
+DUMMY_MACHINE_FUNCTION_PASS("lrshrink", LiveRangeShrinkPass, ())
+DUMMY_MACHINE_FUNCTION_PASS("break-false-deps", BreakFalseDepsPass, ())
+DUMMY_MACHINE_FUNCTION_PASS("cfi-instr-inserter", CFIInstrInserterPass, ())
+DUMMY_MACHINE_FUNCTION_PASS("cfguard-longjmp", CFGuardLongjmpPass, ())
+DUMMY_MACHINE_FUNCTION_PASS("ra-basic", RABasicPass, ())
+DUMMY_MACHINE_FUNCTION_PASS("ra-fast", RAFastPass, ())
+DUMMY_MACHINE_FUNCTION_PASS("ra-greedy", RAGreedyPass, ())
+DUMMY_MACHINE_FUNCTION_PASS("ra-pbqp", RAPBQPPass, ())
+DUMMY_MACHINE_FUNCTION_PASS("legalizer", LegalizerPass, ())
+DUMMY_MACHINE_FUNCTION_PASS("irtranslator", IRTranslatorPass, ())
+DUMMY_MACHINE_FUNCTION_PASS("regbankselect", RegBankSelectPass, ())
+DUMMY_MACHINE_FUNCTION_PASS("instruction-select", InstructionSelectPass, ())
+DUMMY_MACHINE_FUNCTION_PASS("reset-machine-function", ResetMachineFunctionPass, ())
+DUMMY_MACHINE_FUNCTION_PASS("machineverifier", MachineVerifierPass, ())
+#undef DUMMY_MACHINE_FUNCTION_PASS
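MachinePassRegistry.def above is an X-macro table: a client defines whichever of MODULE_PASS, FUNCTION_PASS, MACHINE_FUNCTION_PASS, etc. it cares about, includes the .def, and gets one expansion per registered pass, while the empty fallback definitions swallow the kinds it ignored. A sketch of a consumer follows; to stay single-file it inlines two entries where real code would #include the .def, so the entries are illustrative only.

#include <cstdio>
#include <string>
#include <vector>

struct PassInfo {
  std::string CliName;
  std::string ClassName;
};

std::vector<PassInfo> collectFunctionPasses() {
  std::vector<PassInfo> Table;
// Define the macro we care about; each table entry becomes a push_back.
#define FUNCTION_PASS(NAME, PASS_NAME, CONSTRUCTOR)                            \
  Table.push_back({NAME, #PASS_NAME});
  // -- begin simulated .def content (real code: #include the .def file) --
  FUNCTION_PASS("mergeicmps", MergeICmpsPass, ())
  FUNCTION_PASS("lowerinvoke", LowerInvokePass, ())
  // -- end simulated .def content --
#undef FUNCTION_PASS
  return Table;
}

int main() {
  for (const auto &P : collectFunctionPasses())
    std::printf("%s -> %s\n", P.CliName.c_str(), P.ClassName.c_str());
}

Because the table is data, the same .def can drive registration, name-to-pass parsing, and documentation generation from one source of truth.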
diff --git a/contrib/libs/llvm12/include/llvm/CodeGen/MachinePipeliner.h b/contrib/libs/llvm12/include/llvm/CodeGen/MachinePipeliner.h
index 64ffaea914..29650f0f9c 100644
--- a/contrib/libs/llvm12/include/llvm/CodeGen/MachinePipeliner.h
+++ b/contrib/libs/llvm12/include/llvm/CodeGen/MachinePipeliner.h
@@ -56,7 +56,7 @@
namespace llvm {
-class AAResults;
+class AAResults;
class NodeSet;
class SMSchedule;
@@ -98,7 +98,7 @@ public:
bool runOnMachineFunction(MachineFunction &MF) override;
- void getAnalysisUsage(AnalysisUsage &AU) const override;
+ void getAnalysisUsage(AnalysisUsage &AU) const override;
private:
void preprocessPhiNodes(MachineBasicBlock &B);
@@ -283,7 +283,7 @@ public:
static bool classof(const ScheduleDAGInstrs *DAG) { return true; }
private:
- void addLoopCarriedDependences(AAResults *AA);
+ void addLoopCarriedDependences(AAResults *AA);
void updatePhiDependences();
void changeDependences();
unsigned calculateResMII();
@@ -302,7 +302,7 @@ private:
void checkValidNodeOrder(const NodeSetType &Circuits) const;
bool schedulePipeline(SMSchedule &Schedule);
bool computeDelta(MachineInstr &MI, unsigned &Delta);
- MachineInstr *findDefInLoop(Register Reg);
+ MachineInstr *findDefInLoop(Register Reg);
bool canUseLastOffsetValue(MachineInstr *MI, unsigned &BasePos,
unsigned &OffsetPos, unsigned &NewBase,
int64_t &NewOffset);
diff --git a/contrib/libs/llvm12/include/llvm/CodeGen/MachineRegisterInfo.h b/contrib/libs/llvm12/include/llvm/CodeGen/MachineRegisterInfo.h
index c758dd5a47..c7fbcacc69 100644
--- a/contrib/libs/llvm12/include/llvm/CodeGen/MachineRegisterInfo.h
+++ b/contrib/libs/llvm12/include/llvm/CodeGen/MachineRegisterInfo.h
@@ -449,22 +449,22 @@ public:
/// Return true if there is exactly one operand defining the specified
/// register.
bool hasOneDef(Register RegNo) const {
- return hasSingleElement(def_operands(RegNo));
- }
-
- /// Returns the defining operand if there is exactly one operand defining the
- /// specified register, otherwise nullptr.
- MachineOperand *getOneDef(Register Reg) const {
- def_iterator DI = def_begin(Reg);
- if (DI == def_end()) // No defs.
- return nullptr;
-
- def_iterator OneDef = DI;
- if (++DI == def_end())
- return &*OneDef;
- return nullptr; // Multiple defs.
- }
-
+ return hasSingleElement(def_operands(RegNo));
+ }
+
+ /// Returns the defining operand if there is exactly one operand defining the
+ /// specified register, otherwise nullptr.
+ MachineOperand *getOneDef(Register Reg) const {
+ def_iterator DI = def_begin(Reg);
+ if (DI == def_end()) // No defs.
+ return nullptr;
+
+ def_iterator OneDef = DI;
+ if (++DI == def_end())
+ return &*OneDef;
+ return nullptr; // Multiple defs.
+ }
+
/// use_iterator/use_begin/use_end - Walk all uses of the specified register.
using use_iterator =
defusechain_iterator<true, false, false, true, false, false>;
@@ -515,7 +515,7 @@ public:
/// hasOneUse - Return true if there is exactly one instruction using the
/// specified register.
bool hasOneUse(Register RegNo) const {
- return hasSingleElement(use_operands(RegNo));
+ return hasSingleElement(use_operands(RegNo));
}
/// use_nodbg_iterator/use_nodbg_begin/use_nodbg_end - Walk all uses of the
@@ -629,7 +629,7 @@ public:
/// Get an iterator over the pressure sets affected by the given physical or
/// virtual register. If RegUnit is physical, it must be a register unit (from
/// MCRegUnitIterator).
- PSetIterator getPressureSets(Register RegUnit) const;
+ PSetIterator getPressureSets(Register RegUnit) const;
//===--------------------------------------------------------------------===//
// Virtual Register Info
@@ -904,7 +904,7 @@ public:
///
/// Reserved registers may belong to an allocatable register class, but the
/// target has explicitly requested that they are not used.
- bool isReserved(MCRegister PhysReg) const {
+ bool isReserved(MCRegister PhysReg) const {
return getReservedRegs().test(PhysReg.id());
}
@@ -1184,13 +1184,13 @@ class PSetIterator {
public:
PSetIterator() = default;
- PSetIterator(Register RegUnit, const MachineRegisterInfo *MRI) {
+ PSetIterator(Register RegUnit, const MachineRegisterInfo *MRI) {
const TargetRegisterInfo *TRI = MRI->getTargetRegisterInfo();
- if (RegUnit.isVirtual()) {
+ if (RegUnit.isVirtual()) {
const TargetRegisterClass *RC = MRI->getRegClass(RegUnit);
PSet = TRI->getRegClassPressureSets(RC);
Weight = TRI->getRegClassWeight(RC).RegWeight;
- } else {
+ } else {
PSet = TRI->getRegUnitPressureSets(RegUnit);
Weight = TRI->getRegUnitWeight(RegUnit);
}
@@ -1212,8 +1212,8 @@ public:
}
};
-inline PSetIterator
-MachineRegisterInfo::getPressureSets(Register RegUnit) const {
+inline PSetIterator
+MachineRegisterInfo::getPressureSets(Register RegUnit) const {
return PSetIterator(RegUnit, this);
}
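getOneDef above is an instance of a small reusable pattern: walk a def chain just far enough to decide whether it has exactly one element, without counting the whole range. A generic sketch of that pattern over an arbitrary range, with a std::vector standing in for the operand chain:

#include <iostream>
#include <iterator>
#include <vector>

// Return a pointer to the sole element of R, or nullptr when R is empty
// or holds more than one element. At most two iterator steps are taken.
template <typename Range>
auto getOnlyElement(Range &R) -> decltype(&*std::begin(R)) {
  auto It = std::begin(R), End = std::end(R);
  if (It == End)
    return nullptr; // No elements.
  auto Only = It;
  if (++It == End)
    return &*Only;  // Exactly one element.
  return nullptr;   // Multiple elements.
}

int main() {
  std::vector<int> One{42}, Many{1, 2};
  if (int *P = getOnlyElement(One))
    std::cout << "single element: " << *P << '\n'; // prints 42
  if (!getOnlyElement(Many))
    std::cout << "no unique element\n";
}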
diff --git a/contrib/libs/llvm12/include/llvm/CodeGen/MachineSSAUpdater.h b/contrib/libs/llvm12/include/llvm/CodeGen/MachineSSAUpdater.h
index 9f7949c79b..fd502f3f3e 100644
--- a/contrib/libs/llvm12/include/llvm/CodeGen/MachineSSAUpdater.h
+++ b/contrib/libs/llvm12/include/llvm/CodeGen/MachineSSAUpdater.h
@@ -69,7 +69,7 @@ public:
/// Initialize - Reset this object to get ready for a new set of SSA
/// updates.
void Initialize(Register V);
- void Initialize(const TargetRegisterClass *RC);
+ void Initialize(const TargetRegisterClass *RC);
/// AddAvailableValue - Indicate that a rewritten value is available at the
/// end of the specified block with the specified value.
diff --git a/contrib/libs/llvm12/include/llvm/CodeGen/MachineStableHash.h b/contrib/libs/llvm12/include/llvm/CodeGen/MachineStableHash.h
index d1409e35d0..450d075ef6 100644
--- a/contrib/libs/llvm12/include/llvm/CodeGen/MachineStableHash.h
+++ b/contrib/libs/llvm12/include/llvm/CodeGen/MachineStableHash.h
@@ -1,41 +1,41 @@
-#pragma once
-
-#ifdef __GNUC__
-#pragma GCC diagnostic push
-#pragma GCC diagnostic ignored "-Wunused-parameter"
-#endif
-
-//===------------ MachineStableHash.h - MIR Stable Hashing Utilities ------===//
-//
-// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
-// See https://llvm.org/LICENSE.txt for license information.
-// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
-//
-//===----------------------------------------------------------------------===//
-//
-// Stable hashing for MachineInstr and MachineOperand. Useful for getting a
-// hash that is stable across runs, modules, etc.
-//
-//===----------------------------------------------------------------------===//
-
-#ifndef LLVM_CODEGEN_MACHINESTABLEHASH_H
-#define LLVM_CODEGEN_MACHINESTABLEHASH_H
-
-#include "llvm/CodeGen/StableHashing.h"
-
-namespace llvm {
-class MachineInstr;
-class MachineOperand;
-
-stable_hash stableHashValue(const MachineOperand &MO);
-stable_hash stableHashValue(const MachineInstr &MI, bool HashVRegs = false,
- bool HashConstantPoolIndices = false,
- bool HashMemOperands = false);
-
-} // namespace llvm
-
-#endif
-
-#ifdef __GNUC__
-#pragma GCC diagnostic pop
-#endif
+#pragma once
+
+#ifdef __GNUC__
+#pragma GCC diagnostic push
+#pragma GCC diagnostic ignored "-Wunused-parameter"
+#endif
+
+//===------------ MachineStableHash.h - MIR Stable Hashing Utilities ------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// Stable hashing for MachineInstr and MachineOperand. Useful for getting a
+// hash that is stable across runs, modules, etc.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_CODEGEN_MACHINESTABLEHASH_H
+#define LLVM_CODEGEN_MACHINESTABLEHASH_H
+
+#include "llvm/CodeGen/StableHashing.h"
+
+namespace llvm {
+class MachineInstr;
+class MachineOperand;
+
+stable_hash stableHashValue(const MachineOperand &MO);
+stable_hash stableHashValue(const MachineInstr &MI, bool HashVRegs = false,
+ bool HashConstantPoolIndices = false,
+ bool HashMemOperands = false);
+
+} // namespace llvm
+
+#endif
+
+#ifdef __GNUC__
+#pragma GCC diagnostic pop
+#endif
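The promise in the header above is stability: equal MIR should hash equally across runs and modules, which rules out pointer identity and the seed-dependent std::hash. A sketch of that property using 64-bit FNV-1a; the constants are the standard FNV parameters, and LLVM's stable_hash uses its own scheme rather than this one.

#include <cstdint>
#include <cstdio>
#include <string_view>

constexpr std::uint64_t stableHash(std::string_view S) {
  std::uint64_t H = 14695981039346656037ull; // FNV-1a 64-bit offset basis
  for (unsigned char C : S) {
    H ^= C;                                  // mix in one byte
    H *= 1099511628211ull;                   // FNV-1a 64-bit prime
  }
  return H;
}

int main() {
  // Prints the same value on every run and on every conforming platform,
  // which is the property std::hash deliberately does not guarantee.
  std::printf("%llu\n", (unsigned long long)stableHash("MachineInstr"));
}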
diff --git a/contrib/libs/llvm12/include/llvm/CodeGen/MachineTraceMetrics.h b/contrib/libs/llvm12/include/llvm/CodeGen/MachineTraceMetrics.h
index 2d9da27f75..0c7c3da18d 100644
--- a/contrib/libs/llvm12/include/llvm/CodeGen/MachineTraceMetrics.h
+++ b/contrib/libs/llvm12/include/llvm/CodeGen/MachineTraceMetrics.h
@@ -147,13 +147,13 @@ public:
/// successors.
struct LiveInReg {
/// The virtual register required, or a register unit.
- Register Reg;
+ Register Reg;
/// For virtual registers: Minimum height of the defining instruction.
/// For regunits: Height of the highest user in the trace.
unsigned Height;
- LiveInReg(Register Reg, unsigned Height = 0) : Reg(Reg), Height(Height) {}
+ LiveInReg(Register Reg, unsigned Height = 0) : Reg(Reg), Height(Height) {}
};
/// Per-basic block information that relates to a specific trace through the
diff --git a/contrib/libs/llvm12/include/llvm/CodeGen/MultiHazardRecognizer.h b/contrib/libs/llvm12/include/llvm/CodeGen/MultiHazardRecognizer.h
index 6542ceb52d..b4c417e9fc 100644
--- a/contrib/libs/llvm12/include/llvm/CodeGen/MultiHazardRecognizer.h
+++ b/contrib/libs/llvm12/include/llvm/CodeGen/MultiHazardRecognizer.h
@@ -1,58 +1,58 @@
-#pragma once
-
-#ifdef __GNUC__
-#pragma GCC diagnostic push
-#pragma GCC diagnostic ignored "-Wunused-parameter"
-#endif
-
-//=- llvm/CodeGen/MultiHazardRecognizer.h - Scheduling Support ----*- C++ -*-=//
-//
-// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
-// See https://llvm.org/LICENSE.txt for license information.
-// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
-//
-//===----------------------------------------------------------------------===//
-//
-// This file implements the MultiHazardRecognizer class, which is a wrapper
-// for a set of ScheduleHazardRecognizer instances.
-//
-//===----------------------------------------------------------------------===//
-
-#ifndef LLVM_CODEGEN_MULTIHAZARDRECOGNIZER_H
-#define LLVM_CODEGEN_MULTIHAZARDRECOGNIZER_H
-
-#include "llvm/ADT/SmallVector.h"
-#include "llvm/CodeGen/ScheduleHazardRecognizer.h"
-
-namespace llvm {
-
-class MachineInstr;
-class SUnit;
-
-class MultiHazardRecognizer : public ScheduleHazardRecognizer {
- SmallVector<std::unique_ptr<ScheduleHazardRecognizer>, 4> Recognizers;
-
-public:
- MultiHazardRecognizer() = default;
- void AddHazardRecognizer(std::unique_ptr<ScheduleHazardRecognizer> &&);
-
- bool atIssueLimit() const override;
- HazardType getHazardType(SUnit *, int Stalls = 0) override;
- void Reset() override;
- void EmitInstruction(SUnit *) override;
- void EmitInstruction(MachineInstr *) override;
- unsigned PreEmitNoops(SUnit *) override;
- unsigned PreEmitNoops(MachineInstr *) override;
- bool ShouldPreferAnother(SUnit *) override;
- void AdvanceCycle() override;
- void RecedeCycle() override;
- void EmitNoop() override;
-};
-
-} // end namespace llvm
-
-#endif // LLVM_CODEGEN_MULTIHAZARDRECOGNIZER_H
-
-#ifdef __GNUC__
-#pragma GCC diagnostic pop
-#endif
+#pragma once
+
+#ifdef __GNUC__
+#pragma GCC diagnostic push
+#pragma GCC diagnostic ignored "-Wunused-parameter"
+#endif
+
+//=- llvm/CodeGen/MultiHazardRecognizer.h - Scheduling Support ----*- C++ -*-=//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// This file implements the MultiHazardRecognizer class, which is a wrapper
+// for a set of ScheduleHazardRecognizer instances.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_CODEGEN_MULTIHAZARDRECOGNIZER_H
+#define LLVM_CODEGEN_MULTIHAZARDRECOGNIZER_H
+
+#include "llvm/ADT/SmallVector.h"
+#include "llvm/CodeGen/ScheduleHazardRecognizer.h"
+
+namespace llvm {
+
+class MachineInstr;
+class SUnit;
+
+class MultiHazardRecognizer : public ScheduleHazardRecognizer {
+ SmallVector<std::unique_ptr<ScheduleHazardRecognizer>, 4> Recognizers;
+
+public:
+ MultiHazardRecognizer() = default;
+ void AddHazardRecognizer(std::unique_ptr<ScheduleHazardRecognizer> &&);
+
+ bool atIssueLimit() const override;
+ HazardType getHazardType(SUnit *, int Stalls = 0) override;
+ void Reset() override;
+ void EmitInstruction(SUnit *) override;
+ void EmitInstruction(MachineInstr *) override;
+ unsigned PreEmitNoops(SUnit *) override;
+ unsigned PreEmitNoops(MachineInstr *) override;
+ bool ShouldPreferAnother(SUnit *) override;
+ void AdvanceCycle() override;
+ void RecedeCycle() override;
+ void EmitNoop() override;
+};
+
+} // end namespace llvm
+
+#endif // LLVM_CODEGEN_MULTIHAZARDRECOGNIZER_H
+
+#ifdef __GNUC__
+#pragma GCC diagnostic pop
+#endif
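MultiHazardRecognizer is a composite: it forwards each scheduling query to every wrapped recognizer and combines the answers. A simplified sketch of that shape with stand-in types follows; the assumption that the combined result is the most severe individual answer is illustrative, not a statement about the real class.

#include <algorithm>
#include <iostream>
#include <memory>
#include <utility>
#include <vector>

// 0 = NoHazard, 1 = Hazard, 2 = NoopHazard; higher is more severe.
struct Recognizer {
  virtual ~Recognizer() = default;
  virtual int getHazardType() = 0;
};

// Composite: owns several recognizers and reports the worst answer.
struct MultiRecognizer : Recognizer {
  std::vector<std::unique_ptr<Recognizer>> Recognizers;
  void add(std::unique_ptr<Recognizer> R) {
    Recognizers.push_back(std::move(R));
  }
  int getHazardType() override {
    int Worst = 0;
    for (auto &R : Recognizers)
      Worst = std::max(Worst, R->getHazardType());
    return Worst;
  }
};

struct Always : Recognizer {
  int V;
  explicit Always(int V) : V(V) {}
  int getHazardType() override { return V; }
};

int main() {
  MultiRecognizer M;
  M.add(std::make_unique<Always>(0)); // reports no hazard
  M.add(std::make_unique<Always>(1)); // reports a hazard
  std::cout << M.getHazardType() << '\n'; // prints 1
}

The design lets a target stack several special-case recognizers behind the single interface the scheduler already knows about.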
diff --git a/contrib/libs/llvm12/include/llvm/CodeGen/NonRelocatableStringpool.h b/contrib/libs/llvm12/include/llvm/CodeGen/NonRelocatableStringpool.h
index e88472a231..20805de987 100644
--- a/contrib/libs/llvm12/include/llvm/CodeGen/NonRelocatableStringpool.h
+++ b/contrib/libs/llvm12/include/llvm/CodeGen/NonRelocatableStringpool.h
@@ -46,7 +46,7 @@ public:
/// Get the offset of string \p S in the string table. This can insert a new
/// element or return the offset of a pre-existing one.
- uint64_t getStringOffset(StringRef S) { return getEntry(S).getOffset(); }
+ uint64_t getStringOffset(StringRef S) { return getEntry(S).getOffset(); }
/// Get permanent storage for \p S (but do not necessarily emit \p S in the
 /// output section). A later call to getStringOffset() with the same string
@@ -64,7 +64,7 @@ public:
private:
MapTy Strings;
- uint64_t CurrentEndOffset = 0;
+ uint64_t CurrentEndOffset = 0;
unsigned NumEntries = 0;
DwarfStringPoolEntryRef EmptyString;
std::function<StringRef(StringRef Input)> Translator;
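getStringOffset above works because the pool appends each distinct string once and remembers where it landed, with CurrentEndOffset tracking the end of the emitted data. A minimal sketch of that bookkeeping with std containers; the +1 for a NUL terminator is an assumption about the emission format, not taken from the header.

#include <cstdint>
#include <iostream>
#include <string>
#include <unordered_map>

class StringPool {
  std::unordered_map<std::string, std::uint64_t> Offsets;
  std::uint64_t CurrentEndOffset = 0;

public:
  std::uint64_t getStringOffset(const std::string &S) {
    // Insert at the current end if the string is new; otherwise return
    // the offset recorded when it was first added.
    auto [It, Inserted] = Offsets.try_emplace(S, CurrentEndOffset);
    if (Inserted)
      CurrentEndOffset += S.size() + 1; // assumes NUL-terminated emission
    return It->second;
  }
};

int main() {
  StringPool P;
  std::cout << P.getStringOffset("foo") << '\n'; // 0
  std::cout << P.getStringOffset("bar") << '\n'; // 4
  std::cout << P.getStringOffset("foo") << '\n'; // 0 again (deduplicated)
}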
diff --git a/contrib/libs/llvm12/include/llvm/CodeGen/Passes.h b/contrib/libs/llvm12/include/llvm/CodeGen/Passes.h
index 06c68498b9..d67b9b9aa0 100644
--- a/contrib/libs/llvm12/include/llvm/CodeGen/Passes.h
+++ b/contrib/libs/llvm12/include/llvm/CodeGen/Passes.h
@@ -51,16 +51,16 @@ namespace llvm {
/// the entry block.
FunctionPass *createUnreachableBlockEliminationPass();
- /// createBasicBlockSectionsPass - This pass assigns sections to machine
- /// basic blocks and is enabled with -fbasic-block-sections. Buf is a memory
- /// buffer that contains the list of functions and basic block ids to
- /// selectively enable basic block sections.
- MachineFunctionPass *createBasicBlockSectionsPass(const MemoryBuffer *Buf);
-
- /// createMachineFunctionSplitterPass - This pass splits machine functions
- /// using profile information.
- MachineFunctionPass *createMachineFunctionSplitterPass();
-
+ /// createBasicBlockSectionsPass - This pass assigns sections to machine
+ /// basic blocks and is enabled with -fbasic-block-sections. Buf is a memory
+ /// buffer that contains the list of functions and basic block ids to
+ /// selectively enable basic block sections.
+ MachineFunctionPass *createBasicBlockSectionsPass(const MemoryBuffer *Buf);
+
+ /// createMachineFunctionSplitterPass - This pass splits machine functions
+ /// using profile information.
+ MachineFunctionPass *createMachineFunctionSplitterPass();
+
/// MachineFunctionPrinter pass - This pass prints out the machine function to
/// the given stream as a debugging tool.
MachineFunctionPass *
@@ -474,9 +474,9 @@ namespace llvm {
/// Create Hardware Loop pass. \see HardwareLoops.cpp
FunctionPass *createHardwareLoopsPass();
- /// This pass inserts pseudo probe annotation for callsite profiling.
- FunctionPass *createPseudoProbeInserter();
-
+ /// This pass inserts pseudo probe annotation for callsite profiling.
+ FunctionPass *createPseudoProbeInserter();
+
/// Create IR Type Promotion pass. \see TypePromotion.cpp
FunctionPass *createTypePromotionPass();
@@ -489,16 +489,16 @@ namespace llvm {
/// info was generated by another source such as clang.
ModulePass *createStripDebugMachineModulePass(bool OnlyDebugified);
- /// Creates MIR Check Debug pass. \see MachineCheckDebugify.cpp
- ModulePass *createCheckDebugMachineModulePass();
-
+ /// Creates MIR Check Debug pass. \see MachineCheckDebugify.cpp
+ ModulePass *createCheckDebugMachineModulePass();
+
/// The pass fixups statepoint machine instruction to replace usage of
/// caller saved registers with stack slots.
extern char &FixupStatepointCallerSavedID;
-
- /// This pass transforms load/store <256 x i32> into AMX load/store intrinsics
- /// or splits the data into two <128 x i32> halves.
- FunctionPass *createX86LowerAMXTypePass();
+
+ /// This pass transforms load/store <256 x i32> into AMX load/store intrinsics
+ /// or splits the data into two <128 x i32> halves.
+ FunctionPass *createX86LowerAMXTypePass();
} // End llvm namespace
#endif
diff --git a/contrib/libs/llvm12/include/llvm/CodeGen/RDFLiveness.h b/contrib/libs/llvm12/include/llvm/CodeGen/RDFLiveness.h
index cf0c14f6b4..7a0320d7bd 100644
--- a/contrib/libs/llvm12/include/llvm/CodeGen/RDFLiveness.h
+++ b/contrib/libs/llvm12/include/llvm/CodeGen/RDFLiveness.h
@@ -25,8 +25,8 @@
#include "llvm/MC/LaneBitmask.h"
#include <map>
#include <set>
-#include <unordered_map>
-#include <unordered_set>
+#include <unordered_map>
+#include <unordered_set>
#include <utility>
namespace llvm {
@@ -37,32 +37,32 @@ class MachineDominatorTree;
class MachineRegisterInfo;
class TargetRegisterInfo;
-} // namespace llvm
-
-namespace llvm {
+} // namespace llvm
+
+namespace llvm {
namespace rdf {
-namespace detail {
-
-using NodeRef = std::pair<NodeId, LaneBitmask>;
-
-} // namespace detail
-} // namespace rdf
-} // namespace llvm
-
-namespace std {
-
-template <> struct hash<llvm::rdf::detail::NodeRef> {
- std::size_t operator()(llvm::rdf::detail::NodeRef R) const {
- return std::hash<llvm::rdf::NodeId>{}(R.first) ^
- std::hash<llvm::LaneBitmask::Type>{}(R.second.getAsInteger());
- }
-};
-
-} // namespace std
-
-namespace llvm {
-namespace rdf {
-
+namespace detail {
+
+using NodeRef = std::pair<NodeId, LaneBitmask>;
+
+} // namespace detail
+} // namespace rdf
+} // namespace llvm
+
+namespace std {
+
+template <> struct hash<llvm::rdf::detail::NodeRef> {
+ std::size_t operator()(llvm::rdf::detail::NodeRef R) const {
+ return std::hash<llvm::rdf::NodeId>{}(R.first) ^
+ std::hash<llvm::LaneBitmask::Type>{}(R.second.getAsInteger());
+ }
+};
+
+} // namespace std
+
+namespace llvm {
+namespace rdf {
+
struct Liveness {
public:
// This is really a std::map, except that it provides a non-trivial
@@ -79,9 +79,9 @@ namespace rdf {
std::map<MachineBasicBlock*,RegisterAggr> Map;
};
- using NodeRef = detail::NodeRef;
- using NodeRefSet = std::unordered_set<NodeRef>;
- using RefMap = std::unordered_map<RegisterId, NodeRefSet>;
+ using NodeRef = detail::NodeRef;
+ using NodeRefSet = std::unordered_set<NodeRef>;
+ using RefMap = std::unordered_map<RegisterId, NodeRefSet>;
Liveness(MachineRegisterInfo &mri, const DataFlowGraph &g)
: DFG(g), TRI(g.getTRI()), PRI(g.getPRI()), MDT(g.getDT()),
@@ -142,14 +142,14 @@ namespace rdf {
// Cache of mapping from node ids (for RefNodes) to the containing
// basic blocks. Not computing it each time for each node reduces
// the liveness calculation time by a large fraction.
- DenseMap<NodeId, MachineBasicBlock *> NBMap;
+ DenseMap<NodeId, MachineBasicBlock *> NBMap;
// Phi information:
//
// RealUseMap
// map: NodeId -> (map: RegisterId -> NodeRefSet)
// phi id -> (map: register -> set of reached non-phi uses)
- DenseMap<NodeId, RefMap> RealUseMap;
+ DenseMap<NodeId, RefMap> RealUseMap;
// Inverse iterated dominance frontier.
std::map<MachineBasicBlock*,std::set<MachineBasicBlock*>> IIDF;
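The std::hash specialization for rdf::detail::NodeRef above is what allows NodeRefSet to be a std::unordered_set. A self-contained sketch of the same pattern for a stand-in two-field key; as in the header, the member hashes are XOR-combined, which is compact though weaker than a proper hash-combine.

#include <cstddef>
#include <cstdint>
#include <functional>
#include <iostream>
#include <unordered_set>

using NodeId = std::uint32_t;

struct NodeRef {
  NodeId Id;
  std::uint32_t LaneBits; // stand-in for LaneBitmask's raw integer
  bool operator==(const NodeRef &O) const {
    return Id == O.Id && LaneBits == O.LaneBits;
  }
};

namespace std {
template <> struct hash<NodeRef> {
  std::size_t operator()(const NodeRef &R) const {
    // XOR-combine the member hashes, mirroring the specialization above.
    return std::hash<NodeId>{}(R.Id) ^ std::hash<std::uint32_t>{}(R.LaneBits);
  }
};
} // namespace std

int main() {
  std::unordered_set<NodeRef> S;
  S.insert({1, 0x3});
  S.insert({1, 0x3}); // duplicate; the set keeps one entry
  std::cout << S.size() << '\n'; // prints 1
}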
diff --git a/contrib/libs/llvm12/include/llvm/CodeGen/RDFRegisters.h b/contrib/libs/llvm12/include/llvm/CodeGen/RDFRegisters.h
index 58a5f7e6a0..7d77206395 100644
--- a/contrib/libs/llvm12/include/llvm/CodeGen/RDFRegisters.h
+++ b/contrib/libs/llvm12/include/llvm/CodeGen/RDFRegisters.h
@@ -98,11 +98,11 @@ namespace rdf {
bool operator< (const RegisterRef &RR) const {
return Reg < RR.Reg || (Reg == RR.Reg && Mask < RR.Mask);
}
-
- size_t hash() const {
- return std::hash<RegisterId>{}(Reg) ^
- std::hash<LaneBitmask::Type>{}(Mask.getAsInteger());
- }
+
+ size_t hash() const {
+ return std::hash<RegisterId>{}(Reg) ^
+ std::hash<LaneBitmask::Type>{}(Mask.getAsInteger());
+ }
};
@@ -138,10 +138,10 @@ namespace rdf {
return MaskInfos[Register::stackSlot2Index(MaskId)].Units;
}
- const BitVector &getUnitAliases(uint32_t U) const {
- return AliasInfos[U].Regs;
- }
-
+ const BitVector &getUnitAliases(uint32_t U) const {
+ return AliasInfos[U].Regs;
+ }
+
RegisterRef mapTo(RegisterRef RR, unsigned R) const;
const TargetRegisterInfo &getTRI() const { return TRI; }
@@ -156,16 +156,16 @@ namespace rdf {
struct MaskInfo {
BitVector Units;
};
- struct AliasInfo {
- BitVector Regs;
- };
+ struct AliasInfo {
+ BitVector Regs;
+ };
const TargetRegisterInfo &TRI;
IndexedSet<const uint32_t*> RegMasks;
std::vector<RegInfo> RegInfos;
std::vector<UnitInfo> UnitInfos;
std::vector<MaskInfo> MaskInfos;
- std::vector<AliasInfo> AliasInfos;
+ std::vector<AliasInfo> AliasInfos;
bool aliasRR(RegisterRef RA, RegisterRef RB) const;
bool aliasRM(RegisterRef RR, RegisterRef RM) const;
@@ -177,15 +177,15 @@ namespace rdf {
: Units(pri.getTRI().getNumRegUnits()), PRI(pri) {}
RegisterAggr(const RegisterAggr &RG) = default;
- unsigned count() const { return Units.count(); }
+ unsigned count() const { return Units.count(); }
bool empty() const { return Units.none(); }
bool hasAliasOf(RegisterRef RR) const;
bool hasCoverOf(RegisterRef RR) const;
- bool operator==(const RegisterAggr &A) const {
- return DenseMapInfo<BitVector>::isEqual(Units, A.Units);
- }
-
+ bool operator==(const RegisterAggr &A) const {
+ return DenseMapInfo<BitVector>::isEqual(Units, A.Units);
+ }
+
static bool isCoverOf(RegisterRef RA, RegisterRef RB,
const PhysicalRegisterInfo &PRI) {
return RegisterAggr(PRI).insert(RA).hasCoverOf(RB);
@@ -202,10 +202,10 @@ namespace rdf {
RegisterRef clearIn(RegisterRef RR) const;
RegisterRef makeRegRef() const;
- size_t hash() const {
- return DenseMapInfo<BitVector>::getHashValue(Units);
- }
-
+ size_t hash() const {
+ return DenseMapInfo<BitVector>::getHashValue(Units);
+ }
+
void print(raw_ostream &OS) const;
struct rr_iterator {
@@ -260,29 +260,29 @@ namespace rdf {
};
raw_ostream &operator<< (raw_ostream &OS, const PrintLaneMaskOpt &P);
- raw_ostream &operator<< (raw_ostream &OS, const RegisterAggr &A);
+ raw_ostream &operator<< (raw_ostream &OS, const RegisterAggr &A);
} // end namespace rdf
} // end namespace llvm
-namespace std {
- template <> struct hash<llvm::rdf::RegisterRef> {
- size_t operator()(llvm::rdf::RegisterRef A) const {
- return A.hash();
- }
- };
- template <> struct hash<llvm::rdf::RegisterAggr> {
- size_t operator()(const llvm::rdf::RegisterAggr &A) const {
- return A.hash();
- }
- };
- template <> struct equal_to<llvm::rdf::RegisterAggr> {
- bool operator()(const llvm::rdf::RegisterAggr &A,
- const llvm::rdf::RegisterAggr &B) const {
- return A == B;
- }
- };
-}
+namespace std {
+ template <> struct hash<llvm::rdf::RegisterRef> {
+ size_t operator()(llvm::rdf::RegisterRef A) const {
+ return A.hash();
+ }
+ };
+ template <> struct hash<llvm::rdf::RegisterAggr> {
+ size_t operator()(const llvm::rdf::RegisterAggr &A) const {
+ return A.hash();
+ }
+ };
+ template <> struct equal_to<llvm::rdf::RegisterAggr> {
+ bool operator()(const llvm::rdf::RegisterAggr &A,
+ const llvm::rdf::RegisterAggr &B) const {
+ return A == B;
+ }
+ };
+}
#endif // LLVM_LIB_TARGET_HEXAGON_RDFREGISTERS_H
#ifdef __GNUC__
diff --git a/contrib/libs/llvm12/include/llvm/CodeGen/ReachingDefAnalysis.h b/contrib/libs/llvm12/include/llvm/CodeGen/ReachingDefAnalysis.h
index f95f8df814..bb078a64bf 100644
--- a/contrib/libs/llvm12/include/llvm/CodeGen/ReachingDefAnalysis.h
+++ b/contrib/libs/llvm12/include/llvm/CodeGen/ReachingDefAnalysis.h
@@ -146,25 +146,25 @@ public:
/// Provides the instruction id of the closest reaching def instruction of
  /// PhysReg that reaches MI, relative to the beginning of MI's basic block.
- int getReachingDef(MachineInstr *MI, MCRegister PhysReg) const;
+ int getReachingDef(MachineInstr *MI, MCRegister PhysReg) const;
/// Return whether A and B use the same def of PhysReg.
- bool hasSameReachingDef(MachineInstr *A, MachineInstr *B,
- MCRegister PhysReg) const;
+ bool hasSameReachingDef(MachineInstr *A, MachineInstr *B,
+ MCRegister PhysReg) const;
/// Return whether the reaching def for MI also is live out of its parent
/// block.
- bool isReachingDefLiveOut(MachineInstr *MI, MCRegister PhysReg) const;
+ bool isReachingDefLiveOut(MachineInstr *MI, MCRegister PhysReg) const;
/// Return the local MI that produces the live out value for PhysReg, or
/// nullptr for a non-live out or non-local def.
MachineInstr *getLocalLiveOutMIDef(MachineBasicBlock *MBB,
- MCRegister PhysReg) const;
+ MCRegister PhysReg) const;
/// If a single MachineInstr creates the reaching definition, then return it.
/// Otherwise return null.
- MachineInstr *getUniqueReachingMIDef(MachineInstr *MI,
- MCRegister PhysReg) const;
+ MachineInstr *getUniqueReachingMIDef(MachineInstr *MI,
+ MCRegister PhysReg) const;
  /// If a single MachineInstr creates the reaching definition for MI's operand
/// at Idx, then return it. Otherwise return null.
@@ -176,46 +176,46 @@ public:
  /// Return whether the register has been defined in the same basic block as,
/// and before, MI.
- bool hasLocalDefBefore(MachineInstr *MI, MCRegister PhysReg) const;
+ bool hasLocalDefBefore(MachineInstr *MI, MCRegister PhysReg) const;
/// Return whether the given register is used after MI, whether it's a local
/// use or a live out.
- bool isRegUsedAfter(MachineInstr *MI, MCRegister PhysReg) const;
+ bool isRegUsedAfter(MachineInstr *MI, MCRegister PhysReg) const;
/// Return whether the given register is defined after MI.
- bool isRegDefinedAfter(MachineInstr *MI, MCRegister PhysReg) const;
+ bool isRegDefinedAfter(MachineInstr *MI, MCRegister PhysReg) const;
/// Provides the clearance - the number of instructions since the closest
  /// reaching def instruction of PhysReg that reaches MI.
- int getClearance(MachineInstr *MI, MCRegister PhysReg) const;
+ int getClearance(MachineInstr *MI, MCRegister PhysReg) const;
  /// Provides the uses, in the same block as MI, of the register that MI defines.
/// This does not consider live-outs.
- void getReachingLocalUses(MachineInstr *MI, MCRegister PhysReg,
+ void getReachingLocalUses(MachineInstr *MI, MCRegister PhysReg,
InstSet &Uses) const;
/// Search MBB for a definition of PhysReg and insert it into Defs. If no
/// definition is found, recursively search the predecessor blocks for them.
- void getLiveOuts(MachineBasicBlock *MBB, MCRegister PhysReg, InstSet &Defs,
+ void getLiveOuts(MachineBasicBlock *MBB, MCRegister PhysReg, InstSet &Defs,
BlockSet &VisitedBBs) const;
- void getLiveOuts(MachineBasicBlock *MBB, MCRegister PhysReg,
- InstSet &Defs) const;
+ void getLiveOuts(MachineBasicBlock *MBB, MCRegister PhysReg,
+ InstSet &Defs) const;
/// For the given block, collect the instructions that use the live-in
/// value of the provided register. Return whether the value is still
/// live on exit.
- bool getLiveInUses(MachineBasicBlock *MBB, MCRegister PhysReg,
+ bool getLiveInUses(MachineBasicBlock *MBB, MCRegister PhysReg,
InstSet &Uses) const;
/// Collect the users of the value stored in PhysReg, which is defined
/// by MI.
- void getGlobalUses(MachineInstr *MI, MCRegister PhysReg, InstSet &Uses) const;
-
- /// Collect all possible definitions of the value stored in PhysReg, which is
- /// used by MI.
- void getGlobalReachingDefs(MachineInstr *MI, MCRegister PhysReg,
- InstSet &Defs) const;
+ void getGlobalUses(MachineInstr *MI, MCRegister PhysReg, InstSet &Uses) const;
+ /// Collect all possible definitions of the value stored in PhysReg, which is
+ /// used by MI.
+ void getGlobalReachingDefs(MachineInstr *MI, MCRegister PhysReg,
+ InstSet &Defs) const;
+
/// Return whether From can be moved forwards to just before To.
bool isSafeToMoveForwards(MachineInstr *From, MachineInstr *To) const;
@@ -238,13 +238,13 @@ public:
/// Return whether a MachineInstr could be inserted at MI and safely define
/// the given register without affecting the program.
- bool isSafeToDefRegAt(MachineInstr *MI, MCRegister PhysReg) const;
+ bool isSafeToDefRegAt(MachineInstr *MI, MCRegister PhysReg) const;
/// Return whether a MachineInstr could be inserted at MI and safely define
/// the given register without affecting the program, ignoring any effects
/// on the provided instructions.
- bool isSafeToDefRegAt(MachineInstr *MI, MCRegister PhysReg,
- InstSet &Ignore) const;
+ bool isSafeToDefRegAt(MachineInstr *MI, MCRegister PhysReg,
+ InstSet &Ignore) const;
private:
/// Set up LiveRegs by merging predecessor live-out values.
@@ -279,8 +279,8 @@ private:
  /// Provides the closest reaching def instruction of PhysReg that reaches
  /// MI, relative to the beginning of MI's basic block.
- MachineInstr *getReachingLocalMIDef(MachineInstr *MI,
- MCRegister PhysReg) const;
+ MachineInstr *getReachingLocalMIDef(MachineInstr *MI,
+ MCRegister PhysReg) const;
};
} // namespace llvm
diff --git a/contrib/libs/llvm12/include/llvm/CodeGen/RegAllocPBQP.h b/contrib/libs/llvm12/include/llvm/CodeGen/RegAllocPBQP.h
index bbf491aa5b..47dcc0f554 100644
--- a/contrib/libs/llvm12/include/llvm/CodeGen/RegAllocPBQP.h
+++ b/contrib/libs/llvm12/include/llvm/CodeGen/RegAllocPBQP.h
@@ -29,8 +29,8 @@
#include "llvm/CodeGen/PBQP/Math.h"
#include "llvm/CodeGen/PBQP/ReductionRules.h"
#include "llvm/CodeGen/PBQP/Solution.h"
-#include "llvm/CodeGen/Register.h"
-#include "llvm/MC/MCRegister.h"
+#include "llvm/CodeGen/Register.h"
+#include "llvm/MC/MCRegister.h"
#include "llvm/Support/ErrorHandling.h"
#include <algorithm>
#include <cassert>
@@ -105,13 +105,13 @@ public:
AllowedRegVector() = default;
AllowedRegVector(AllowedRegVector &&) = default;
- AllowedRegVector(const std::vector<MCRegister> &OptVec)
- : NumOpts(OptVec.size()), Opts(new MCRegister[NumOpts]) {
+ AllowedRegVector(const std::vector<MCRegister> &OptVec)
+ : NumOpts(OptVec.size()), Opts(new MCRegister[NumOpts]) {
std::copy(OptVec.begin(), OptVec.end(), Opts.get());
}
unsigned size() const { return NumOpts; }
- MCRegister operator[](size_t I) const { return Opts[I]; }
+ MCRegister operator[](size_t I) const { return Opts[I]; }
bool operator==(const AllowedRegVector &Other) const {
if (NumOpts != Other.NumOpts)
@@ -125,12 +125,12 @@ public:
private:
unsigned NumOpts = 0;
- std::unique_ptr<MCRegister[]> Opts;
+ std::unique_ptr<MCRegister[]> Opts;
};
inline hash_code hash_value(const AllowedRegVector &OptRegs) {
- MCRegister *OStart = OptRegs.Opts.get();
- MCRegister *OEnd = OptRegs.Opts.get() + OptRegs.NumOpts;
+ MCRegister *OStart = OptRegs.Opts.get();
+ MCRegister *OEnd = OptRegs.Opts.get() + OptRegs.NumOpts;
return hash_combine(OptRegs.NumOpts,
hash_combine_range(OStart, OEnd));
}
@@ -152,11 +152,11 @@ public:
LiveIntervals &LIS;
MachineBlockFrequencyInfo &MBFI;
- void setNodeIdForVReg(Register VReg, GraphBase::NodeId NId) {
- VRegToNodeId[VReg.id()] = NId;
+ void setNodeIdForVReg(Register VReg, GraphBase::NodeId NId) {
+ VRegToNodeId[VReg.id()] = NId;
}
- GraphBase::NodeId getNodeIdForVReg(Register VReg) const {
+ GraphBase::NodeId getNodeIdForVReg(Register VReg) const {
auto VRegItr = VRegToNodeId.find(VReg);
if (VRegItr == VRegToNodeId.end())
return GraphBase::invalidNodeId();
@@ -168,7 +168,7 @@ public:
}
private:
- DenseMap<Register, GraphBase::NodeId> VRegToNodeId;
+ DenseMap<Register, GraphBase::NodeId> VRegToNodeId;
AllowedRegVecPool AllowedRegVecs;
};
@@ -206,8 +206,8 @@ public:
NodeMetadata(NodeMetadata &&) = default;
NodeMetadata& operator=(NodeMetadata &&) = default;
- void setVReg(Register VReg) { this->VReg = VReg; }
- Register getVReg() const { return VReg; }
+ void setVReg(Register VReg) { this->VReg = VReg; }
+ Register getVReg() const { return VReg; }
void setAllowedRegs(GraphMetadata::AllowedRegVecRef AllowedRegs) {
this->AllowedRegs = std::move(AllowedRegs);
@@ -265,7 +265,7 @@ private:
unsigned NumOpts = 0;
unsigned DeniedOpts = 0;
std::unique_ptr<unsigned[]> OptUnsafeEdges;
- Register VReg;
+ Register VReg;
GraphMetadata::AllowedRegVecRef AllowedRegs;
#ifndef NDEBUG
diff --git a/contrib/libs/llvm12/include/llvm/CodeGen/Register.h b/contrib/libs/llvm12/include/llvm/CodeGen/Register.h
index c76978991a..1741bcd649 100644
--- a/contrib/libs/llvm12/include/llvm/CodeGen/Register.h
+++ b/contrib/libs/llvm12/include/llvm/CodeGen/Register.h
@@ -47,24 +47,24 @@ public:
/// frame index in a variable that normally holds a register. isStackSlot()
/// returns true if Reg is in the range used for stack slots.
///
- /// FIXME: remove in favor of member.
+ /// FIXME: remove in favor of member.
static bool isStackSlot(unsigned Reg) {
return MCRegister::isStackSlot(Reg);
}
- /// Return true if this is a stack slot.
- bool isStack() const { return MCRegister::isStackSlot(Reg); }
-
+ /// Return true if this is a stack slot.
+ bool isStack() const { return MCRegister::isStackSlot(Reg); }
+
/// Compute the frame index from a register value representing a stack slot.
- static int stackSlot2Index(Register Reg) {
- assert(Reg.isStack() && "Not a stack slot");
+ static int stackSlot2Index(Register Reg) {
+ assert(Reg.isStack() && "Not a stack slot");
return int(Reg - MCRegister::FirstStackSlot);
}
/// Convert a non-negative frame index to a stack slot register value.
- static Register index2StackSlot(int FI) {
+ static Register index2StackSlot(int FI) {
assert(FI >= 0 && "Cannot hold a negative frame index.");
- return Register(FI + MCRegister::FirstStackSlot);
+ return Register(FI + MCRegister::FirstStackSlot);
}
/// Return true if the specified register number is in
@@ -76,19 +76,19 @@ public:
/// Return true if the specified register number is in
/// the virtual register namespace.
static bool isVirtualRegister(unsigned Reg) {
- return Reg & MCRegister::VirtualRegFlag && !isStackSlot(Reg);
+ return Reg & MCRegister::VirtualRegFlag && !isStackSlot(Reg);
}
/// Convert a virtual register number to a 0-based index.
/// The first virtual register in a function will get the index 0.
- static unsigned virtReg2Index(Register Reg) {
+ static unsigned virtReg2Index(Register Reg) {
assert(isVirtualRegister(Reg) && "Not a virtual register");
return Reg & ~MCRegister::VirtualRegFlag;
}
/// Convert a 0-based index to a virtual register number.
/// This is the inverse operation of VirtReg2IndexFunctor below.
- static Register index2VirtReg(unsigned Index) {
+ static Register index2VirtReg(unsigned Index) {
assert(Index < (1u << 31) && "Index too large for virtual register range.");
return Index | MCRegister::VirtualRegFlag;
}
@@ -121,15 +121,15 @@ public:
return MCRegister(Reg);
}
- /// Utility to check-convert this value to an MCRegister. The caller is
- /// expected to have already validated that this Register is, indeed,
- /// physical.
- MCRegister asMCReg() const {
- assert(Reg == MCRegister::NoRegister ||
- MCRegister::isPhysicalRegister(Reg));
- return MCRegister(Reg);
- }
-
+ /// Utility to check-convert this value to an MCRegister. The caller is
+ /// expected to have already validated that this Register is, indeed,
+ /// physical.
+ MCRegister asMCReg() const {
+ assert(Reg == MCRegister::NoRegister ||
+ MCRegister::isPhysicalRegister(Reg));
+ return MCRegister(Reg);
+ }
+
bool isValid() const { return Reg != MCRegister::NoRegister; }
/// Comparisons between register objects
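
The stack-slot and virtual-register helpers in this hunk all operate on one packed unsigned encoding. A standalone sketch of the round trips, with illustrative stand-ins for MCRegister::FirstStackSlot and MCRegister::VirtualRegFlag (the real values and range checks may differ):

    #include <cassert>

    constexpr unsigned FirstStackSlot = 1u << 30; // illustrative
    constexpr unsigned VirtualRegFlag = 1u << 31; // illustrative

    constexpr bool isStackSlot(unsigned Reg) {
      return Reg >= FirstStackSlot && Reg < VirtualRegFlag;
    }
    constexpr bool isVirtualRegister(unsigned Reg) {
      return (Reg & VirtualRegFlag) && !isStackSlot(Reg);
    }

    int main() {
      // Frame index <-> stack slot register value round-trips.
      unsigned Slot = 5 + FirstStackSlot;  // index2StackSlot(5)
      assert(isStackSlot(Slot) && int(Slot - FirstStackSlot) == 5);

      // Virtual register index <-> register number round-trips.
      unsigned VReg = 7u | VirtualRegFlag; // index2VirtReg(7)
      assert(isVirtualRegister(VReg) && (VReg & ~VirtualRegFlag) == 7u);
      return 0;
    }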
diff --git a/contrib/libs/llvm12/include/llvm/CodeGen/RegisterPressure.h b/contrib/libs/llvm12/include/llvm/CodeGen/RegisterPressure.h
index 11702d47e2..b8c675889d 100644
--- a/contrib/libs/llvm12/include/llvm/CodeGen/RegisterPressure.h
+++ b/contrib/libs/llvm12/include/llvm/CodeGen/RegisterPressure.h
@@ -44,10 +44,10 @@ class MachineRegisterInfo;
class RegisterClassInfo;
struct RegisterMaskPair {
- Register RegUnit; ///< Virtual register or register unit.
+ Register RegUnit; ///< Virtual register or register unit.
LaneBitmask LaneMask;
- RegisterMaskPair(Register RegUnit, LaneBitmask LaneMask)
+ RegisterMaskPair(Register RegUnit, LaneBitmask LaneMask)
: RegUnit(RegUnit), LaneMask(LaneMask) {}
};
@@ -164,7 +164,7 @@ public:
const_iterator begin() const { return &PressureChanges[0]; }
const_iterator end() const { return &PressureChanges[MaxPSets]; }
- void addPressureChange(Register RegUnit, bool IsDec,
+ void addPressureChange(Register RegUnit, bool IsDec,
const MachineRegisterInfo *MRI);
void dump(const TargetRegisterInfo &TRI) const;
@@ -282,24 +282,24 @@ private:
RegSet Regs;
unsigned NumRegUnits;
- unsigned getSparseIndexFromReg(Register Reg) const {
- if (Reg.isVirtual())
+ unsigned getSparseIndexFromReg(Register Reg) const {
+ if (Reg.isVirtual())
return Register::virtReg2Index(Reg) + NumRegUnits;
assert(Reg < NumRegUnits);
return Reg;
}
- Register getRegFromSparseIndex(unsigned SparseIndex) const {
+ Register getRegFromSparseIndex(unsigned SparseIndex) const {
if (SparseIndex >= NumRegUnits)
- return Register::index2VirtReg(SparseIndex - NumRegUnits);
- return Register(SparseIndex);
+ return Register::index2VirtReg(SparseIndex - NumRegUnits);
+ return Register(SparseIndex);
}
public:
void clear();
void init(const MachineRegisterInfo &MRI);
- LaneBitmask contains(Register Reg) const {
+ LaneBitmask contains(Register Reg) const {
unsigned SparseIndex = getSparseIndexFromReg(Reg);
RegSet::const_iterator I = Regs.find(SparseIndex);
if (I == Regs.end())
@@ -339,7 +339,7 @@ public:
template<typename ContainerT>
void appendTo(ContainerT &To) const {
for (const IndexMaskPair &P : Regs) {
- Register Reg = getRegFromSparseIndex(P.Index);
+ Register Reg = getRegFromSparseIndex(P.Index);
if (P.LaneMask.any())
To.push_back(RegisterMaskPair(Reg, P.LaneMask));
}
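
getSparseIndexFromReg and getRegFromSparseIndex fold two namespaces into one dense index space: physical register units keep [0, NumRegUnits) and virtual-register indices are shifted up by NumRegUnits. A standalone sketch of that scheme (structure and values are illustrative):

    #include <cassert>

    struct SparseIndexer {
      unsigned NumRegUnits;

      unsigned indexFromVirtIndex(unsigned VirtIdx) const {
        return VirtIdx + NumRegUnits;
      }
      bool isVirtual(unsigned SparseIndex) const {
        return SparseIndex >= NumRegUnits;
      }
      unsigned virtIndexFromIndex(unsigned SparseIndex) const {
        assert(isVirtual(SparseIndex) && "not a virtual-register index");
        return SparseIndex - NumRegUnits;
      }
    };

    int main() {
      SparseIndexer Idx{128};
      assert(Idx.indexFromVirtIndex(3) == 131);
      assert(Idx.virtIndexFromIndex(131) == 3);
      return 0;
    }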
@@ -397,7 +397,7 @@ class RegPressureTracker {
LiveRegSet LiveRegs;
/// Set of vreg defs that start a live range.
- SparseSet<Register, VirtReg2IndexFunctor> UntiedDefs;
+ SparseSet<Register, VirtReg2IndexFunctor> UntiedDefs;
/// Live-through pressure.
std::vector<unsigned> LiveThruPressure;
@@ -539,7 +539,7 @@ public:
return getDownwardPressure(MI, PressureResult, MaxPressureResult);
}
- bool hasUntiedDef(Register VirtReg) const {
+ bool hasUntiedDef(Register VirtReg) const {
return UntiedDefs.count(VirtReg);
}
@@ -555,9 +555,9 @@ protected:
/// after the current position.
SlotIndex getCurrSlot() const;
- void increaseRegPressure(Register RegUnit, LaneBitmask PreviousMask,
+ void increaseRegPressure(Register RegUnit, LaneBitmask PreviousMask,
LaneBitmask NewMask);
- void decreaseRegPressure(Register RegUnit, LaneBitmask PreviousMask,
+ void decreaseRegPressure(Register RegUnit, LaneBitmask PreviousMask,
LaneBitmask NewMask);
void bumpDeadDefs(ArrayRef<RegisterMaskPair> DeadDefs);
@@ -568,9 +568,9 @@ protected:
void discoverLiveInOrOut(RegisterMaskPair Pair,
SmallVectorImpl<RegisterMaskPair> &LiveInOrOut);
- LaneBitmask getLastUsedLanes(Register RegUnit, SlotIndex Pos) const;
- LaneBitmask getLiveLanesAt(Register RegUnit, SlotIndex Pos) const;
- LaneBitmask getLiveThroughAt(Register RegUnit, SlotIndex Pos) const;
+ LaneBitmask getLastUsedLanes(Register RegUnit, SlotIndex Pos) const;
+ LaneBitmask getLiveLanesAt(Register RegUnit, SlotIndex Pos) const;
+ LaneBitmask getLiveThroughAt(Register RegUnit, SlotIndex Pos) const;
};
void dumpRegSetPressure(ArrayRef<unsigned> SetPressure,
diff --git a/contrib/libs/llvm12/include/llvm/CodeGen/RegisterScavenging.h b/contrib/libs/llvm12/include/llvm/CodeGen/RegisterScavenging.h
index 51712f3270..54104dcf36 100644
--- a/contrib/libs/llvm12/include/llvm/CodeGen/RegisterScavenging.h
+++ b/contrib/libs/llvm12/include/llvm/CodeGen/RegisterScavenging.h
@@ -201,10 +201,10 @@ private:
void determineKillsAndDefs();
/// Add all Reg Units that Reg contains to BV.
- void addRegUnits(BitVector &BV, MCRegister Reg);
+ void addRegUnits(BitVector &BV, MCRegister Reg);
/// Remove all Reg Units that \p Reg contains from \p BV.
- void removeRegUnits(BitVector &BV, MCRegister Reg);
+ void removeRegUnits(BitVector &BV, MCRegister Reg);
/// Return the candidate register that is unused for the longest after
/// StartMI. UseMI is set to the instruction where the search stopped.
diff --git a/contrib/libs/llvm12/include/llvm/CodeGen/RuntimeLibcalls.h b/contrib/libs/llvm12/include/llvm/CodeGen/RuntimeLibcalls.h
index 3aed932f92..9307012e8b 100644
--- a/contrib/libs/llvm12/include/llvm/CodeGen/RuntimeLibcalls.h
+++ b/contrib/libs/llvm12/include/llvm/CodeGen/RuntimeLibcalls.h
@@ -22,7 +22,7 @@
#define LLVM_CODEGEN_RUNTIMELIBCALLS_H
#include "llvm/CodeGen/ValueTypes.h"
-#include "llvm/Support/AtomicOrdering.h"
+#include "llvm/Support/AtomicOrdering.h"
namespace llvm {
namespace RTLIB {
@@ -68,10 +68,10 @@ namespace RTLIB {
/// UNKNOWN_LIBCALL if there is none.
Libcall getSYNC(unsigned Opc, MVT VT);
- /// Return the outline atomics value for the given opcode, atomic ordering
- /// and type, or UNKNOWN_LIBCALL if there is none.
- Libcall getOUTLINE_ATOMIC(unsigned Opc, AtomicOrdering Order, MVT VT);
-
+ /// Return the outline atomics value for the given opcode, atomic ordering
+ /// and type, or UNKNOWN_LIBCALL if there is none.
+ Libcall getOUTLINE_ATOMIC(unsigned Opc, AtomicOrdering Order, MVT VT);
+
/// getMEMCPY_ELEMENT_UNORDERED_ATOMIC - Return
/// MEMCPY_ELEMENT_UNORDERED_ATOMIC_* value for the given element size or
/// UNKNOWN_LIBCALL if there is none.
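
getOUTLINE_ATOMIC selects a libcall from an (opcode, atomic ordering, type) triple. A hypothetical sketch of that dispatch shape; every enumerator below is invented for illustration and is not one of LLVM's RTLIB names:

    enum class OutlineAtomic {
      CAS4_Relax, CAS4_Acq, CAS4_Rel, CAS4_AcqRel,
      Unknown,
    };

    enum class Ordering { Relaxed, Acquire, Release, AcquireRelease };

    // Pick the 4-byte compare-and-swap variant for a given ordering,
    // falling back to Unknown (the analogue of UNKNOWN_LIBCALL).
    OutlineAtomic getOutlineAtomicCas4(Ordering Order) {
      switch (Order) {
      case Ordering::Relaxed:        return OutlineAtomic::CAS4_Relax;
      case Ordering::Acquire:        return OutlineAtomic::CAS4_Acq;
      case Ordering::Release:        return OutlineAtomic::CAS4_Rel;
      case Ordering::AcquireRelease: return OutlineAtomic::CAS4_AcqRel;
      }
      return OutlineAtomic::Unknown;
    }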
diff --git a/contrib/libs/llvm12/include/llvm/CodeGen/ScheduleDAGInstrs.h b/contrib/libs/llvm12/include/llvm/CodeGen/ScheduleDAGInstrs.h
index 0ac94e6038..01e55707ec 100644
--- a/contrib/libs/llvm12/include/llvm/CodeGen/ScheduleDAGInstrs.h
+++ b/contrib/libs/llvm12/include/llvm/CodeGen/ScheduleDAGInstrs.h
@@ -275,11 +275,11 @@ namespace llvm {
return SU->SchedClass;
}
- /// IsReachable - Checks if SU is reachable from TargetSU.
- bool IsReachable(SUnit *SU, SUnit *TargetSU) {
- return Topo.IsReachable(SU, TargetSU);
- }
-
+ /// IsReachable - Checks if SU is reachable from TargetSU.
+ bool IsReachable(SUnit *SU, SUnit *TargetSU) {
+ return Topo.IsReachable(SU, TargetSU);
+ }
+
/// Returns an iterator to the top of the current scheduling region.
MachineBasicBlock::iterator begin() const { return RegionBegin; }
@@ -395,7 +395,7 @@ namespace llvm {
/// Returns an existing SUnit for this MI, or nullptr.
inline SUnit *ScheduleDAGInstrs::getSUnit(MachineInstr *MI) const {
- return MISUnitMap.lookup(MI);
+ return MISUnitMap.lookup(MI);
}
} // end namespace llvm
diff --git a/contrib/libs/llvm12/include/llvm/CodeGen/ScheduleHazardRecognizer.h b/contrib/libs/llvm12/include/llvm/CodeGen/ScheduleHazardRecognizer.h
index 8a3d25f4a9..baf035211c 100644
--- a/contrib/libs/llvm12/include/llvm/CodeGen/ScheduleHazardRecognizer.h
+++ b/contrib/libs/llvm12/include/llvm/CodeGen/ScheduleHazardRecognizer.h
@@ -64,7 +64,7 @@ public:
/// other instruction is available, issue it first.
/// * NoopHazard: issuing this instruction would break the program. If
/// some other instruction can be issued, do so, otherwise issue a noop.
- virtual HazardType getHazardType(SUnit *, int Stalls = 0) {
+ virtual HazardType getHazardType(SUnit *, int Stalls = 0) {
return NoHazard;
}
@@ -121,14 +121,14 @@ public:
// Default implementation: count it as a cycle.
AdvanceCycle();
}
-
- /// EmitNoops - This callback is invoked when noops were added to the
- /// instruction stream.
- virtual void EmitNoops(unsigned Quantity) {
- // Default implementation: count it as a cycle.
- for (unsigned i = 0; i < Quantity; ++i)
- EmitNoop();
- }
+
+ /// EmitNoops - This callback is invoked when noops were added to the
+ /// instruction stream.
+ virtual void EmitNoops(unsigned Quantity) {
+ // Default implementation: count it as a cycle.
+ for (unsigned i = 0; i < Quantity; ++i)
+ EmitNoop();
+ }
};
} // end namespace llvm
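
The hooks in this header are meant to be overridden per target. A minimal sketch of a subclass, assuming only the virtual methods visible in the hunks above (getHazardType, AdvanceCycle, EmitNoop); the policy it implements is invented for illustration:

    #include "llvm/CodeGen/ScheduleHazardRecognizer.h"

    namespace {
    class CountingHazardRecognizer : public llvm::ScheduleHazardRecognizer {
      unsigned CyclesAdvanced = 0;
      unsigned NoopsEmitted = 0;

    public:
      // Illustrative policy: never block an instruction outright.
      HazardType getHazardType(llvm::SUnit *SU, int Stalls) override {
        return NoHazard;
      }
      void AdvanceCycle() override { ++CyclesAdvanced; }
      // EmitNoops' default implementation calls EmitNoop once per noop,
      // so overriding EmitNoop alone is enough to observe all of them.
      void EmitNoop() override { ++NoopsEmitted; }
    };
    } // namespace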
diff --git a/contrib/libs/llvm12/include/llvm/CodeGen/SelectionDAG.h b/contrib/libs/llvm12/include/llvm/CodeGen/SelectionDAG.h
index 6dd6fcb303..1e031095f6 100644
--- a/contrib/libs/llvm12/include/llvm/CodeGen/SelectionDAG.h
+++ b/contrib/libs/llvm12/include/llvm/CodeGen/SelectionDAG.h
@@ -70,7 +70,7 @@ class ConstantFP;
class ConstantInt;
class DataLayout;
struct fltSemantics;
-class FunctionLoweringInfo;
+class FunctionLoweringInfo;
class GlobalValue;
struct KnownBits;
class LegacyDivergenceAnalysis;
@@ -338,29 +338,29 @@ public:
virtual void anchor();
};
- /// Helps to insert SDNodeFlags automatically when transforming. Uses
- /// RAII to save and restore flags in the current scope.
- class FlagInserter {
- SelectionDAG &DAG;
- SDNodeFlags Flags;
- FlagInserter *LastInserter;
-
- public:
- FlagInserter(SelectionDAG &SDAG, SDNodeFlags Flags)
- : DAG(SDAG), Flags(Flags),
- LastInserter(SDAG.getFlagInserter()) {
- SDAG.setFlagInserter(this);
- }
- FlagInserter(SelectionDAG &SDAG, SDNode *N)
- : FlagInserter(SDAG, N->getFlags()) {}
-
- FlagInserter(const FlagInserter &) = delete;
- FlagInserter &operator=(const FlagInserter &) = delete;
- ~FlagInserter() { DAG.setFlagInserter(LastInserter); }
-
- const SDNodeFlags getFlags() const { return Flags; }
- };
-
+ /// Helps to insert SDNodeFlags automatically when transforming. Uses
+ /// RAII to save and restore flags in the current scope.
+ class FlagInserter {
+ SelectionDAG &DAG;
+ SDNodeFlags Flags;
+ FlagInserter *LastInserter;
+
+ public:
+ FlagInserter(SelectionDAG &SDAG, SDNodeFlags Flags)
+ : DAG(SDAG), Flags(Flags),
+ LastInserter(SDAG.getFlagInserter()) {
+ SDAG.setFlagInserter(this);
+ }
+ FlagInserter(SelectionDAG &SDAG, SDNode *N)
+ : FlagInserter(SDAG, N->getFlags()) {}
+
+ FlagInserter(const FlagInserter &) = delete;
+ FlagInserter &operator=(const FlagInserter &) = delete;
+ ~FlagInserter() { DAG.setFlagInserter(LastInserter); }
+
+ const SDNodeFlags getFlags() const { return Flags; }
+ };
+
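
A usage sketch for the FlagInserter RAII helper added above: while one is in scope, the flag-less getNode overloads declared later in this diff pick up its flags automatically. The reassociation itself is a made-up example:

    #include "llvm/CodeGen/SelectionDAG.h"
    using namespace llvm;

    static SDValue reassociateAdd(SelectionDAG &DAG, const SDLoc &DL, EVT VT,
                                  SDValue A, SDValue B, SDValue C,
                                  SDNodeFlags Flags) {
      // Every node built in this scope inherits Flags (e.g. nuw/nsw);
      // the previous inserter is restored when FI is destroyed.
      SelectionDAG::FlagInserter FI(DAG, Flags);
      SDValue AB = DAG.getNode(ISD::ADD, DL, VT, A, B);
      return DAG.getNode(ISD::ADD, DL, VT, AB, C);
    }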
/// When true, additional steps are taken to
/// ensure that getConstant() and similar functions return DAG nodes that
/// have legal types. This is important after type legalization since
@@ -463,9 +463,9 @@ public:
ProfileSummaryInfo *getPSI() const { return PSI; }
BlockFrequencyInfo *getBFI() const { return BFI; }
- FlagInserter *getFlagInserter() { return Inserter; }
- void setFlagInserter(FlagInserter *FI) { Inserter = FI; }
-
+ FlagInserter *getFlagInserter() { return Inserter; }
+ void setFlagInserter(FlagInserter *FI) { Inserter = FI; }
+
/// Just dump dot graph to a user-provided path and title.
/// This doesn't open the dot viewer program and
/// helps visualization when outside debugging session.
@@ -901,7 +901,7 @@ public:
/// Returns sum of the base pointer and offset.
/// Unlike getObjectPtrOffset this does not set NoUnsignedWrap by default.
- SDValue getMemBasePlusOffset(SDValue Base, TypeSize Offset, const SDLoc &DL,
+ SDValue getMemBasePlusOffset(SDValue Base, TypeSize Offset, const SDLoc &DL,
const SDNodeFlags Flags = SDNodeFlags());
SDValue getMemBasePlusOffset(SDValue Base, SDValue Offset, const SDLoc &DL,
const SDNodeFlags Flags = SDNodeFlags());
@@ -909,7 +909,7 @@ public:
/// Create an add instruction with appropriate flags when used for
/// addressing some offset of an object, i.e., if a load is split into multiple
/// components, create an add nuw from the base pointer to the offset.
- SDValue getObjectPtrOffset(const SDLoc &SL, SDValue Ptr, TypeSize Offset) {
+ SDValue getObjectPtrOffset(const SDLoc &SL, SDValue Ptr, TypeSize Offset) {
SDNodeFlags Flags;
Flags.setNoUnsignedWrap(true);
return getMemBasePlusOffset(Ptr, Offset, SL, Flags);
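
A sketch of the load-splitting use case this comment describes, assuming a TypeSize-based byte offset; the surrounding lowering code is omitted:

    #include "llvm/CodeGen/SelectionDAG.h"
    using namespace llvm;

    // Pointer to the second half of a split memory access. The nuw flag is
    // implied by getObjectPtrOffset: the offset stays inside the object.
    static SDValue secondHalfPointer(SelectionDAG &DAG, const SDLoc &DL,
                                     SDValue BasePtr, uint64_t HalfBytes) {
      return DAG.getObjectPtrOffset(DL, BasePtr, TypeSize::Fixed(HalfBytes));
    }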
@@ -976,31 +976,31 @@ public:
SDValue getNode(unsigned Opcode, const SDLoc &DL, EVT VT,
ArrayRef<SDUse> Ops);
SDValue getNode(unsigned Opcode, const SDLoc &DL, EVT VT,
- ArrayRef<SDValue> Ops, const SDNodeFlags Flags);
+ ArrayRef<SDValue> Ops, const SDNodeFlags Flags);
SDValue getNode(unsigned Opcode, const SDLoc &DL, ArrayRef<EVT> ResultTys,
ArrayRef<SDValue> Ops);
SDValue getNode(unsigned Opcode, const SDLoc &DL, SDVTList VTList,
- ArrayRef<SDValue> Ops, const SDNodeFlags Flags);
-
- // Use flags from current flag inserter.
- SDValue getNode(unsigned Opcode, const SDLoc &DL, EVT VT,
- ArrayRef<SDValue> Ops);
- SDValue getNode(unsigned Opcode, const SDLoc &DL, SDVTList VTList,
- ArrayRef<SDValue> Ops);
- SDValue getNode(unsigned Opcode, const SDLoc &DL, EVT VT, SDValue Operand);
- SDValue getNode(unsigned Opcode, const SDLoc &DL, EVT VT, SDValue N1,
- SDValue N2);
- SDValue getNode(unsigned Opcode, const SDLoc &DL, EVT VT, SDValue N1,
- SDValue N2, SDValue N3);
-
+ ArrayRef<SDValue> Ops, const SDNodeFlags Flags);
+
+ // Use flags from current flag inserter.
+ SDValue getNode(unsigned Opcode, const SDLoc &DL, EVT VT,
+ ArrayRef<SDValue> Ops);
+ SDValue getNode(unsigned Opcode, const SDLoc &DL, SDVTList VTList,
+ ArrayRef<SDValue> Ops);
+ SDValue getNode(unsigned Opcode, const SDLoc &DL, EVT VT, SDValue Operand);
+ SDValue getNode(unsigned Opcode, const SDLoc &DL, EVT VT, SDValue N1,
+ SDValue N2);
+ SDValue getNode(unsigned Opcode, const SDLoc &DL, EVT VT, SDValue N1,
+ SDValue N2, SDValue N3);
+
// Specialize based on number of operands.
SDValue getNode(unsigned Opcode, const SDLoc &DL, EVT VT);
SDValue getNode(unsigned Opcode, const SDLoc &DL, EVT VT, SDValue Operand,
- const SDNodeFlags Flags);
+ const SDNodeFlags Flags);
SDValue getNode(unsigned Opcode, const SDLoc &DL, EVT VT, SDValue N1,
- SDValue N2, const SDNodeFlags Flags);
+ SDValue N2, const SDNodeFlags Flags);
SDValue getNode(unsigned Opcode, const SDLoc &DL, EVT VT, SDValue N1,
- SDValue N2, SDValue N3, const SDNodeFlags Flags);
+ SDValue N2, SDValue N3, const SDNodeFlags Flags);
SDValue getNode(unsigned Opcode, const SDLoc &DL, EVT VT, SDValue N1,
SDValue N2, SDValue N3, SDValue N4);
SDValue getNode(unsigned Opcode, const SDLoc &DL, EVT VT, SDValue N1,
@@ -1210,12 +1210,12 @@ public:
SDValue getLifetimeNode(bool IsStart, const SDLoc &dl, SDValue Chain,
int FrameIndex, int64_t Size, int64_t Offset = -1);
- /// Creates a PseudoProbeSDNode with function GUID `Guid` and
- /// the index of the block `Index` it is probing, as well as the attributes
- /// `attr` of the probe.
- SDValue getPseudoProbeNode(const SDLoc &Dl, SDValue Chain, uint64_t Guid,
- uint64_t Index, uint32_t Attr);
-
+ /// Creates a PseudoProbeSDNode with function GUID `Guid` and
+ /// the index of the block `Index` it is probing, as well as the attributes
+ /// `attr` of the probe.
+ SDValue getPseudoProbeNode(const SDLoc &Dl, SDValue Chain, uint64_t Guid,
+ uint64_t Index, uint32_t Attr);
+
/// Create a MERGE_VALUES node from the given operands.
SDValue getMergeValues(ArrayRef<SDValue> Ops, const SDLoc &dl);
@@ -1225,15 +1225,15 @@ public:
/// This function will set the MOLoad flag on MMOFlags, but you can set it if
/// you want. The MOStore flag must not be set.
SDValue getLoad(EVT VT, const SDLoc &dl, SDValue Chain, SDValue Ptr,
- MachinePointerInfo PtrInfo,
- MaybeAlign Alignment = MaybeAlign(),
+ MachinePointerInfo PtrInfo,
+ MaybeAlign Alignment = MaybeAlign(),
MachineMemOperand::Flags MMOFlags = MachineMemOperand::MONone,
const AAMDNodes &AAInfo = AAMDNodes(),
const MDNode *Ranges = nullptr);
/// FIXME: Remove once transition to Align is over.
inline SDValue
getLoad(EVT VT, const SDLoc &dl, SDValue Chain, SDValue Ptr,
- MachinePointerInfo PtrInfo, unsigned Alignment,
+ MachinePointerInfo PtrInfo, unsigned Alignment,
MachineMemOperand::Flags MMOFlags = MachineMemOperand::MONone,
const AAMDNodes &AAInfo = AAMDNodes(),
const MDNode *Ranges = nullptr) {
@@ -1245,14 +1245,14 @@ public:
SDValue
getExtLoad(ISD::LoadExtType ExtType, const SDLoc &dl, EVT VT, SDValue Chain,
SDValue Ptr, MachinePointerInfo PtrInfo, EVT MemVT,
- MaybeAlign Alignment = MaybeAlign(),
+ MaybeAlign Alignment = MaybeAlign(),
MachineMemOperand::Flags MMOFlags = MachineMemOperand::MONone,
const AAMDNodes &AAInfo = AAMDNodes());
/// FIXME: Remove once transition to Align is over.
inline SDValue
getExtLoad(ISD::LoadExtType ExtType, const SDLoc &dl, EVT VT, SDValue Chain,
SDValue Ptr, MachinePointerInfo PtrInfo, EVT MemVT,
- unsigned Alignment,
+ unsigned Alignment,
MachineMemOperand::Flags MMOFlags = MachineMemOperand::MONone,
const AAMDNodes &AAInfo = AAMDNodes()) {
return getExtLoad(ExtType, dl, VT, Chain, Ptr, PtrInfo, MemVT,
@@ -1269,12 +1269,12 @@ public:
MachineMemOperand::Flags MMOFlags = MachineMemOperand::MONone,
const AAMDNodes &AAInfo = AAMDNodes(),
const MDNode *Ranges = nullptr);
- inline SDValue getLoad(
- ISD::MemIndexedMode AM, ISD::LoadExtType ExtType, EVT VT, const SDLoc &dl,
- SDValue Chain, SDValue Ptr, SDValue Offset, MachinePointerInfo PtrInfo,
- EVT MemVT, MaybeAlign Alignment = MaybeAlign(),
- MachineMemOperand::Flags MMOFlags = MachineMemOperand::MONone,
- const AAMDNodes &AAInfo = AAMDNodes(), const MDNode *Ranges = nullptr) {
+ inline SDValue getLoad(
+ ISD::MemIndexedMode AM, ISD::LoadExtType ExtType, EVT VT, const SDLoc &dl,
+ SDValue Chain, SDValue Ptr, SDValue Offset, MachinePointerInfo PtrInfo,
+ EVT MemVT, MaybeAlign Alignment = MaybeAlign(),
+ MachineMemOperand::Flags MMOFlags = MachineMemOperand::MONone,
+ const AAMDNodes &AAInfo = AAMDNodes(), const MDNode *Ranges = nullptr) {
// Ensures that codegen never sees a None Alignment.
return getLoad(AM, ExtType, VT, dl, Chain, Ptr, Offset, PtrInfo, MemVT,
Alignment.getValueOr(getEVTAlign(MemVT)), MMOFlags, AAInfo,
@@ -1284,7 +1284,7 @@ public:
inline SDValue
getLoad(ISD::MemIndexedMode AM, ISD::LoadExtType ExtType, EVT VT,
const SDLoc &dl, SDValue Chain, SDValue Ptr, SDValue Offset,
- MachinePointerInfo PtrInfo, EVT MemVT, unsigned Alignment,
+ MachinePointerInfo PtrInfo, EVT MemVT, unsigned Alignment,
MachineMemOperand::Flags MMOFlags = MachineMemOperand::MONone,
const AAMDNodes &AAInfo = AAMDNodes(),
const MDNode *Ranges = nullptr) {
@@ -1307,7 +1307,7 @@ public:
const AAMDNodes &AAInfo = AAMDNodes());
inline SDValue
getStore(SDValue Chain, const SDLoc &dl, SDValue Val, SDValue Ptr,
- MachinePointerInfo PtrInfo, MaybeAlign Alignment = MaybeAlign(),
+ MachinePointerInfo PtrInfo, MaybeAlign Alignment = MaybeAlign(),
MachineMemOperand::Flags MMOFlags = MachineMemOperand::MONone,
const AAMDNodes &AAInfo = AAMDNodes()) {
return getStore(Chain, dl, Val, Ptr, PtrInfo,
@@ -1317,7 +1317,7 @@ public:
/// FIXME: Remove once transition to Align is over.
inline SDValue
getStore(SDValue Chain, const SDLoc &dl, SDValue Val, SDValue Ptr,
- MachinePointerInfo PtrInfo, unsigned Alignment,
+ MachinePointerInfo PtrInfo, unsigned Alignment,
MachineMemOperand::Flags MMOFlags = MachineMemOperand::MONone,
const AAMDNodes &AAInfo = AAMDNodes()) {
return getStore(Chain, dl, Val, Ptr, PtrInfo, MaybeAlign(Alignment),
@@ -1332,8 +1332,8 @@ public:
const AAMDNodes &AAInfo = AAMDNodes());
inline SDValue
getTruncStore(SDValue Chain, const SDLoc &dl, SDValue Val, SDValue Ptr,
- MachinePointerInfo PtrInfo, EVT SVT,
- MaybeAlign Alignment = MaybeAlign(),
+ MachinePointerInfo PtrInfo, EVT SVT,
+ MaybeAlign Alignment = MaybeAlign(),
MachineMemOperand::Flags MMOFlags = MachineMemOperand::MONone,
const AAMDNodes &AAInfo = AAMDNodes()) {
return getTruncStore(Chain, dl, Val, Ptr, PtrInfo, SVT,
@@ -1343,7 +1343,7 @@ public:
/// FIXME: Remove once transition to Align is over.
inline SDValue
getTruncStore(SDValue Chain, const SDLoc &dl, SDValue Val, SDValue Ptr,
- MachinePointerInfo PtrInfo, EVT SVT, unsigned Alignment,
+ MachinePointerInfo PtrInfo, EVT SVT, unsigned Alignment,
MachineMemOperand::Flags MMOFlags = MachineMemOperand::MONone,
const AAMDNodes &AAInfo = AAMDNodes()) {
return getTruncStore(Chain, dl, Val, Ptr, PtrInfo, SVT,
@@ -1369,11 +1369,11 @@ public:
ISD::MemIndexedMode AM);
SDValue getMaskedGather(SDVTList VTs, EVT VT, const SDLoc &dl,
ArrayRef<SDValue> Ops, MachineMemOperand *MMO,
- ISD::MemIndexType IndexType, ISD::LoadExtType ExtTy);
+ ISD::MemIndexType IndexType, ISD::LoadExtType ExtTy);
SDValue getMaskedScatter(SDVTList VTs, EVT VT, const SDLoc &dl,
ArrayRef<SDValue> Ops, MachineMemOperand *MMO,
- ISD::MemIndexType IndexType,
- bool IsTruncating = false);
+ ISD::MemIndexType IndexType,
+ bool IsTruncating = false);
/// Construct a node to track a Value* through the backend.
SDValue getSrcValue(const Value *v);
@@ -1438,9 +1438,9 @@ public:
void setNodeMemRefs(MachineSDNode *N,
ArrayRef<MachineMemOperand *> NewMemRefs);
- // Calculate divergence of node \p N based on its operands.
- bool calculateDivergence(SDNode *N);
-
+ // Calculate divergence of node \p N based on its operands.
+ bool calculateDivergence(SDNode *N);
+
// Propagates the change in divergence to users
void updateDivergence(SDNode * N);
@@ -1518,14 +1518,14 @@ public:
SDValue Operand, SDValue Subreg);
/// Get the specified node if it's already available, or else return NULL.
- SDNode *getNodeIfExists(unsigned Opcode, SDVTList VTList,
- ArrayRef<SDValue> Ops, const SDNodeFlags Flags);
- SDNode *getNodeIfExists(unsigned Opcode, SDVTList VTList,
- ArrayRef<SDValue> Ops);
-
- /// Check if a node exists without modifying its flags.
- bool doesNodeExist(unsigned Opcode, SDVTList VTList, ArrayRef<SDValue> Ops);
-
+ SDNode *getNodeIfExists(unsigned Opcode, SDVTList VTList,
+ ArrayRef<SDValue> Ops, const SDNodeFlags Flags);
+ SDNode *getNodeIfExists(unsigned Opcode, SDVTList VTList,
+ ArrayRef<SDValue> Ops);
+
+ /// Check if a node exists without modifying its flags.
+ bool doesNodeExist(unsigned Opcode, SDVTList VTList, ArrayRef<SDValue> Ops);
+
/// Creates a SDDbgValue node.
SDDbgValue *getDbgValue(DIVariable *Var, DIExpression *Expr, SDNode *N,
unsigned R, bool IsIndirect, const DebugLoc &DL,
@@ -1598,15 +1598,15 @@ public:
/// chain to the token factor. This ensures that the new memory node will have
/// the same relative memory dependency position as the old load. Returns the
/// new merged load chain.
- SDValue makeEquivalentMemoryOrdering(SDValue OldChain, SDValue NewMemOpChain);
-
- /// If an existing load has uses of its chain, create a token factor node with
- /// that chain and the new memory node's chain and update users of the old
- /// chain to the token factor. This ensures that the new memory node will have
- /// the same relative memory dependency position as the old load. Returns the
- /// new merged load chain.
- SDValue makeEquivalentMemoryOrdering(LoadSDNode *OldLoad, SDValue NewMemOp);
-
+ SDValue makeEquivalentMemoryOrdering(SDValue OldChain, SDValue NewMemOpChain);
+
+ /// If an existing load has uses of its chain, create a token factor node with
+ /// that chain and the new memory node's chain and update users of the old
+ /// chain to the token factor. This ensures that the new memory node will have
+ /// the same relative memory dependency position as the old load. Returns the
+ /// new merged load chain.
+ SDValue makeEquivalentMemoryOrdering(LoadSDNode *OldLoad, SDValue NewMemOp);
+
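
A usage sketch of the LoadSDNode overload declared above; NewLoad stands in for whatever replacement node a combine just built:

    #include "llvm/CodeGen/SelectionDAG.h"
    using namespace llvm;

    static SDValue replaceLoadChain(SelectionDAG &DAG, LoadSDNode *OldLoad,
                                    SDValue NewLoad) {
      // Returns the merged chain; users of OldLoad's chain now also depend
      // on NewLoad's chain through a token factor node.
      return DAG.makeEquivalentMemoryOrdering(OldLoad, NewLoad);
    }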
/// Topologically sort the AllNodes list and assign a unique node id for
/// each node in the DAG based on its topological order. Returns the number
/// of nodes.
@@ -1843,8 +1843,8 @@ public:
/// for \p DemandedElts.
///
/// NOTE: The function will return true for a demanded splat of UNDEF values.
- bool isSplatValue(SDValue V, const APInt &DemandedElts, APInt &UndefElts,
- unsigned Depth = 0);
+ bool isSplatValue(SDValue V, const APInt &DemandedElts, APInt &UndefElts,
+ unsigned Depth = 0);
/// Test whether \p V has a splatted value.
bool isSplatValue(SDValue V, bool AllowUndefs = false);
@@ -1966,14 +1966,14 @@ public:
}
/// Test whether the given value is a constant int or similar node.
- SDNode *isConstantIntBuildVectorOrConstantInt(SDValue N) const;
+ SDNode *isConstantIntBuildVectorOrConstantInt(SDValue N) const;
/// Test whether the given value is a constant FP or similar node.
- SDNode *isConstantFPBuildVectorOrConstantFP(SDValue N) const;
+ SDNode *isConstantFPBuildVectorOrConstantFP(SDValue N) const;
/// \returns true if \p N is any kind of constant or build_vector of
/// constants, int or float. If a vector, it may not necessarily be a splat.
- inline bool isConstantValueOfAnyType(SDValue N) const {
+ inline bool isConstantValueOfAnyType(SDValue N) const {
return isConstantIntBuildVectorOrConstantInt(N) ||
isConstantFPBuildVectorOrConstantFP(N);
}
@@ -2021,10 +2021,10 @@ public:
bool shouldOptForSize() const;
- /// Get the (commutative) neutral element for the given opcode, if it exists.
- SDValue getNeutralElement(unsigned Opcode, const SDLoc &DL, EVT VT,
- SDNodeFlags Flags);
-
+ /// Get the (commutative) neutral element for the given opcode, if it exists.
+ SDValue getNeutralElement(unsigned Opcode, const SDLoc &DL, EVT VT,
+ SDNodeFlags Flags);
+
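
A usage sketch for getNeutralElement declared above. The values in the comment are the usual neutral elements; callers should be prepared for a null SDValue when an opcode has none:

    #include "llvm/CodeGen/SelectionDAG.h"
    using namespace llvm;

    static SDValue paddingForReduction(SelectionDAG &DAG, unsigned Opcode,
                                       const SDLoc &DL, EVT VT,
                                       SDNodeFlags Flags) {
      // Typically ADD/OR/XOR -> 0, MUL -> 1, AND -> all-ones, FADD -> -0.0.
      return DAG.getNeutralElement(Opcode, DL, VT, Flags);
    }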
private:
void InsertNode(SDNode *N);
bool RemoveNodeFromCSEMaps(SDNode *N);
@@ -2065,8 +2065,8 @@ private:
std::map<std::pair<std::string, unsigned>, SDNode *> TargetExternalSymbols;
DenseMap<MCSymbol *, SDNode *> MCSymbols;
-
- FlagInserter *Inserter = nullptr;
+
+ FlagInserter *Inserter = nullptr;
};
template <> struct GraphTraits<SelectionDAG*> : public GraphTraits<SDNode*> {
diff --git a/contrib/libs/llvm12/include/llvm/CodeGen/SelectionDAGNodes.h b/contrib/libs/llvm12/include/llvm/CodeGen/SelectionDAGNodes.h
index 67be071a77..e82bdb429f 100644
--- a/contrib/libs/llvm12/include/llvm/CodeGen/SelectionDAGNodes.h
+++ b/contrib/libs/llvm12/include/llvm/CodeGen/SelectionDAGNodes.h
@@ -92,43 +92,43 @@ namespace ISD {
/// Node predicates
-/// If N is a BUILD_VECTOR or SPLAT_VECTOR node whose elements are all the
-/// same constant or undefined, return true and return the constant value in
-/// \p SplatValue.
-bool isConstantSplatVector(const SDNode *N, APInt &SplatValue);
-
-/// Return true if the specified node is a BUILD_VECTOR or SPLAT_VECTOR where
-/// all of the elements are ~0 or undef. If \p BuildVectorOnly is set to
-/// true, it only checks BUILD_VECTOR.
-bool isConstantSplatVectorAllOnes(const SDNode *N,
- bool BuildVectorOnly = false);
-
-/// Return true if the specified node is a BUILD_VECTOR or SPLAT_VECTOR where
-/// all of the elements are 0 or undef. If \p BuildVectorOnly is set to true, it
-/// only checks BUILD_VECTOR.
-bool isConstantSplatVectorAllZeros(const SDNode *N,
- bool BuildVectorOnly = false);
-
-/// Return true if the specified node is a BUILD_VECTOR where all of the
-/// elements are ~0 or undef.
-bool isBuildVectorAllOnes(const SDNode *N);
-
-/// Return true if the specified node is a BUILD_VECTOR where all of the
-/// elements are 0 or undef.
-bool isBuildVectorAllZeros(const SDNode *N);
-
-/// Return true if the specified node is a BUILD_VECTOR node of all
-/// ConstantSDNode or undef.
-bool isBuildVectorOfConstantSDNodes(const SDNode *N);
-
-/// Return true if the specified node is a BUILD_VECTOR node of all
-/// ConstantFPSDNode or undef.
-bool isBuildVectorOfConstantFPSDNodes(const SDNode *N);
-
-/// Return true if the node has at least one operand and all operands of the
-/// specified node are ISD::UNDEF.
-bool allOperandsUndef(const SDNode *N);
-
+/// If N is a BUILD_VECTOR or SPLAT_VECTOR node whose elements are all the
+/// same constant or undefined, return true and return the constant value in
+/// \p SplatValue.
+bool isConstantSplatVector(const SDNode *N, APInt &SplatValue);
+
+/// Return true if the specified node is a BUILD_VECTOR or SPLAT_VECTOR where
+/// all of the elements are ~0 or undef. If \p BuildVectorOnly is set to
+/// true, it only checks BUILD_VECTOR.
+bool isConstantSplatVectorAllOnes(const SDNode *N,
+ bool BuildVectorOnly = false);
+
+/// Return true if the specified node is a BUILD_VECTOR or SPLAT_VECTOR where
+/// all of the elements are 0 or undef. If \p BuildVectorOnly is set to true, it
+/// only checks BUILD_VECTOR.
+bool isConstantSplatVectorAllZeros(const SDNode *N,
+ bool BuildVectorOnly = false);
+
+/// Return true if the specified node is a BUILD_VECTOR where all of the
+/// elements are ~0 or undef.
+bool isBuildVectorAllOnes(const SDNode *N);
+
+/// Return true if the specified node is a BUILD_VECTOR where all of the
+/// elements are 0 or undef.
+bool isBuildVectorAllZeros(const SDNode *N);
+
+/// Return true if the specified node is a BUILD_VECTOR node of all
+/// ConstantSDNode or undef.
+bool isBuildVectorOfConstantSDNodes(const SDNode *N);
+
+/// Return true if the specified node is a BUILD_VECTOR node of all
+/// ConstantFPSDNode or undef.
+bool isBuildVectorOfConstantFPSDNodes(const SDNode *N);
+
+/// Return true if the node has at least one operand and all operands of the
+/// specified node are ISD::UNDEF.
+bool allOperandsUndef(const SDNode *N);
+
} // end namespace ISD
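
A usage sketch for the splat predicates above, recognizing a constant splat of one and reading out its value; the predicate itself is an illustrative helper, not an LLVM API:

    #include "llvm/ADT/APInt.h"
    #include "llvm/CodeGen/SelectionDAGNodes.h"
    using namespace llvm;

    // True for BUILD_VECTOR/SPLAT_VECTOR nodes whose elements are all 1.
    static bool isSplatOfOne(const SDNode *N) {
      APInt SplatValue;
      return ISD::isConstantSplatVector(N, SplatValue) &&
             SplatValue.isOneValue();
    }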
//===----------------------------------------------------------------------===//
@@ -200,8 +200,8 @@ public:
return getValueType().getSizeInBits();
}
- uint64_t getScalarValueSizeInBits() const {
- return getValueType().getScalarType().getFixedSizeInBits();
+ uint64_t getScalarValueSizeInBits() const {
+ return getValueType().getScalarType().getFixedSizeInBits();
}
// Forwarding methods - These forward to the corresponding methods in SDNode.
@@ -398,8 +398,8 @@ private:
public:
/// Default constructor turns off all optimization flags.
SDNodeFlags()
- : NoUnsignedWrap(false), NoSignedWrap(false), Exact(false), NoNaNs(false),
- NoInfs(false), NoSignedZeros(false), AllowReciprocal(false),
+ : NoUnsignedWrap(false), NoSignedWrap(false), Exact(false), NoNaNs(false),
+ NoInfs(false), NoSignedZeros(false), AllowReciprocal(false),
AllowContract(false), ApproximateFuncs(false),
AllowReassociation(false), NoFPExcept(false) {}
@@ -415,17 +415,17 @@ public:
}
// These are mutators for each flag.
- void setNoUnsignedWrap(bool b) { NoUnsignedWrap = b; }
- void setNoSignedWrap(bool b) { NoSignedWrap = b; }
- void setExact(bool b) { Exact = b; }
- void setNoNaNs(bool b) { NoNaNs = b; }
- void setNoInfs(bool b) { NoInfs = b; }
- void setNoSignedZeros(bool b) { NoSignedZeros = b; }
- void setAllowReciprocal(bool b) { AllowReciprocal = b; }
- void setAllowContract(bool b) { AllowContract = b; }
- void setApproximateFuncs(bool b) { ApproximateFuncs = b; }
- void setAllowReassociation(bool b) { AllowReassociation = b; }
- void setNoFPExcept(bool b) { NoFPExcept = b; }
+ void setNoUnsignedWrap(bool b) { NoUnsignedWrap = b; }
+ void setNoSignedWrap(bool b) { NoSignedWrap = b; }
+ void setExact(bool b) { Exact = b; }
+ void setNoNaNs(bool b) { NoNaNs = b; }
+ void setNoInfs(bool b) { NoInfs = b; }
+ void setNoSignedZeros(bool b) { NoSignedZeros = b; }
+ void setAllowReciprocal(bool b) { AllowReciprocal = b; }
+ void setAllowContract(bool b) { AllowContract = b; }
+ void setApproximateFuncs(bool b) { ApproximateFuncs = b; }
+ void setAllowReassociation(bool b) { AllowReassociation = b; }
+ void setNoFPExcept(bool b) { NoFPExcept = b; }
// These are accessors for each flag.
bool hasNoUnsignedWrap() const { return NoUnsignedWrap; }
@@ -440,8 +440,8 @@ public:
bool hasAllowReassociation() const { return AllowReassociation; }
bool hasNoFPExcept() const { return NoFPExcept; }
- /// Clear any flags in this flag set that aren't also set in Flags. All
- /// flags will be cleared if Flags are undefined.
+ /// Clear any flags in this flag set that aren't also set in Flags. All
+ /// flags will be cleared if Flags are undefined.
void intersectWith(const SDNodeFlags Flags) {
NoUnsignedWrap &= Flags.NoUnsignedWrap;
NoSignedWrap &= Flags.NoSignedWrap;
@@ -533,7 +533,7 @@ BEGIN_TWO_BYTE_PACK()
class LoadSDNodeBitfields {
friend class LoadSDNode;
friend class MaskedLoadSDNode;
- friend class MaskedGatherSDNode;
+ friend class MaskedGatherSDNode;
uint16_t : NumLSBaseSDNodeBits;
@@ -544,7 +544,7 @@ BEGIN_TWO_BYTE_PACK()
class StoreSDNodeBitfields {
friend class StoreSDNode;
friend class MaskedStoreSDNode;
- friend class MaskedScatterSDNode;
+ friend class MaskedScatterSDNode;
uint16_t : NumLSBaseSDNodeBits;
@@ -696,7 +696,7 @@ public:
bool use_empty() const { return UseList == nullptr; }
/// Return true if there is exactly one use of this node.
- bool hasOneUse() const { return hasSingleElement(uses()); }
+ bool hasOneUse() const { return hasSingleElement(uses()); }
/// Return the number of uses of this node. This method takes
/// time proportional to the number of uses.
@@ -1353,18 +1353,18 @@ public:
}
const SDValue &getChain() const { return getOperand(0); }
-
+
const SDValue &getBasePtr() const {
- switch (getOpcode()) {
- case ISD::STORE:
- case ISD::MSTORE:
- return getOperand(2);
- case ISD::MGATHER:
- case ISD::MSCATTER:
- return getOperand(3);
- default:
- return getOperand(1);
- }
+ switch (getOpcode()) {
+ case ISD::STORE:
+ case ISD::MSTORE:
+ return getOperand(2);
+ case ISD::MGATHER:
+ case ISD::MSCATTER:
+ return getOperand(3);
+ default:
+ return getOperand(1);
+ }
}
// Methods to support isa and dyn_cast
@@ -1768,32 +1768,32 @@ public:
}
};
-/// This SDNode is used for PSEUDO_PROBE values, which are the function guid and
-/// the index of the basic block being probed. A pseudo probe serves as a
-/// placeholder and will be removed at the end of compilation. It has no
-/// operands because we do not want instruction selection to deal with any.
-class PseudoProbeSDNode : public SDNode {
- friend class SelectionDAG;
- uint64_t Guid;
- uint64_t Index;
- uint32_t Attributes;
-
- PseudoProbeSDNode(unsigned Opcode, unsigned Order, const DebugLoc &Dl,
- SDVTList VTs, uint64_t Guid, uint64_t Index, uint32_t Attr)
- : SDNode(Opcode, Order, Dl, VTs), Guid(Guid), Index(Index),
- Attributes(Attr) {}
-
-public:
- uint64_t getGuid() const { return Guid; }
- uint64_t getIndex() const { return Index; }
- uint32_t getAttributes() const { return Attributes; }
-
- // Methods to support isa and dyn_cast
- static bool classof(const SDNode *N) {
- return N->getOpcode() == ISD::PSEUDO_PROBE;
- }
-};
-
+/// This SDNode is used for PSEUDO_PROBE values, which are the function guid and
+/// the index of the basic block being probed. A pseudo probe serves as a
+/// placeholder and will be removed at the end of compilation. It has no
+/// operands because we do not want instruction selection to deal with any.
+class PseudoProbeSDNode : public SDNode {
+ friend class SelectionDAG;
+ uint64_t Guid;
+ uint64_t Index;
+ uint32_t Attributes;
+
+ PseudoProbeSDNode(unsigned Opcode, unsigned Order, const DebugLoc &Dl,
+ SDVTList VTs, uint64_t Guid, uint64_t Index, uint32_t Attr)
+ : SDNode(Opcode, Order, Dl, VTs), Guid(Guid), Index(Index),
+ Attributes(Attr) {}
+
+public:
+ uint64_t getGuid() const { return Guid; }
+ uint64_t getIndex() const { return Index; }
+ uint32_t getAttributes() const { return Attributes; }
+
+ // Methods to support isa and dyn_cast
+ static bool classof(const SDNode *N) {
+ return N->getOpcode() == ISD::PSEUDO_PROBE;
+ }
+};
+
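
A sketch tying PseudoProbeSDNode to the getPseudoProbeNode factory declared earlier in SelectionDAG.h; the GUID, index, and attribute values are placeholders:

    #include "llvm/CodeGen/SelectionDAG.h"
    #include "llvm/CodeGen/SelectionDAGNodes.h"
    #include <cassert>
    using namespace llvm;

    static SDValue emitProbe(SelectionDAG &DAG, const SDLoc &DL, SDValue Chain) {
      const uint64_t Guid = 0xDEADBEEF, Index = 1; // placeholder values
      const uint32_t Attr = 0;
      SDValue Probe = DAG.getPseudoProbeNode(DL, Chain, Guid, Index, Attr);
      // The node carries the probe metadata but no further operands.
      assert(cast<PseudoProbeSDNode>(Probe.getNode())->getIndex() == Index);
      return Probe;
    }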
class JumpTableSDNode : public SDNode {
friend class SelectionDAG;
@@ -1954,33 +1954,33 @@ public:
/// the vector width and set the bits where elements are undef.
SDValue getSplatValue(BitVector *UndefElements = nullptr) const;
- /// Find the shortest repeating sequence of values in the build vector.
- ///
- /// e.g. { u, X, u, X, u, u, X, u } -> { X }
- /// { X, Y, u, Y, u, u, X, u } -> { X, Y }
- ///
- /// Currently this must be a power-of-2 build vector.
- /// The DemandedElts mask indicates the elements that must be present,
- /// undemanded elements in Sequence may be null (SDValue()). If passed a
- /// non-null UndefElements bitvector, it will resize it to match the original
- /// vector width and set the bits where elements are undef. If result is
- /// false, Sequence will be empty.
- bool getRepeatedSequence(const APInt &DemandedElts,
- SmallVectorImpl<SDValue> &Sequence,
- BitVector *UndefElements = nullptr) const;
-
- /// Find the shortest repeating sequence of values in the build vector.
- ///
- /// e.g. { u, X, u, X, u, u, X, u } -> { X }
- /// { X, Y, u, Y, u, u, X, u } -> { X, Y }
- ///
- /// Currently this must be a power-of-2 build vector.
- /// If passed a non-null UndefElements bitvector, it will resize it to match
- /// the original vector width and set the bits where elements are undef.
- /// If result is false, Sequence will be empty.
- bool getRepeatedSequence(SmallVectorImpl<SDValue> &Sequence,
- BitVector *UndefElements = nullptr) const;
-
+ /// Find the shortest repeating sequence of values in the build vector.
+ ///
+ /// e.g. { u, X, u, X, u, u, X, u } -> { X }
+ /// { X, Y, u, Y, u, u, X, u } -> { X, Y }
+ ///
+ /// Currently this must be a power-of-2 build vector.
+ /// The DemandedElts mask indicates the elements that must be present,
+ /// undemanded elements in Sequence may be null (SDValue()). If passed a
+ /// non-null UndefElements bitvector, it will resize it to match the original
+ /// vector width and set the bits where elements are undef. If result is
+ /// false, Sequence will be empty.
+ bool getRepeatedSequence(const APInt &DemandedElts,
+ SmallVectorImpl<SDValue> &Sequence,
+ BitVector *UndefElements = nullptr) const;
+
+ /// Find the shortest repeating sequence of values in the build vector.
+ ///
+ /// e.g. { u, X, u, X, u, u, X, u } -> { X }
+ /// { X, Y, u, Y, u, u, X, u } -> { X, Y }
+ ///
+ /// Currently this must be a power-of-2 build vector.
+ /// If passed a non-null UndefElements bitvector, it will resize it to match
+ /// the original vector width and set the bits where elements are undef.
+ /// If result is false, Sequence will be empty.
+ bool getRepeatedSequence(SmallVectorImpl<SDValue> &Sequence,
+ BitVector *UndefElements = nullptr) const;
+
/// Returns the demanded splatted constant or null if this is not a constant
/// splat.
///
@@ -2436,9 +2436,9 @@ public:
ISD::MemIndexType getIndexType() const {
return static_cast<ISD::MemIndexType>(LSBaseSDNodeBits.AddressingMode);
}
- void setIndexType(ISD::MemIndexType IndexType) {
- LSBaseSDNodeBits.AddressingMode = IndexType;
- }
+ void setIndexType(ISD::MemIndexType IndexType) {
+ LSBaseSDNodeBits.AddressingMode = IndexType;
+ }
bool isIndexScaled() const {
return (getIndexType() == ISD::SIGNED_SCALED) ||
(getIndexType() == ISD::UNSIGNED_SCALED);
@@ -2471,18 +2471,18 @@ public:
MaskedGatherSDNode(unsigned Order, const DebugLoc &dl, SDVTList VTs,
EVT MemVT, MachineMemOperand *MMO,
- ISD::MemIndexType IndexType, ISD::LoadExtType ETy)
+ ISD::MemIndexType IndexType, ISD::LoadExtType ETy)
: MaskedGatherScatterSDNode(ISD::MGATHER, Order, dl, VTs, MemVT, MMO,
- IndexType) {
- LoadSDNodeBits.ExtTy = ETy;
- }
+ IndexType) {
+ LoadSDNodeBits.ExtTy = ETy;
+ }
const SDValue &getPassThru() const { return getOperand(1); }
- ISD::LoadExtType getExtensionType() const {
- return ISD::LoadExtType(LoadSDNodeBits.ExtTy);
- }
-
+ ISD::LoadExtType getExtensionType() const {
+ return ISD::LoadExtType(LoadSDNodeBits.ExtTy);
+ }
+
static bool classof(const SDNode *N) {
return N->getOpcode() == ISD::MGATHER;
}
@@ -2496,17 +2496,17 @@ public:
MaskedScatterSDNode(unsigned Order, const DebugLoc &dl, SDVTList VTs,
EVT MemVT, MachineMemOperand *MMO,
- ISD::MemIndexType IndexType, bool IsTrunc)
+ ISD::MemIndexType IndexType, bool IsTrunc)
: MaskedGatherScatterSDNode(ISD::MSCATTER, Order, dl, VTs, MemVT, MMO,
- IndexType) {
- StoreSDNodeBits.IsTruncating = IsTrunc;
- }
-
- /// Return true if the op does a truncation before store.
- /// For integers this is the same as doing a TRUNCATE and storing the result.
- /// For floats, it is the same as doing an FP_ROUND and storing the result.
- bool isTruncatingStore() const { return StoreSDNodeBits.IsTruncating; }
-
+ IndexType) {
+ StoreSDNodeBits.IsTruncating = IsTrunc;
+ }
+
+ /// Return true if the op does a truncation before store.
+ /// For integers this is the same as doing a TRUNCATE and storing the result.
+ /// For floats, it is the same as doing an FP_ROUND and storing the result.
+ bool isTruncatingStore() const { return StoreSDNodeBits.IsTruncating; }
+
const SDValue &getValue() const { return getOperand(1); }
static bool classof(const SDNode *N) {
@@ -2655,8 +2655,8 @@ template <> struct GraphTraits<SDNode*> {
/// with 4 and 8 byte pointer alignment, respectively.
using LargestSDNode = AlignedCharArrayUnion<AtomicSDNode, TargetIndexSDNode,
BlockAddressSDNode,
- GlobalAddressSDNode,
- PseudoProbeSDNode>;
+ GlobalAddressSDNode,
+ PseudoProbeSDNode>;
/// The SDNode class with the greatest alignment requirement.
using MostAlignedSDNode = GlobalAddressSDNode;
diff --git a/contrib/libs/llvm12/include/llvm/CodeGen/SelectionDAGTargetInfo.h b/contrib/libs/llvm12/include/llvm/CodeGen/SelectionDAGTargetInfo.h
index ea589cf12a..75cf58105b 100644
--- a/contrib/libs/llvm12/include/llvm/CodeGen/SelectionDAGTargetInfo.h
+++ b/contrib/libs/llvm12/include/llvm/CodeGen/SelectionDAGTargetInfo.h
@@ -92,7 +92,7 @@ public:
return SDValue();
}
- /// Emit target-specific code that performs a memcmp/bcmp, in cases where that is
+ /// Emit target-specific code that performs a memcmp/bcmp, in cases where that is
/// faster than a libcall. The first returned SDValue is the result of the
/// memcmp and the second is the chain. Both SDValues can be null if a normal
/// libcall should be used.
diff --git a/contrib/libs/llvm12/include/llvm/CodeGen/SlotIndexes.h b/contrib/libs/llvm12/include/llvm/CodeGen/SlotIndexes.h
index c25f9ec9a3..9b7b676bfc 100644
--- a/contrib/libs/llvm12/include/llvm/CodeGen/SlotIndexes.h
+++ b/contrib/libs/llvm12/include/llvm/CodeGen/SlotIndexes.h
@@ -611,23 +611,23 @@ class raw_ostream;
}
/// Add the given MachineBasicBlock into the maps.
- /// If it contains any instructions then they must already be in the maps.
- /// This is used after a block has been split by moving some suffix of its
- /// instructions into a newly created block.
- void insertMBBInMaps(MachineBasicBlock *mbb) {
- assert(mbb != &mbb->getParent()->front() &&
- "Can't insert a new block at the beginning of a function.");
- auto prevMBB = std::prev(MachineFunction::iterator(mbb));
-
- // Create a new entry to be used for the start of mbb and the end of
- // prevMBB.
- IndexListEntry *startEntry = createEntry(nullptr, 0);
- IndexListEntry *endEntry = getMBBEndIdx(&*prevMBB).listEntry();
- IndexListEntry *insEntry =
- mbb->empty() ? endEntry
- : getInstructionIndex(mbb->front()).listEntry();
- IndexList::iterator newItr =
- indexList.insert(insEntry->getIterator(), startEntry);
+ /// If it contains any instructions then they must already be in the maps.
+ /// This is used after a block has been split by moving some suffix of its
+ /// instructions into a newly created block.
+ void insertMBBInMaps(MachineBasicBlock *mbb) {
+ assert(mbb != &mbb->getParent()->front() &&
+ "Can't insert a new block at the beginning of a function.");
+ auto prevMBB = std::prev(MachineFunction::iterator(mbb));
+
+ // Create a new entry to be used for the start of mbb and the end of
+ // prevMBB.
+ IndexListEntry *startEntry = createEntry(nullptr, 0);
+ IndexListEntry *endEntry = getMBBEndIdx(&*prevMBB).listEntry();
+ IndexListEntry *insEntry =
+ mbb->empty() ? endEntry
+ : getInstructionIndex(mbb->front()).listEntry();
+ IndexList::iterator newItr =
+ indexList.insert(insEntry->getIterator(), startEntry);
SlotIndex startIdx(startEntry, SlotIndex::Slot_Block);
SlotIndex endIdx(endEntry, SlotIndex::Slot_Block);
diff --git a/contrib/libs/llvm12/include/llvm/CodeGen/StableHashing.h b/contrib/libs/llvm12/include/llvm/CodeGen/StableHashing.h
index f8a8c2f18f..a108bd88d1 100644
--- a/contrib/libs/llvm12/include/llvm/CodeGen/StableHashing.h
+++ b/contrib/libs/llvm12/include/llvm/CodeGen/StableHashing.h
@@ -1,123 +1,123 @@
-#pragma once
-
-#ifdef __GNUC__
-#pragma GCC diagnostic push
-#pragma GCC diagnostic ignored "-Wunused-parameter"
-#endif
-
-//===- llvm/CodeGen/StableHashing.h - Utilities for stable hashing * C++ *-===//
-//
-// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
-// See https://llvm.org/LICENSE.txt for license information.
-// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
-//
-//===----------------------------------------------------------------------===//
-//
-// This file provides types and functions for computing and combining stable
-// hashes. Stable hashes can be useful for hashing across different modules,
-// processes, or compiler runs.
-//
-//===----------------------------------------------------------------------===//
-
-#ifndef LLVM_CODEGEN_STABLEHASHING_H
-#define LLVM_CODEGEN_STABLEHASHING_H
-
-#include "llvm/ADT/StringRef.h"
-
-namespace llvm {
-
-/// An opaque object representing a stable hash code. It can be serialized,
-/// deserialized, and is stable across processes and executions.
-using stable_hash = uint64_t;
-
-// Implementation details
-namespace hashing {
-namespace detail {
-
-// Stable hashes are based on the 64-bit FNV-1 hash:
-// https://en.wikipedia.org/wiki/Fowler-Noll-Vo_hash_function
-
-const uint64_t FNV_PRIME_64 = 1099511628211u;
-const uint64_t FNV_OFFSET_64 = 14695981039346656037u;
-
-inline void stable_hash_append(stable_hash &Hash, const char Value) {
- Hash = Hash ^ (Value & 0xFF);
- Hash = Hash * FNV_PRIME_64;
-}
-
-inline void stable_hash_append(stable_hash &Hash, stable_hash Value) {
- for (unsigned I = 0; I < 8; ++I) {
- stable_hash_append(Hash, static_cast<char>(Value));
- Value >>= 8;
- }
-}
-
-} // namespace detail
-} // namespace hashing
-
-inline stable_hash stable_hash_combine(stable_hash A, stable_hash B) {
- stable_hash Hash = hashing::detail::FNV_OFFSET_64;
- hashing::detail::stable_hash_append(Hash, A);
- hashing::detail::stable_hash_append(Hash, B);
- return Hash;
-}
-
-inline stable_hash stable_hash_combine(stable_hash A, stable_hash B,
- stable_hash C) {
- stable_hash Hash = hashing::detail::FNV_OFFSET_64;
- hashing::detail::stable_hash_append(Hash, A);
- hashing::detail::stable_hash_append(Hash, B);
- hashing::detail::stable_hash_append(Hash, C);
- return Hash;
-}
-
-inline stable_hash stable_hash_combine(stable_hash A, stable_hash B,
- stable_hash C, stable_hash D) {
- stable_hash Hash = hashing::detail::FNV_OFFSET_64;
- hashing::detail::stable_hash_append(Hash, A);
- hashing::detail::stable_hash_append(Hash, B);
- hashing::detail::stable_hash_append(Hash, C);
- hashing::detail::stable_hash_append(Hash, D);
- return Hash;
-}
-
-/// Compute a stable_hash for a sequence of values.
-///
-/// This hashes a sequence of values. It produces the same stable_hash as
-/// 'stable_hash_combine(a, b, c, ...)', but can run over arbitrarily sized
-/// sequences and is significantly faster given pointers and types which
-/// can be hashed as a sequence of bytes.
-template <typename InputIteratorT>
-stable_hash stable_hash_combine_range(InputIteratorT First,
- InputIteratorT Last) {
- stable_hash Hash = hashing::detail::FNV_OFFSET_64;
- for (auto I = First; I != Last; ++I)
- hashing::detail::stable_hash_append(Hash, *I);
- return Hash;
-}
-
-inline stable_hash stable_hash_combine_array(const stable_hash *P, size_t C) {
- stable_hash Hash = hashing::detail::FNV_OFFSET_64;
- for (size_t I = 0; I < C; ++I)
- hashing::detail::stable_hash_append(Hash, P[I]);
- return Hash;
-}
-
-inline stable_hash stable_hash_combine_string(const StringRef &S) {
- return stable_hash_combine_range(S.begin(), S.end());
-}
-
-inline stable_hash stable_hash_combine_string(const char *C) {
- stable_hash Hash = hashing::detail::FNV_OFFSET_64;
- while (*C)
- hashing::detail::stable_hash_append(Hash, *(C++));
- return Hash;
-}
-
-} // namespace llvm
-
-#endif
-
-#ifdef __GNUC__
-#pragma GCC diagnostic pop
-#endif
+#pragma once
+
+#ifdef __GNUC__
+#pragma GCC diagnostic push
+#pragma GCC diagnostic ignored "-Wunused-parameter"
+#endif
+
+//===- llvm/CodeGen/StableHashing.h - Utilities for stable hashing * C++ *-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// This file provides types and functions for computing and combining stable
+// hashes. Stable hashes can be useful for hashing across different modules,
+// processes, or compiler runs.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_CODEGEN_STABLEHASHING_H
+#define LLVM_CODEGEN_STABLEHASHING_H
+
+#include "llvm/ADT/StringRef.h"
+
+namespace llvm {
+
+/// An opaque object representing a stable hash code. It can be serialized,
+/// deserialized, and is stable across processes and executions.
+using stable_hash = uint64_t;
+
+// Implementation details
+namespace hashing {
+namespace detail {
+
+// Stable hashes are based on the 64-bit FNV-1 hash:
+// https://en.wikipedia.org/wiki/Fowler-Noll-Vo_hash_function
+
+const uint64_t FNV_PRIME_64 = 1099511628211u;
+const uint64_t FNV_OFFSET_64 = 14695981039346656037u;
+
+inline void stable_hash_append(stable_hash &Hash, const char Value) {
+ Hash = Hash ^ (Value & 0xFF);
+ Hash = Hash * FNV_PRIME_64;
+}
+
+inline void stable_hash_append(stable_hash &Hash, stable_hash Value) {
+ for (unsigned I = 0; I < 8; ++I) {
+ stable_hash_append(Hash, static_cast<char>(Value));
+ Value >>= 8;
+ }
+}
+
+} // namespace detail
+} // namespace hashing
+
+inline stable_hash stable_hash_combine(stable_hash A, stable_hash B) {
+ stable_hash Hash = hashing::detail::FNV_OFFSET_64;
+ hashing::detail::stable_hash_append(Hash, A);
+ hashing::detail::stable_hash_append(Hash, B);
+ return Hash;
+}
+
+inline stable_hash stable_hash_combine(stable_hash A, stable_hash B,
+ stable_hash C) {
+ stable_hash Hash = hashing::detail::FNV_OFFSET_64;
+ hashing::detail::stable_hash_append(Hash, A);
+ hashing::detail::stable_hash_append(Hash, B);
+ hashing::detail::stable_hash_append(Hash, C);
+ return Hash;
+}
+
+inline stable_hash stable_hash_combine(stable_hash A, stable_hash B,
+ stable_hash C, stable_hash D) {
+ stable_hash Hash = hashing::detail::FNV_OFFSET_64;
+ hashing::detail::stable_hash_append(Hash, A);
+ hashing::detail::stable_hash_append(Hash, B);
+ hashing::detail::stable_hash_append(Hash, C);
+ hashing::detail::stable_hash_append(Hash, D);
+ return Hash;
+}
+
+/// Compute a stable_hash for a sequence of values.
+///
+/// This hashes a sequence of values. It produces the same stable_hash as
+/// 'stable_hash_combine(a, b, c, ...)', but can run over arbitrarily sized
+/// sequences and is significantly faster given pointers and types which
+/// can be hashed as a sequence of bytes.
+template <typename InputIteratorT>
+stable_hash stable_hash_combine_range(InputIteratorT First,
+ InputIteratorT Last) {
+ stable_hash Hash = hashing::detail::FNV_OFFSET_64;
+ for (auto I = First; I != Last; ++I)
+ hashing::detail::stable_hash_append(Hash, *I);
+ return Hash;
+}
+
+inline stable_hash stable_hash_combine_array(const stable_hash *P, size_t C) {
+ stable_hash Hash = hashing::detail::FNV_OFFSET_64;
+ for (size_t I = 0; I < C; ++I)
+ hashing::detail::stable_hash_append(Hash, P[I]);
+ return Hash;
+}
+
+inline stable_hash stable_hash_combine_string(const StringRef &S) {
+ return stable_hash_combine_range(S.begin(), S.end());
+}
+
+inline stable_hash stable_hash_combine_string(const char *C) {
+ stable_hash Hash = hashing::detail::FNV_OFFSET_64;
+ while (*C)
+ hashing::detail::stable_hash_append(Hash, *(C++));
+ return Hash;
+}
+
+} // namespace llvm
+
+#endif
+
+#ifdef __GNUC__
+#pragma GCC diagnostic pop
+#endif
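
A usage sketch for the helpers above. Since the hash is plain FNV-1 over bytes, the different entry points agree whenever they consume the same byte sequence:

    #include "llvm/CodeGen/StableHashing.h"
    #include <cassert>
    using namespace llvm;

    int main() {
      // StringRef and C-string variants hash the same bytes identically.
      stable_hash A = stable_hash_combine_string("llvm");
      stable_hash B = stable_hash_combine_string(StringRef("llvm"));
      assert(A == B);

      // Fixed-arity combine and the array form produce the same result
      // because both append the same values to the same FNV offset basis.
      stable_hash Values[] = {A, 42};
      assert(stable_hash_combine_array(Values, 2) == stable_hash_combine(A, 42));
      return 0;
    }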
diff --git a/contrib/libs/llvm12/include/llvm/CodeGen/StackMaps.h b/contrib/libs/llvm12/include/llvm/CodeGen/StackMaps.h
index 4f7c32e0a3..4453c25b1f 100644
--- a/contrib/libs/llvm12/include/llvm/CodeGen/StackMaps.h
+++ b/contrib/libs/llvm12/include/llvm/CodeGen/StackMaps.h
@@ -155,13 +155,13 @@ public:
/// <StackMaps::ConstantOp>, <calling convention>,
/// <StackMaps::ConstantOp>, <statepoint flags>,
/// <StackMaps::ConstantOp>, <num deopt args>, [deopt args...],
-/// <StackMaps::ConstantOp>, <num gc pointer args>, [gc pointer args...],
-/// <StackMaps::ConstantOp>, <num gc allocas>, [gc allocas args...],
-/// <StackMaps::ConstantOp>, <num entries in gc map>, [base/derived pairs]
-/// base/derived pairs in gc map are logical indices into <gc pointer args>
-/// section.
-/// All gc pointers assigned to VRegs produce a new value (in the form of an
-/// MI Def operand) and are tied to it.
+/// <StackMaps::ConstantOp>, <num gc pointer args>, [gc pointer args...],
+/// <StackMaps::ConstantOp>, <num gc allocas>, [gc allocas args...],
+/// <StackMaps::ConstantOp>, <num entries in gc map>, [base/derived pairs]
+/// base/derived pairs in gc map are logical indices into <gc pointer args>
+/// section.
+/// All gc pointers assigned to VRegs produce a new value (in the form of an
+/// MI Def operand) and are tied to it.
class StatepointOpers {
// TODO: we should change the STATEPOINT representation so that CC and
// Flags should be part of meta operands, with args and deopt operands, and
@@ -177,23 +177,23 @@ class StatepointOpers {
enum { CCOffset = 1, FlagsOffset = 3, NumDeoptOperandsOffset = 5 };
public:
- explicit StatepointOpers(const MachineInstr *MI) : MI(MI) {
- NumDefs = MI->getNumDefs();
- }
+ explicit StatepointOpers(const MachineInstr *MI) : MI(MI) {
+ NumDefs = MI->getNumDefs();
+ }
/// Get index of statepoint ID operand.
- unsigned getIDPos() const { return NumDefs + IDPos; }
+ unsigned getIDPos() const { return NumDefs + IDPos; }
/// Get index of Num Patch Bytes operand.
- unsigned getNBytesPos() const { return NumDefs + NBytesPos; }
+ unsigned getNBytesPos() const { return NumDefs + NBytesPos; }
/// Get index of Num Call Arguments operand.
- unsigned getNCallArgsPos() const { return NumDefs + NCallArgsPos; }
+ unsigned getNCallArgsPos() const { return NumDefs + NCallArgsPos; }
/// Get starting index of non-call-related arguments
/// (calling convention, statepoint flags, vm state and gc state).
unsigned getVarIdx() const {
- return MI->getOperand(NumDefs + NCallArgsPos).getImm() + MetaEnd + NumDefs;
+ return MI->getOperand(NumDefs + NCallArgsPos).getImm() + MetaEnd + NumDefs;
}
/// Get index of Calling Convention operand.
@@ -208,16 +208,16 @@ public:
}
/// Return the ID for the given statepoint.
- uint64_t getID() const { return MI->getOperand(NumDefs + IDPos).getImm(); }
+ uint64_t getID() const { return MI->getOperand(NumDefs + IDPos).getImm(); }
/// Return the number of patchable bytes the given statepoint should emit.
uint32_t getNumPatchBytes() const {
- return MI->getOperand(NumDefs + NBytesPos).getImm();
+ return MI->getOperand(NumDefs + NBytesPos).getImm();
}
/// Return the target of the underlying call.
const MachineOperand &getCallTarget() const {
- return MI->getOperand(NumDefs + CallTargetPos);
+ return MI->getOperand(NumDefs + CallTargetPos);
}
/// Return the calling convention.
@@ -228,31 +228,31 @@ public:
/// Return the statepoint flags.
uint64_t getFlags() const { return MI->getOperand(getFlagsIdx()).getImm(); }
- uint64_t getNumDeoptArgs() const {
- return MI->getOperand(getNumDeoptArgsIdx()).getImm();
- }
-
- /// Get index of number of gc map entries.
- unsigned getNumGcMapEntriesIdx();
-
- /// Get index of number of gc allocas.
- unsigned getNumAllocaIdx();
-
- /// Get index of number of GC pointers.
- unsigned getNumGCPtrIdx();
-
- /// Get index of first GC pointer operand, or -1 if there are none.
- int getFirstGCPtrIdx();
-
- /// Get vector of base/derived pairs from statepoint.
- /// Elements are indices into GC Pointer operand list (logical).
- /// Returns number of elements in GCMap.
- unsigned
- getGCPointerMap(SmallVectorImpl<std::pair<unsigned, unsigned>> &GCMap);
-
+ uint64_t getNumDeoptArgs() const {
+ return MI->getOperand(getNumDeoptArgsIdx()).getImm();
+ }
+
+ /// Get index of number of gc map entries.
+ unsigned getNumGcMapEntriesIdx();
+
+ /// Get index of number of gc allocas.
+ unsigned getNumAllocaIdx();
+
+ /// Get index of number of GC pointers.
+ unsigned getNumGCPtrIdx();
+
+  /// Get index of first GC pointer operand, or -1 if there are none.
+ int getFirstGCPtrIdx();
+
+ /// Get vector of base/derived pairs from statepoint.
+ /// Elements are indices into GC Pointer operand list (logical).
+ /// Returns number of elements in GCMap.
+ unsigned
+ getGCPointerMap(SmallVectorImpl<std::pair<unsigned, unsigned>> &GCMap);
+
private:
const MachineInstr *MI;
- unsigned NumDefs;
+ unsigned NumDefs;
};
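
For orientation, here is a minimal sketch of how a pass might query a STATEPOINT through this accessor class; MI is assumed to already be known to be a STATEPOINT, and the variable names are illustrative:

    // Sketch: reading the fixed statepoint operands via StatepointOpers.
    void inspectStatepoint(const MachineInstr *MI) {
      StatepointOpers SO(MI);
      uint64_t ID = SO.getID();                    // statepoint ID
      uint32_t PatchBytes = SO.getNumPatchBytes(); // patchable byte count
      const MachineOperand &Callee = SO.getCallTarget();
      unsigned VarIdx = SO.getVarIdx();  // first non-call-related operand
      (void)ID; (void)PatchBytes; (void)Callee; (void)VarIdx;
    }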
class StackMaps {
@@ -294,10 +294,10 @@ public:
StackMaps(AsmPrinter &AP);
- /// Get index of next meta operand.
- /// Similar to parseOperand, but does not actually parses operand meaning.
- static unsigned getNextMetaArgIdx(const MachineInstr *MI, unsigned CurIdx);
-
+ /// Get index of next meta operand.
+  /// Similar to parseOperand, but does not actually parse the operand's meaning.
+ static unsigned getNextMetaArgIdx(const MachineInstr *MI, unsigned CurIdx);
+
void reset() {
CSInfos.clear();
ConstPool.clear();
@@ -370,13 +370,13 @@ private:
MachineInstr::const_mop_iterator MOE, LocationVec &Locs,
LiveOutVec &LiveOuts) const;
- /// Specialized parser of statepoint operands.
- /// They do not directly correspond to StackMap record entries.
- void parseStatepointOpers(const MachineInstr &MI,
- MachineInstr::const_mop_iterator MOI,
- MachineInstr::const_mop_iterator MOE,
- LocationVec &Locations, LiveOutVec &LiveOuts);
-
+ /// Specialized parser of statepoint operands.
+ /// They do not directly correspond to StackMap record entries.
+ void parseStatepointOpers(const MachineInstr &MI,
+ MachineInstr::const_mop_iterator MOI,
+ MachineInstr::const_mop_iterator MOE,
+ LocationVec &Locations, LiveOutVec &LiveOuts);
+
/// Create a live-out register record for the given register @p Reg.
LiveOutReg createLiveOutReg(unsigned Reg,
const TargetRegisterInfo *TRI) const;
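
A hedged sketch of how getNextMetaArgIdx can be used to walk meta operands; StartIdx is assumed to already point at the first meta operand, which the caller must establish separately:

    // Sketch: count meta operands by repeatedly advancing the index.
    unsigned countMetaArgs(const MachineInstr *MI, unsigned StartIdx) {
      unsigned N = 0;
      for (unsigned I = StartIdx; I < MI->getNumOperands();
           I = StackMaps::getNextMetaArgIdx(MI, I))
        ++N;
      return N;
    }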
diff --git a/contrib/libs/llvm12/include/llvm/CodeGen/SwitchLoweringUtils.h b/contrib/libs/llvm12/include/llvm/CodeGen/SwitchLoweringUtils.h
index c2c565d9e7..0589b8cfa9 100644
--- a/contrib/libs/llvm12/include/llvm/CodeGen/SwitchLoweringUtils.h
+++ b/contrib/libs/llvm12/include/llvm/CodeGen/SwitchLoweringUtils.h
@@ -17,21 +17,21 @@
#define LLVM_CODEGEN_SWITCHLOWERINGUTILS_H
#include "llvm/ADT/SmallVector.h"
-#include "llvm/CodeGen/ISDOpcodes.h"
+#include "llvm/CodeGen/ISDOpcodes.h"
#include "llvm/CodeGen/SelectionDAGNodes.h"
-#include "llvm/IR/InstrTypes.h"
+#include "llvm/IR/InstrTypes.h"
#include "llvm/Support/BranchProbability.h"
-#include <vector>
+#include <vector>
namespace llvm {
-class BlockFrequencyInfo;
-class ConstantInt;
+class BlockFrequencyInfo;
+class ConstantInt;
class FunctionLoweringInfo;
class MachineBasicBlock;
-class ProfileSummaryInfo;
-class TargetLowering;
-class TargetMachine;
+class ProfileSummaryInfo;
+class TargetLowering;
+class TargetMachine;
namespace SwitchCG {
diff --git a/contrib/libs/llvm12/include/llvm/CodeGen/TargetCallingConv.h b/contrib/libs/llvm12/include/llvm/CodeGen/TargetCallingConv.h
index fde9c476e3..1133bb8844 100644
--- a/contrib/libs/llvm12/include/llvm/CodeGen/TargetCallingConv.h
+++ b/contrib/libs/llvm12/include/llvm/CodeGen/TargetCallingConv.h
@@ -38,7 +38,7 @@ namespace ISD {
unsigned IsInReg : 1; ///< Passed in register
unsigned IsSRet : 1; ///< Hidden struct-ret ptr
unsigned IsByVal : 1; ///< Struct passed by value
- unsigned IsByRef : 1; ///< Passed in memory
+ unsigned IsByRef : 1; ///< Passed in memory
unsigned IsNest : 1; ///< Nested fn static chain
unsigned IsReturned : 1; ///< Always returned
unsigned IsSplit : 1;
@@ -51,31 +51,31 @@ namespace ISD {
unsigned IsHva : 1; ///< HVA field for
unsigned IsHvaStart : 1; ///< HVA structure start
unsigned IsSecArgPass : 1; ///< Second argument
- unsigned ByValOrByRefAlign : 4; ///< Log 2 of byval/byref alignment
+ unsigned ByValOrByRefAlign : 4; ///< Log 2 of byval/byref alignment
unsigned OrigAlign : 5; ///< Log 2 of original alignment
unsigned IsInConsecutiveRegsLast : 1;
unsigned IsInConsecutiveRegs : 1;
unsigned IsCopyElisionCandidate : 1; ///< Argument copy elision candidate
unsigned IsPointer : 1;
- unsigned ByValOrByRefSize; ///< Byval or byref struct size
+ unsigned ByValOrByRefSize; ///< Byval or byref struct size
unsigned PointerAddrSpace; ///< Address space of pointer argument
- /// Set the alignment used by byref or byval parameters.
- void setAlignImpl(Align A) {
- ByValOrByRefAlign = encode(A);
- assert(getNonZeroByValAlign() == A && "bitfield overflow");
- }
-
+ /// Set the alignment used by byref or byval parameters.
+ void setAlignImpl(Align A) {
+ ByValOrByRefAlign = encode(A);
+ assert(getNonZeroByValAlign() == A && "bitfield overflow");
+ }
+
public:
ArgFlagsTy()
- : IsZExt(0), IsSExt(0), IsInReg(0), IsSRet(0), IsByVal(0), IsByRef(0),
- IsNest(0), IsReturned(0), IsSplit(0), IsInAlloca(0), IsPreallocated(0),
+ : IsZExt(0), IsSExt(0), IsInReg(0), IsSRet(0), IsByVal(0), IsByRef(0),
+ IsNest(0), IsReturned(0), IsSplit(0), IsInAlloca(0), IsPreallocated(0),
IsSplitEnd(0), IsSwiftSelf(0), IsSwiftError(0), IsCFGuardTarget(0),
- IsHva(0), IsHvaStart(0), IsSecArgPass(0), ByValOrByRefAlign(0),
- OrigAlign(0), IsInConsecutiveRegsLast(0), IsInConsecutiveRegs(0),
- IsCopyElisionCandidate(0), IsPointer(0), ByValOrByRefSize(0),
+ IsHva(0), IsHvaStart(0), IsSecArgPass(0), ByValOrByRefAlign(0),
+ OrigAlign(0), IsInConsecutiveRegsLast(0), IsInConsecutiveRegs(0),
+ IsCopyElisionCandidate(0), IsPointer(0), ByValOrByRefSize(0),
PointerAddrSpace(0) {
static_assert(sizeof(*this) == 3 * sizeof(unsigned), "flags are too big");
}
@@ -95,9 +95,9 @@ namespace ISD {
bool isByVal() const { return IsByVal; }
void setByVal() { IsByVal = 1; }
- bool isByRef() const { return IsByRef; }
- void setByRef() { IsByRef = 1; }
-
+ bool isByRef() const { return IsByRef; }
+ void setByRef() { IsByRef = 1; }
+
bool isInAlloca() const { return IsInAlloca; }
void setInAlloca() { IsInAlloca = 1; }
@@ -129,12 +129,12 @@ namespace ISD {
void setReturned() { IsReturned = 1; }
bool isInConsecutiveRegs() const { return IsInConsecutiveRegs; }
- void setInConsecutiveRegs(bool Flag = true) { IsInConsecutiveRegs = Flag; }
+ void setInConsecutiveRegs(bool Flag = true) { IsInConsecutiveRegs = Flag; }
bool isInConsecutiveRegsLast() const { return IsInConsecutiveRegsLast; }
- void setInConsecutiveRegsLast(bool Flag = true) {
- IsInConsecutiveRegsLast = Flag;
- }
+ void setInConsecutiveRegsLast(bool Flag = true) {
+ IsInConsecutiveRegsLast = Flag;
+ }
bool isSplit() const { return IsSplit; }
void setSplit() { IsSplit = 1; }
@@ -150,24 +150,24 @@ namespace ISD {
LLVM_ATTRIBUTE_DEPRECATED(unsigned getByValAlign() const,
"Use getNonZeroByValAlign() instead") {
- MaybeAlign A = decodeMaybeAlign(ByValOrByRefAlign);
+ MaybeAlign A = decodeMaybeAlign(ByValOrByRefAlign);
return A ? A->value() : 0;
}
Align getNonZeroByValAlign() const {
- MaybeAlign A = decodeMaybeAlign(ByValOrByRefAlign);
+ MaybeAlign A = decodeMaybeAlign(ByValOrByRefAlign);
assert(A && "ByValAlign must be defined");
return *A;
}
void setByValAlign(Align A) {
- assert(isByVal() && !isByRef());
- setAlignImpl(A);
- }
-
- void setByRefAlign(Align A) {
- assert(!isByVal() && isByRef());
- setAlignImpl(A);
+ assert(isByVal() && !isByRef());
+ setAlignImpl(A);
}
+ void setByRefAlign(Align A) {
+ assert(!isByVal() && isByRef());
+ setAlignImpl(A);
+ }
+
LLVM_ATTRIBUTE_DEPRECATED(unsigned getOrigAlign() const,
"Use getNonZeroOrigAlign() instead") {
MaybeAlign A = decodeMaybeAlign(OrigAlign);
@@ -181,24 +181,24 @@ namespace ISD {
assert(getNonZeroOrigAlign() == A && "bitfield overflow");
}
- unsigned getByValSize() const {
- assert(isByVal() && !isByRef());
- return ByValOrByRefSize;
- }
- void setByValSize(unsigned S) {
- assert(isByVal() && !isByRef());
- ByValOrByRefSize = S;
- }
-
- unsigned getByRefSize() const {
- assert(!isByVal() && isByRef());
- return ByValOrByRefSize;
- }
- void setByRefSize(unsigned S) {
- assert(!isByVal() && isByRef());
- ByValOrByRefSize = S;
- }
-
+ unsigned getByValSize() const {
+ assert(isByVal() && !isByRef());
+ return ByValOrByRefSize;
+ }
+ void setByValSize(unsigned S) {
+ assert(isByVal() && !isByRef());
+ ByValOrByRefSize = S;
+ }
+
+ unsigned getByRefSize() const {
+ assert(!isByVal() && isByRef());
+ return ByValOrByRefSize;
+ }
+ void setByRefSize(unsigned S) {
+ assert(!isByVal() && isByRef());
+ ByValOrByRefSize = S;
+ }
+
unsigned getPointerAddrSpace() const { return PointerAddrSpace; }
void setPointerAddrSpace(unsigned AS) { PointerAddrSpace = AS; }
};
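
Since byval and byref share the ByValOrByRefSize and ByValOrByRefAlign fields, the setters assert that exactly one of the two flags is set. A small sketch with illustrative values (llvm::Align is assumed to be in scope from llvm/Support/Alignment.h):

    ISD::ArgFlagsTy Flags;
    Flags.setByRef();              // byref and byval are mutually exclusive
    Flags.setByRefSize(16);        // a 16-byte struct passed in memory
    Flags.setByRefAlign(Align(8)); // stored as log2 (3) in the 4-bit field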
diff --git a/contrib/libs/llvm12/include/llvm/CodeGen/TargetFrameLowering.h b/contrib/libs/llvm12/include/llvm/CodeGen/TargetFrameLowering.h
index 54d4c624d2..a53e3093d0 100644
--- a/contrib/libs/llvm12/include/llvm/CodeGen/TargetFrameLowering.h
+++ b/contrib/libs/llvm12/include/llvm/CodeGen/TargetFrameLowering.h
@@ -21,7 +21,7 @@
#define LLVM_CODEGEN_TARGETFRAMELOWERING_H
#include "llvm/CodeGen/MachineBasicBlock.h"
-#include "llvm/Support/TypeSize.h"
+#include "llvm/Support/TypeSize.h"
#include <vector>
namespace llvm {
@@ -34,7 +34,7 @@ namespace TargetStackID {
enum Value {
Default = 0,
SGPRSpill = 1,
- ScalableVector = 2,
+ ScalableVector = 2,
NoAlloc = 255
};
}
@@ -305,8 +305,8 @@ public:
/// getFrameIndexReference - This method should return the base register
/// and offset used to reference a frame index location. The offset is
/// returned directly, and the base register is returned via FrameReg.
- virtual StackOffset getFrameIndexReference(const MachineFunction &MF, int FI,
- Register &FrameReg) const;
+ virtual StackOffset getFrameIndexReference(const MachineFunction &MF, int FI,
+ Register &FrameReg) const;
/// Same as \c getFrameIndexReference, except that the stack pointer (as
/// opposed to the frame pointer) will be the preferred value for \p
@@ -314,10 +314,10 @@ public:
/// use offsets from RSP. If \p IgnoreSPUpdates is true, the returned
/// offset is only guaranteed to be valid with respect to the value of SP at
/// the end of the prologue.
- virtual StackOffset
- getFrameIndexReferencePreferSP(const MachineFunction &MF, int FI,
- Register &FrameReg,
- bool IgnoreSPUpdates) const {
+ virtual StackOffset
+ getFrameIndexReferencePreferSP(const MachineFunction &MF, int FI,
+ Register &FrameReg,
+ bool IgnoreSPUpdates) const {
// Always safe to dispatch to getFrameIndexReference.
return getFrameIndexReference(MF, FI, FrameReg);
}
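
As a sketch, a target override of getFrameIndexReference for a hypothetical target that always uses fixed FP-relative offsets might look like this; MyTargetFrameLowering and MyTarget::FP are assumptions for illustration, not LLVM API:

    StackOffset
    MyTargetFrameLowering::getFrameIndexReference(const MachineFunction &MF,
                                                  int FI,
                                                  Register &FrameReg) const {
      const MachineFrameInfo &MFI = MF.getFrameInfo();
      FrameReg = MyTarget::FP; // hypothetical frame pointer register
      return StackOffset::getFixed(MFI.getObjectOffset(FI));
    }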
@@ -325,8 +325,8 @@ public:
/// getNonLocalFrameIndexReference - This method returns the offset used to
/// reference a frame index location. The offset can be from either FP/BP/SP
/// based on which base register is returned by llvm.localaddress.
- virtual StackOffset getNonLocalFrameIndexReference(const MachineFunction &MF,
- int FI) const {
+ virtual StackOffset getNonLocalFrameIndexReference(const MachineFunction &MF,
+ int FI) const {
// By default, dispatch to getFrameIndexReference. Interested targets can
// override this.
Register FrameReg;
diff --git a/contrib/libs/llvm12/include/llvm/CodeGen/TargetInstrInfo.h b/contrib/libs/llvm12/include/llvm/CodeGen/TargetInstrInfo.h
index 7db4e4a6e8..a85ad4f600 100644
--- a/contrib/libs/llvm12/include/llvm/CodeGen/TargetInstrInfo.h
+++ b/contrib/libs/llvm12/include/llvm/CodeGen/TargetInstrInfo.h
@@ -32,7 +32,7 @@
#include "llvm/CodeGen/MachineInstrBuilder.h"
#include "llvm/CodeGen/MachineOperand.h"
#include "llvm/CodeGen/MachineOutliner.h"
-#include "llvm/CodeGen/RegisterClassInfo.h"
+#include "llvm/CodeGen/RegisterClassInfo.h"
#include "llvm/CodeGen/VirtRegMap.h"
#include "llvm/MC/MCInstrInfo.h"
#include "llvm/Support/BranchProbability.h"
@@ -88,15 +88,15 @@ struct RegImmPair {
RegImmPair(Register Reg, int64_t Imm) : Reg(Reg), Imm(Imm) {}
};
-/// Used to describe addressing mode similar to ExtAddrMode in CodeGenPrepare.
-/// It holds the register values, the scale value and the displacement.
-struct ExtAddrMode {
- Register BaseReg;
- Register ScaledReg;
- int64_t Scale;
- int64_t Displacement;
-};
-
+/// Used to describe addressing mode similar to ExtAddrMode in CodeGenPrepare.
+/// It holds the register values, the scale value and the displacement.
+struct ExtAddrMode {
+ Register BaseReg;
+ Register ScaledReg;
+ int64_t Scale;
+ int64_t Displacement;
+};
+
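
The struct encodes the usual BaseReg + Scale * ScaledReg + Displacement form; as a sketch, the address it describes once the register values are resolved (Base and Scaled are assumed runtime values, not part of the API):

    int64_t effectiveAddress(const ExtAddrMode &AM, int64_t Base,
                             int64_t Scaled) {
      return Base + AM.Scale * Scaled + AM.Displacement;
    }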
//---------------------------------------------------------------------------
///
/// TargetInstrInfo - Interface to description of machine instruction set
@@ -356,12 +356,12 @@ public:
unsigned &Size, unsigned &Offset,
const MachineFunction &MF) const;
- /// Return true if the given instruction is terminator that is unspillable,
- /// according to isUnspillableTerminatorImpl.
- bool isUnspillableTerminator(const MachineInstr *MI) const {
- return MI->isTerminator() && isUnspillableTerminatorImpl(MI);
- }
-
+  /// Return true if the given instruction is a terminator that is unspillable,
+ /// according to isUnspillableTerminatorImpl.
+ bool isUnspillableTerminator(const MachineInstr *MI) const {
+ return MI->isTerminator() && isUnspillableTerminatorImpl(MI);
+ }
+
/// Returns the size in bytes of the specified MachineInstr, or ~0U
/// when this function is not implemented by a target.
virtual unsigned getInstSizeInBytes(const MachineInstr &MI) const {
@@ -747,7 +747,7 @@ public:
return nullptr;
}
- /// Analyze the loop code, return true if it cannot be understood. Upon
+ /// Analyze the loop code, return true if it cannot be understood. Upon
/// success, this function returns false and returns information about the
/// induction variable and compare instruction used at the end.
virtual bool analyzeLoop(MachineLoop &L, MachineInstr *&IndVarInst,
@@ -794,7 +794,7 @@ public:
/// Second variant of isProfitableToIfCvt. This one
  /// checks for the case where two basic blocks from the true and false paths
- /// of a if-then-else (diamond) are predicated on mutually exclusive
+  /// of an if-then-else (diamond) are predicated on mutually exclusive
/// predicates, where the probability of the true path being taken is given
/// by Probability, and Confidence is a measure of our confidence that it
/// will be properly predicted.
@@ -968,17 +968,17 @@ protected:
return None;
}
- /// Return true if the given terminator MI is not expected to spill. This
- /// sets the live interval as not spillable and adjusts phi node lowering to
- /// not introduce copies after the terminator. Use with care, these are
- /// currently used for hardware loop intrinsics in very controlled situations,
- /// created prior to registry allocation in loops that only have single phi
- /// users for the terminators value. They may run out of registers if not used
- /// carefully.
- virtual bool isUnspillableTerminatorImpl(const MachineInstr *MI) const {
- return false;
- }
-
+  /// Return true if the given terminator MI is not expected to spill. This
+  /// sets the live interval as not spillable and adjusts phi node lowering to
+  /// not introduce copies after the terminator. Use with care; these are
+  /// currently used for hardware loop intrinsics in very controlled situations,
+  /// created prior to register allocation in loops that only have single phi
+  /// users for the terminator's value. They may run out of registers if not
+  /// used carefully.
+ virtual bool isUnspillableTerminatorImpl(const MachineInstr *MI) const {
+ return false;
+ }
+
public:
  /// If the specific machine instruction is an instruction that moves/copies
/// value from one register to another register return destination and source
@@ -1002,15 +1002,15 @@ public:
return None;
}
- /// Returns true if MI is an instruction that defines Reg to have a constant
- /// value and the value is recorded in ImmVal. The ImmVal is a result that
- /// should be interpreted as modulo size of Reg.
- virtual bool getConstValDefinedInReg(const MachineInstr &MI,
- const Register Reg,
- int64_t &ImmVal) const {
- return false;
- }
-
+  /// Returns true if MI is an instruction that defines Reg to have a constant
+  /// value, which is recorded in ImmVal. ImmVal should be interpreted modulo
+  /// the size of Reg.
+ virtual bool getConstValDefinedInReg(const MachineInstr &MI,
+ const Register Reg,
+ int64_t &ImmVal) const {
+ return false;
+ }
+
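
A hypothetical override for a target with a move-immediate opcode; MyInstrInfo and MyTarget::MOVi are assumptions for illustration:

    bool MyInstrInfo::getConstValDefinedInReg(const MachineInstr &MI,
                                              const Register Reg,
                                              int64_t &ImmVal) const {
      if (MI.getOpcode() != MyTarget::MOVi ||
          MI.getOperand(0).getReg() != Reg)
        return false;
      ImmVal = MI.getOperand(1).getImm(); // modulo the size of Reg
      return true;
    }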
/// Store the specified register of the given register class to the specified
/// stack frame index. The store instruction is to be added to the given
/// machine basic block before the specified machine instruction. If isKill
@@ -1084,24 +1084,24 @@ public:
/// faster sequence.
/// \param Root - Instruction that could be combined with one of its operands
/// \param Patterns - Vector of possible combination patterns
- virtual bool
- getMachineCombinerPatterns(MachineInstr &Root,
- SmallVectorImpl<MachineCombinerPattern> &Patterns,
- bool DoRegPressureReduce) const;
-
- /// Return true if target supports reassociation of instructions in machine
- /// combiner pass to reduce register pressure for a given BB.
- virtual bool
- shouldReduceRegisterPressure(MachineBasicBlock *MBB,
- RegisterClassInfo *RegClassInfo) const {
- return false;
- }
-
- /// Fix up the placeholder we may add in genAlternativeCodeSequence().
- virtual void
- finalizeInsInstrs(MachineInstr &Root, MachineCombinerPattern &P,
- SmallVectorImpl<MachineInstr *> &InsInstrs) const {}
-
+ virtual bool
+ getMachineCombinerPatterns(MachineInstr &Root,
+ SmallVectorImpl<MachineCombinerPattern> &Patterns,
+ bool DoRegPressureReduce) const;
+
+ /// Return true if target supports reassociation of instructions in machine
+ /// combiner pass to reduce register pressure for a given BB.
+ virtual bool
+ shouldReduceRegisterPressure(MachineBasicBlock *MBB,
+ RegisterClassInfo *RegClassInfo) const {
+ return false;
+ }
+
+ /// Fix up the placeholder we may add in genAlternativeCodeSequence().
+ virtual void
+ finalizeInsInstrs(MachineInstr &Root, MachineCombinerPattern &P,
+ SmallVectorImpl<MachineInstr *> &InsInstrs) const {}
+
/// Return true when a code sequence can improve throughput. It
/// should be called only for instructions in loops.
/// \param Pattern - combiner pattern
@@ -1305,11 +1305,11 @@ public:
bool &OffsetIsScalable,
const TargetRegisterInfo *TRI) const;
- /// Get zero or more base operands and the byte offset of an instruction that
- /// reads/writes memory. Note that there may be zero base operands if the
- /// instruction accesses a constant address.
+ /// Get zero or more base operands and the byte offset of an instruction that
+ /// reads/writes memory. Note that there may be zero base operands if the
+ /// instruction accesses a constant address.
/// It returns false if MI does not read/write memory.
- /// It returns false if base operands and offset could not be determined.
+ /// It returns false if base operands and offset could not be determined.
/// It is not guaranteed to always recognize base operands and offsets in all
/// cases.
virtual bool getMemOperandsWithOffsetWidth(
@@ -1328,27 +1328,27 @@ public:
return false;
}
- /// Target dependent implementation to get the values constituting the address
- /// MachineInstr that is accessing memory. These values are returned as a
- /// struct ExtAddrMode which contains all relevant information to make up the
- /// address.
- virtual Optional<ExtAddrMode>
- getAddrModeFromMemoryOp(const MachineInstr &MemI,
- const TargetRegisterInfo *TRI) const {
- return None;
- }
-
- /// Returns true if MI's Def is NullValueReg, and the MI
- /// does not change the Zero value. i.e. cases such as rax = shr rax, X where
- /// NullValueReg = rax. Note that if the NullValueReg is non-zero, this
- /// function can return true even if becomes zero. Specifically cases such as
- /// NullValueReg = shl NullValueReg, 63.
- virtual bool preservesZeroValueInReg(const MachineInstr *MI,
- const Register NullValueReg,
- const TargetRegisterInfo *TRI) const {
- return false;
- }
-
+  /// Target dependent implementation to get the values constituting the
+  /// address of a MachineInstr that is accessing memory. These values are
+  /// returned as a struct ExtAddrMode which contains all relevant information
+  /// to make up the address.
+ virtual Optional<ExtAddrMode>
+ getAddrModeFromMemoryOp(const MachineInstr &MemI,
+ const TargetRegisterInfo *TRI) const {
+ return None;
+ }
+
+  /// Returns true if MI's Def is NullValueReg, and the MI
+  /// does not change the Zero value, i.e. cases such as rax = shr rax, X where
+  /// NullValueReg = rax. Note that if the NullValueReg is non-zero, this
+  /// function can return true even if it becomes zero, specifically cases
+  /// such as NullValueReg = shl NullValueReg, 63.
+ virtual bool preservesZeroValueInReg(const MachineInstr *MI,
+ const Register NullValueReg,
+ const TargetRegisterInfo *TRI) const {
+ return false;
+ }
+
/// If the instruction is an increment of a constant value, return the amount.
virtual bool getIncrementValue(const MachineInstr &MI, int &Value) const {
return false;
@@ -1383,11 +1383,11 @@ public:
virtual void insertNoop(MachineBasicBlock &MBB,
MachineBasicBlock::iterator MI) const;
- /// Insert noops into the instruction stream at the specified point.
- virtual void insertNoops(MachineBasicBlock &MBB,
- MachineBasicBlock::iterator MI,
- unsigned Quantity) const;
-
+ /// Insert noops into the instruction stream at the specified point.
+ virtual void insertNoops(MachineBasicBlock &MBB,
+ MachineBasicBlock::iterator MI,
+ unsigned Quantity) const;
+
/// Return the noop instruction to use for a noop.
virtual void getNoop(MCInst &NopInst) const;
@@ -1439,13 +1439,13 @@ public:
/// If the specified instruction defines any predicate
/// or condition code register(s) used for predication, returns true as well
/// as the definition predicate(s) by reference.
- /// SkipDead should be set to false at any point that dead
- /// predicate instructions should be considered as being defined.
- /// A dead predicate instruction is one that is guaranteed to be removed
- /// after a call to PredicateInstruction.
- virtual bool ClobbersPredicate(MachineInstr &MI,
- std::vector<MachineOperand> &Pred,
- bool SkipDead) const {
+  /// SkipDead should be set to false whenever dead predicate
+  /// instructions should be considered as defining predicates.
+  /// A dead predicate instruction is one that is guaranteed to be removed
+  /// after a call to PredicateInstruction.
+ virtual bool ClobbersPredicate(MachineInstr &MI,
+ std::vector<MachineOperand> &Pred,
+ bool SkipDead) const {
return false;
}
@@ -1531,7 +1531,7 @@ public:
/// the machine instruction generated due to folding.
virtual MachineInstr *optimizeLoadInstr(MachineInstr &MI,
const MachineRegisterInfo *MRI,
- Register &FoldAsLoadDefReg,
+ Register &FoldAsLoadDefReg,
MachineInstr *&DefMI) const {
return nullptr;
}
@@ -1716,7 +1716,7 @@ public:
/// This hook works similarly to getPartialRegUpdateClearance, except that it
  /// does not take an operand index. Instead, it sets \p OpNum to the index of
  /// the unused register.
- virtual unsigned getUndefRegClearance(const MachineInstr &MI, unsigned OpNum,
+ virtual unsigned getUndefRegClearance(const MachineInstr &MI, unsigned OpNum,
const TargetRegisterInfo *TRI) const {
// The default implementation returns 0 for no undef register dependency.
return 0;
@@ -1777,21 +1777,21 @@ public:
return 5;
}
- /// Return the maximal number of alias checks on memory operands. For
- /// instructions with more than one memory operands, the alias check on a
- /// single MachineInstr pair has quadratic overhead and results in
- /// unacceptable performance in the worst case. The limit here is to clamp
- /// that maximal checks performed. Usually, that's the product of memory
- /// operand numbers from that pair of MachineInstr to be checked. For
- /// instance, with two MachineInstrs with 4 and 5 memory operands
- /// correspondingly, a total of 20 checks are required. With this limit set to
- /// 16, their alias check is skipped. We choose to limit the product instead
- /// of the individual instruction as targets may have special MachineInstrs
- /// with a considerably high number of memory operands, such as `ldm` in ARM.
- /// Setting this limit per MachineInstr would result in either too high
- /// overhead or too rigid restriction.
- virtual unsigned getMemOperandAACheckLimit() const { return 16; }
-
+  /// Return the maximal number of alias checks on memory operands. For
+  /// instructions with more than one memory operand, the alias check on a
+  /// single MachineInstr pair has quadratic overhead and results in
+  /// unacceptable performance in the worst case. The limit here clamps the
+  /// maximal number of checks performed. Usually, that's the product of the
+  /// memory operand counts of the pair of MachineInstrs to be checked. For
+  /// instance, with two MachineInstrs with 4 and 5 memory operands
+  /// respectively, a total of 20 checks are required. With this limit set to
+  /// 16, their alias check is skipped. We choose to limit the product instead
+  /// of the individual instruction as targets may have special MachineInstrs
+  /// with a considerably high number of memory operands, such as `ldm` in ARM.
+  /// Setting this limit per MachineInstr would result in either too much
+  /// overhead or too rigid a restriction.
+  virtual unsigned getMemOperandAACheckLimit() const { return 16; }
+
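
The budget is the product of the two instructions' memory-operand counts; a sketch of the guard a user of this hook might implement (not the actual implementation):

    bool shouldCheckAliasing(const MachineInstr &A, const MachineInstr &B,
                             const TargetInstrInfo &TII) {
      unsigned Checks = A.memoperands().size() * B.memoperands().size();
      return Checks <= TII.getMemOperandAACheckLimit(); // 4*5 = 20 > 16: skip
    }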
/// Return an array that contains the ids of the target indices (used for the
/// TargetIndex machine operand) and their names.
///
diff --git a/contrib/libs/llvm12/include/llvm/CodeGen/TargetLowering.h b/contrib/libs/llvm12/include/llvm/CodeGen/TargetLowering.h
index ae06e715a0..d9c9c6f0cf 100644
--- a/contrib/libs/llvm12/include/llvm/CodeGen/TargetLowering.h
+++ b/contrib/libs/llvm12/include/llvm/CodeGen/TargetLowering.h
@@ -285,7 +285,7 @@ public:
bool IsSRet : 1;
bool IsNest : 1;
bool IsByVal : 1;
- bool IsByRef : 1;
+ bool IsByRef : 1;
bool IsInAlloca : 1;
bool IsPreallocated : 1;
bool IsReturned : 1;
@@ -298,7 +298,7 @@ public:
ArgListEntry()
: IsSExt(false), IsZExt(false), IsInReg(false), IsSRet(false),
- IsNest(false), IsByVal(false), IsByRef(false), IsInAlloca(false),
+ IsNest(false), IsByVal(false), IsByRef(false), IsInAlloca(false),
IsPreallocated(false), IsReturned(false), IsSwiftSelf(false),
IsSwiftError(false), IsCFGuardTarget(false) {}
@@ -382,13 +382,13 @@ public:
EVT getShiftAmountTy(EVT LHSTy, const DataLayout &DL,
bool LegalTypes = true) const;
- /// Return the preferred type to use for a shift opcode, given the shifted
- /// amount type is \p ShiftValueTy.
- LLVM_READONLY
- virtual LLT getPreferredShiftAmountTy(LLT ShiftValueTy) const {
- return ShiftValueTy;
- }
-
+ /// Return the preferred type to use for a shift opcode, given the shifted
+ /// amount type is \p ShiftValueTy.
+ LLVM_READONLY
+ virtual LLT getPreferredShiftAmountTy(LLT ShiftValueTy) const {
+ return ShiftValueTy;
+ }
+
/// Returns the type to be used for the index operand of:
/// ISD::INSERT_VECTOR_ELT, ISD::EXTRACT_VECTOR_ELT,
/// ISD::INSERT_SUBVECTOR, and ISD::EXTRACT_SUBVECTOR
@@ -434,7 +434,7 @@ public:
virtual TargetLoweringBase::LegalizeTypeAction
getPreferredVectorAction(MVT VT) const {
    // The default action for one-element vectors is to scalarize
- if (VT.getVectorElementCount().isScalar())
+ if (VT.getVectorElementCount().isScalar())
return TypeScalarizeVector;
// The default action for an odd-width vector is to widen.
if (!VT.isPow2VectorType())
@@ -612,12 +612,12 @@ public:
return false;
}
- /// Return the maximum number of "x & (x - 1)" operations that can be done
- /// instead of deferring to a custom CTPOP.
- virtual unsigned getCustomCtpopCost(EVT VT, ISD::CondCode Cond) const {
- return 1;
- }
-
+ /// Return the maximum number of "x & (x - 1)" operations that can be done
+ /// instead of deferring to a custom CTPOP.
+ virtual unsigned getCustomCtpopCost(EVT VT, ISD::CondCode Cond) const {
+ return 1;
+ }
+
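
For reference, the trick the cost query is named after: x & (x - 1) clears the lowest set bit, so a comparison like ctpop(x) == 2 needs only two such steps. A plain C++ sketch:

    bool hasExactlyTwoBitsSet(uint64_t X) {
      X &= X - 1;                          // clear the lowest set bit
      return X != 0 && (X & (X - 1)) == 0; // exactly one bit must remain
    }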
/// Return true if instruction generated for equality comparison is folded
/// with instruction generated for signed comparison.
virtual bool isEqualityCmpFoldedWithSignedCmp() const { return true; }
@@ -1106,13 +1106,13 @@ public:
/// Return true if the specified operation is legal on this target or can be
/// made legal with custom lowering. This is used to help guide high-level
- /// lowering decisions. LegalOnly is an optional convenience for code paths
- /// traversed pre and post legalisation.
- bool isOperationLegalOrCustom(unsigned Op, EVT VT,
- bool LegalOnly = false) const {
- if (LegalOnly)
- return isOperationLegal(Op, VT);
-
+ /// lowering decisions. LegalOnly is an optional convenience for code paths
+ /// traversed pre and post legalisation.
+ bool isOperationLegalOrCustom(unsigned Op, EVT VT,
+ bool LegalOnly = false) const {
+ if (LegalOnly)
+ return isOperationLegal(Op, VT);
+
return (VT == MVT::Other || isTypeLegal(VT)) &&
(getOperationAction(Op, VT) == Legal ||
getOperationAction(Op, VT) == Custom);
@@ -1120,13 +1120,13 @@ public:
/// Return true if the specified operation is legal on this target or can be
/// made legal using promotion. This is used to help guide high-level lowering
- /// decisions. LegalOnly is an optional convenience for code paths traversed
- /// pre and post legalisation.
- bool isOperationLegalOrPromote(unsigned Op, EVT VT,
- bool LegalOnly = false) const {
- if (LegalOnly)
- return isOperationLegal(Op, VT);
-
+ /// decisions. LegalOnly is an optional convenience for code paths traversed
+ /// pre and post legalisation.
+ bool isOperationLegalOrPromote(unsigned Op, EVT VT,
+ bool LegalOnly = false) const {
+ if (LegalOnly)
+ return isOperationLegal(Op, VT);
+
return (VT == MVT::Other || isTypeLegal(VT)) &&
(getOperationAction(Op, VT) == Legal ||
getOperationAction(Op, VT) == Promote);
@@ -1134,13 +1134,13 @@ public:
/// Return true if the specified operation is legal on this target or can be
/// made legal with custom lowering or using promotion. This is used to help
- /// guide high-level lowering decisions. LegalOnly is an optional convenience
- /// for code paths traversed pre and post legalisation.
- bool isOperationLegalOrCustomOrPromote(unsigned Op, EVT VT,
- bool LegalOnly = false) const {
- if (LegalOnly)
- return isOperationLegal(Op, VT);
-
+ /// guide high-level lowering decisions. LegalOnly is an optional convenience
+ /// for code paths traversed pre and post legalisation.
+ bool isOperationLegalOrCustomOrPromote(unsigned Op, EVT VT,
+ bool LegalOnly = false) const {
+ if (LegalOnly)
+ return isOperationLegal(Op, VT);
+
return (VT == MVT::Other || isTypeLegal(VT)) &&
(getOperationAction(Op, VT) == Legal ||
getOperationAction(Op, VT) == Custom ||
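
A typical use is guarding a DAG combine on legality; the LegalOnly flag lets the same call site serve both pre- and post-legalization code paths. A sketch:

    bool canFormSMax(const TargetLowering &TLI, EVT VT, bool AfterLegalize) {
      // Post-legalization, only strictly Legal operations are acceptable.
      return TLI.isOperationLegalOrCustom(ISD::SMAX, VT,
                                          /*LegalOnly=*/AfterLegalize);
    }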
@@ -1325,10 +1325,10 @@ public:
getIndexedMaskedStoreAction(IdxMode, VT.getSimpleVT()) == Custom);
}
- // Returns true if VT is a legal index type for masked gathers/scatters
- // on this target
- virtual bool shouldRemoveExtendFromGSIndex(EVT VT) const { return false; }
-
+ // Returns true if VT is a legal index type for masked gathers/scatters
+ // on this target
+ virtual bool shouldRemoveExtendFromGSIndex(EVT VT) const { return false; }
+
/// Return how the condition code should be treated: either it is legal, needs
/// to be expanded to some other code sequence, or the target has a custom
/// expander for it.
@@ -1665,11 +1665,11 @@ public:
const MachineMemOperand &MMO,
bool *Fast = nullptr) const;
- /// LLT handling variant.
- bool allowsMemoryAccess(LLVMContext &Context, const DataLayout &DL, LLT Ty,
- const MachineMemOperand &MMO,
- bool *Fast = nullptr) const;
-
+ /// LLT handling variant.
+ bool allowsMemoryAccess(LLVMContext &Context, const DataLayout &DL, LLT Ty,
+ const MachineMemOperand &MMO,
+ bool *Fast = nullptr) const;
+
/// Returns the target specific optimal type for load and store operations as
/// a result of memset, memcpy, and memmove lowering.
/// It returns EVT::Other if the type should be determined using generic
@@ -1710,7 +1710,7 @@ public:
/// If a physical register, this specifies the register that
/// llvm.savestack/llvm.restorestack should save and restore.
- Register getStackPointerRegisterToSaveRestore() const {
+ Register getStackPointerRegisterToSaveRestore() const {
return StackPointerRegisterToSaveRestore;
}
@@ -1802,7 +1802,7 @@ public:
/// Returns true if a cast from SrcAS to DestAS is "cheap", such that e.g. we
/// are happy to sink it into basic blocks. A cast may be free, but not
/// necessarily a no-op. e.g. a free truncate from a 64-bit to 32-bit pointer.
- virtual bool isFreeAddrSpaceCast(unsigned SrcAS, unsigned DestAS) const;
+ virtual bool isFreeAddrSpaceCast(unsigned SrcAS, unsigned DestAS) const;
/// Return true if the pointer arguments to CI should be aligned by aligning
/// the object whose address is being passed. If so then MinSize is set to the
@@ -2792,10 +2792,10 @@ public:
return false;
}
- /// Does this target require the clearing of high-order bits in a register
- /// passed to the fp16 to fp conversion library function.
- virtual bool shouldKeepZExtForFP16Conv() const { return false; }
-
+  /// Does this target require the clearing of high-order bits in a register
+  /// passed to the fp16 to fp conversion library function?
+ virtual bool shouldKeepZExtForFP16Conv() const { return false; }
+
//===--------------------------------------------------------------------===//
// Runtime Library hooks
//
@@ -4216,7 +4216,7 @@ public:
// Lower custom output constraints. If invalid, return SDValue().
virtual SDValue LowerAsmOutputForConstraint(SDValue &Chain, SDValue &Flag,
- const SDLoc &DL,
+ const SDLoc &DL,
const AsmOperandInfo &OpInfo,
SelectionDAG &DAG) const;
@@ -4283,20 +4283,20 @@ public:
return SDValue();
}
- /// Return a target-dependent comparison result if the input operand is
- /// suitable for use with a square root estimate calculation. For example, the
- /// comparison may check if the operand is NAN, INF, zero, normal, etc. The
- /// result should be used as the condition operand for a select or branch.
- virtual SDValue getSqrtInputTest(SDValue Operand, SelectionDAG &DAG,
- const DenormalMode &Mode) const;
-
- /// Return a target-dependent result if the input operand is not suitable for
- /// use with a square root estimate calculation.
- virtual SDValue getSqrtResultForDenormInput(SDValue Operand,
- SelectionDAG &DAG) const {
- return DAG.getConstantFP(0.0, SDLoc(Operand), Operand.getValueType());
- }
-
+ /// Return a target-dependent comparison result if the input operand is
+ /// suitable for use with a square root estimate calculation. For example, the
+ /// comparison may check if the operand is NAN, INF, zero, normal, etc. The
+ /// result should be used as the condition operand for a select or branch.
+ virtual SDValue getSqrtInputTest(SDValue Operand, SelectionDAG &DAG,
+ const DenormalMode &Mode) const;
+
+ /// Return a target-dependent result if the input operand is not suitable for
+ /// use with a square root estimate calculation.
+ virtual SDValue getSqrtResultForDenormInput(SDValue Operand,
+ SelectionDAG &DAG) const {
+ return DAG.getConstantFP(0.0, SDLoc(Operand), Operand.getValueType());
+ }
+
//===--------------------------------------------------------------------===//
// Legalization utility functions
//
@@ -4311,7 +4311,7 @@ public:
/// \param RL Low bits of the RHS of the MUL. See LL for meaning
/// \param RH High bits of the RHS of the MUL. See LL for meaning.
/// \returns true if the node has been expanded, false if it has not
- bool expandMUL_LOHI(unsigned Opcode, EVT VT, const SDLoc &dl, SDValue LHS,
+ bool expandMUL_LOHI(unsigned Opcode, EVT VT, const SDLoc &dl, SDValue LHS,
SDValue RHS, SmallVectorImpl<SDValue> &Result, EVT HiLoVT,
SelectionDAG &DAG, MulExpansionKind Kind,
SDValue LL = SDValue(), SDValue LH = SDValue(),
@@ -4339,12 +4339,12 @@ public:
/// Expand rotations.
/// \param N Node to expand
- /// \param AllowVectorOps expand vector rotate, this should only be performed
- /// if the legalization is happening outside of LegalizeVectorOps
+  /// \param AllowVectorOps whether to expand a vector rotate; this should only
+  /// be done if the legalization is happening outside of LegalizeVectorOps
/// \param Result output after conversion
/// \returns True, if the expansion was successful, false otherwise
- bool expandROT(SDNode *N, bool AllowVectorOps, SDValue &Result,
- SelectionDAG &DAG) const;
+ bool expandROT(SDNode *N, bool AllowVectorOps, SDValue &Result,
+ SelectionDAG &DAG) const;
/// Expand float(f32) to SINT(i64) conversion
/// \param N Node to expand
@@ -4371,11 +4371,11 @@ public:
/// Expand fminnum/fmaxnum into fminnum_ieee/fmaxnum_ieee with quieted inputs.
SDValue expandFMINNUM_FMAXNUM(SDNode *N, SelectionDAG &DAG) const;
- /// Expand FP_TO_[US]INT_SAT into FP_TO_[US]INT and selects or min/max.
- /// \param N Node to expand
- /// \returns The expansion result
- SDValue expandFP_TO_INT_SAT(SDNode *N, SelectionDAG &DAG) const;
-
+ /// Expand FP_TO_[US]INT_SAT into FP_TO_[US]INT and selects or min/max.
+ /// \param N Node to expand
+ /// \returns The expansion result
+ SDValue expandFP_TO_INT_SAT(SDNode *N, SelectionDAG &DAG) const;
+
/// Expand CTPOP nodes. Expands vector/scalar CTPOP nodes,
/// vector nodes can only succeed if all operations are legal/custom.
/// \param N Node to expand
@@ -4402,10 +4402,10 @@ public:
/// (ABS x) -> (XOR (ADD x, (SRA x, type_size)), (SRA x, type_size))
/// \param N Node to expand
/// \param Result output after conversion
- /// \param IsNegative indicate negated abs
+  /// \param IsNegative indicates a negated abs
/// \returns True, if the expansion was successful, false otherwise
- bool expandABS(SDNode *N, SDValue &Result, SelectionDAG &DAG,
- bool IsNegative = false) const;
+ bool expandABS(SDNode *N, SDValue &Result, SelectionDAG &DAG,
+ bool IsNegative = false) const;
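
The documented expansion as plain integer arithmetic, for reference; a sketch, where the unsigned casts only avoid signed-overflow pitfalls in C++:

    int32_t absViaShift(int32_t X) {
      int32_t Sign = X >> 31; // SRA by type_size - 1: all-ones if negative
      return (int32_t)(((uint32_t)X + (uint32_t)Sign) ^ (uint32_t)Sign);
    }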
/// Turn load of vector type into a load of the individual elements.
/// \param LD load to expand
@@ -4445,18 +4445,18 @@ public:
SDValue getVectorElementPointer(SelectionDAG &DAG, SDValue VecPtr, EVT VecVT,
SDValue Index) const;
- /// Method for building the DAG expansion of ISD::[US][MIN|MAX]. This
- /// method accepts integers as its arguments.
- SDValue expandIntMINMAX(SDNode *Node, SelectionDAG &DAG) const;
-
+ /// Method for building the DAG expansion of ISD::[US][MIN|MAX]. This
+ /// method accepts integers as its arguments.
+ SDValue expandIntMINMAX(SDNode *Node, SelectionDAG &DAG) const;
+
/// Method for building the DAG expansion of ISD::[US][ADD|SUB]SAT. This
/// method accepts integers as its arguments.
SDValue expandAddSubSat(SDNode *Node, SelectionDAG &DAG) const;
- /// Method for building the DAG expansion of ISD::[US]SHLSAT. This
- /// method accepts integers as its arguments.
- SDValue expandShlSat(SDNode *Node, SelectionDAG &DAG) const;
-
+ /// Method for building the DAG expansion of ISD::[US]SHLSAT. This
+ /// method accepts integers as its arguments.
+ SDValue expandShlSat(SDNode *Node, SelectionDAG &DAG) const;
+
/// Method for building the DAG expansion of ISD::[U|S]MULFIX[SAT]. This
/// method accepts integers as its arguments.
SDValue expandFixedPointMul(SDNode *Node, SelectionDAG &DAG) const;
@@ -4488,9 +4488,9 @@ public:
/// only the first Count elements of the vector are used.
SDValue expandVecReduce(SDNode *Node, SelectionDAG &DAG) const;
- /// Expand a VECREDUCE_SEQ_* into an explicit ordered calculation.
- SDValue expandVecReduceSeq(SDNode *Node, SelectionDAG &DAG) const;
-
+ /// Expand a VECREDUCE_SEQ_* into an explicit ordered calculation.
+ SDValue expandVecReduceSeq(SDNode *Node, SelectionDAG &DAG) const;
+
/// Expand an SREM or UREM using SDIV/UDIV or SDIVREM/UDIVREM, if legal.
/// Returns true if the expansion was successful.
bool expandREM(SDNode *Node, SDValue &Result, SelectionDAG &DAG) const;
@@ -4545,10 +4545,10 @@ public:
// combiner can fold the new nodes.
SDValue lowerCmpEqZeroToCtlzSrl(SDValue Op, SelectionDAG &DAG) const;
- /// Give targets the chance to reduce the number of distinct addresing modes.
- ISD::MemIndexType getCanonicalIndexType(ISD::MemIndexType IndexType,
- EVT MemVT, SDValue Offsets) const;
-
+  /// Give targets the chance to reduce the number of distinct addressing modes.
+ ISD::MemIndexType getCanonicalIndexType(ISD::MemIndexType IndexType,
+ EVT MemVT, SDValue Offsets) const;
+
private:
SDValue foldSetCCWithAnd(EVT VT, SDValue N0, SDValue N1, ISD::CondCode Cond,
const SDLoc &DL, DAGCombinerInfo &DCI) const;
diff --git a/contrib/libs/llvm12/include/llvm/CodeGen/TargetLoweringObjectFileImpl.h b/contrib/libs/llvm12/include/llvm/CodeGen/TargetLoweringObjectFileImpl.h
index a53f94ddba..c8ca33fd39 100644
--- a/contrib/libs/llvm12/include/llvm/CodeGen/TargetLoweringObjectFileImpl.h
+++ b/contrib/libs/llvm12/include/llvm/CodeGen/TargetLoweringObjectFileImpl.h
@@ -28,7 +28,7 @@ namespace llvm {
class GlobalValue;
class MachineModuleInfo;
-class MachineFunction;
+class MachineFunction;
class MCContext;
class MCExpr;
class MCSection;
@@ -45,7 +45,7 @@ protected:
MCSymbolRefExpr::VK_None;
public:
- TargetLoweringObjectFileELF();
+ TargetLoweringObjectFileELF();
~TargetLoweringObjectFileELF() override = default;
void Initialize(MCContext &Ctx, const TargetMachine &TM) override;
@@ -70,8 +70,8 @@ public:
MCSection *getSectionForJumpTable(const Function &F,
const TargetMachine &TM) const override;
- MCSection *getSectionForLSDA(const Function &F,
- const TargetMachine &TM) const override;
+ MCSection *getSectionForLSDA(const Function &F,
+ const TargetMachine &TM) const override;
MCSection *
getSectionForMachineBasicBlock(const Function &F,
@@ -104,9 +104,9 @@ public:
const GlobalValue *RHS,
const TargetMachine &TM) const override;
- const MCExpr *lowerDSOLocalEquivalent(const DSOLocalEquivalent *Equiv,
- const TargetMachine &TM) const override;
-
+ const MCExpr *lowerDSOLocalEquivalent(const DSOLocalEquivalent *Equiv,
+ const TargetMachine &TM) const override;
+
MCSection *getSectionForCommandLines() const override;
};
@@ -155,7 +155,7 @@ public:
class TargetLoweringObjectFileCOFF : public TargetLoweringObjectFile {
mutable unsigned NextUniqueID = 0;
- const TargetMachine *TM = nullptr;
+ const TargetMachine *TM = nullptr;
public:
~TargetLoweringObjectFileCOFF() override = default;
@@ -190,9 +190,9 @@ public:
MCSection *getSectionForConstant(const DataLayout &DL, SectionKind Kind,
const Constant *C,
Align &Alignment) const override;
-
-private:
- void emitLinkerDirectives(MCStreamer &Streamer, Module &M) const;
+
+private:
+ void emitLinkerDirectives(MCStreamer &Streamer, Module &M) const;
};
class TargetLoweringObjectFileWasm : public TargetLoweringObjectFile {
@@ -227,10 +227,10 @@ public:
TargetLoweringObjectFileXCOFF() = default;
~TargetLoweringObjectFileXCOFF() override = default;
- static bool ShouldEmitEHBlock(const MachineFunction *MF);
-
- static MCSymbol *getEHInfoTableSymbol(const MachineFunction *MF);
-
+ static bool ShouldEmitEHBlock(const MachineFunction *MF);
+
+ static MCSymbol *getEHInfoTableSymbol(const MachineFunction *MF);
+
void Initialize(MCContext &Ctx, const TargetMachine &TM) override;
bool shouldPutJumpTableInFunctionSection(bool UsesLabelDifference,
@@ -260,13 +260,13 @@ public:
const Constant *C,
Align &Alignment) const override;
- static XCOFF::StorageClass getStorageClassForGlobal(const GlobalValue *GV);
+ static XCOFF::StorageClass getStorageClassForGlobal(const GlobalValue *GV);
MCSection *
getSectionForFunctionDescriptor(const Function *F,
const TargetMachine &TM) const override;
- MCSection *getSectionForTOCEntry(const MCSymbol *Sym,
- const TargetMachine &TM) const override;
+ MCSection *getSectionForTOCEntry(const MCSymbol *Sym,
+ const TargetMachine &TM) const override;
/// For external functions, this will always return a function descriptor
/// csect.
@@ -278,7 +278,7 @@ public:
MCSymbol *getTargetSymbol(const GlobalValue *GV,
const TargetMachine &TM) const override;
- MCSymbol *getFunctionEntryPointSymbol(const GlobalValue *Func,
+ MCSymbol *getFunctionEntryPointSymbol(const GlobalValue *Func,
const TargetMachine &TM) const override;
};
diff --git a/contrib/libs/llvm12/include/llvm/CodeGen/TargetPassConfig.h b/contrib/libs/llvm12/include/llvm/CodeGen/TargetPassConfig.h
index 5137f56968..d15c47c1d0 100644
--- a/contrib/libs/llvm12/include/llvm/CodeGen/TargetPassConfig.h
+++ b/contrib/libs/llvm12/include/llvm/CodeGen/TargetPassConfig.h
@@ -32,7 +32,7 @@ struct MachineSchedContext;
class PassConfigImpl;
class ScheduleDAGInstrs;
class CSEConfigBase;
-class PassInstrumentationCallbacks;
+class PassInstrumentationCallbacks;
// The old pass manager infrastructure is hidden in a legacy namespace now.
namespace legacy {
@@ -195,7 +195,7 @@ public:
/// Insert InsertedPassID pass after TargetPassID pass.
void insertPass(AnalysisID TargetPassID, IdentifyingPassPtr InsertedPassID,
- bool VerifyAfter = true);
+ bool VerifyAfter = true);
/// Allow the target to enable a specific standard pass by default.
void enablePass(AnalysisID PassID) { substitutePass(PassID, PassID); }
@@ -321,17 +321,17 @@ public:
/// Add a pass to remove debug info from the MIR.
void addStripDebugPass();
- /// Add a pass to check synthesized debug info for MIR.
- void addCheckDebugPass();
-
+ /// Add a pass to check synthesized debug info for MIR.
+ void addCheckDebugPass();
+
/// Add standard passes before a pass that's about to be added. For example,
/// the DebugifyMachineModulePass if it is enabled.
void addMachinePrePasses(bool AllowDebugify = true);
/// Add standard passes after a pass that has just been added. For example,
/// the MachineVerifier if it is enabled.
- void addMachinePostPasses(const std::string &Banner, bool AllowVerify = true,
- bool AllowStrip = true);
+ void addMachinePostPasses(const std::string &Banner, bool AllowVerify = true,
+ bool AllowStrip = true);
/// Check whether or not GlobalISel should abort on error.
/// When this is disabled, GlobalISel will fall back on SDISel instead of
@@ -454,28 +454,28 @@ protected:
/// Return the pass that was added, or zero if no pass was added.
  /// @p verifyAfter if true and a machine function pass is being added, add an
  /// extra machine verification pass afterwards.
- AnalysisID addPass(AnalysisID PassID, bool verifyAfter = true);
+ AnalysisID addPass(AnalysisID PassID, bool verifyAfter = true);
/// Add a pass to the PassManager if that pass is supposed to be run, as
/// determined by the StartAfter and StopAfter options. Takes ownership of the
/// pass.
  /// @p verifyAfter if true and a machine function pass is being added, add an
  /// extra machine verification pass afterwards.
- void addPass(Pass *P, bool verifyAfter = true);
+ void addPass(Pass *P, bool verifyAfter = true);
  /// addMachinePasses helper to create the target-selected or overridden
/// regalloc pass.
virtual FunctionPass *createRegAllocPass(bool Optimized);
- /// Add core register allocator passes which do the actual register assignment
+ /// Add core register allocator passes which do the actual register assignment
/// and rewriting. \returns true if any passes were added.
- virtual bool addRegAssignAndRewriteFast();
- virtual bool addRegAssignAndRewriteOptimized();
+ virtual bool addRegAssignAndRewriteFast();
+ virtual bool addRegAssignAndRewriteOptimized();
};
-void registerCodeGenCallback(PassInstrumentationCallbacks &PIC,
- LLVMTargetMachine &);
-
+void registerCodeGenCallback(PassInstrumentationCallbacks &PIC,
+ LLVMTargetMachine &);
+
} // end namespace llvm
#endif // LLVM_CODEGEN_TARGETPASSCONFIG_H
diff --git a/contrib/libs/llvm12/include/llvm/CodeGen/TargetRegisterInfo.h b/contrib/libs/llvm12/include/llvm/CodeGen/TargetRegisterInfo.h
index 66f6653835..9b7248afd3 100644
--- a/contrib/libs/llvm12/include/llvm/CodeGen/TargetRegisterInfo.h
+++ b/contrib/libs/llvm12/include/llvm/CodeGen/TargetRegisterInfo.h
@@ -41,7 +41,7 @@
namespace llvm {
class BitVector;
-class DIExpression;
+class DIExpression;
class LiveRegMatrix;
class MachineFunction;
class MachineInstr;
@@ -95,21 +95,21 @@ public:
/// Return true if the specified register is included in this register class.
/// This does not include virtual registers.
- bool contains(Register Reg) const {
+ bool contains(Register Reg) const {
/// FIXME: Historically this function has returned false when given vregs
/// but it should probably only receive physical registers
- if (!Reg.isPhysical())
+ if (!Reg.isPhysical())
return false;
- return MC->contains(Reg.asMCReg());
+ return MC->contains(Reg.asMCReg());
}
/// Return true if both registers are in this class.
- bool contains(Register Reg1, Register Reg2) const {
+ bool contains(Register Reg1, Register Reg2) const {
    /// FIXME: Historically this function has returned false when given vregs
/// but it should probably only receive physical registers
- if (!Reg1.isPhysical() || !Reg2.isPhysical())
+ if (!Reg1.isPhysical() || !Reg2.isPhysical())
return false;
- return MC->contains(Reg1.asMCReg(), Reg2.asMCReg());
+ return MC->contains(Reg1.asMCReg(), Reg2.asMCReg());
}
/// Return the cost of copying a value between two registers in this class.
@@ -393,12 +393,12 @@ public:
/// The registers may be virtual registers.
bool regsOverlap(Register regA, Register regB) const {
if (regA == regB) return true;
- if (!regA.isPhysical() || !regB.isPhysical())
+ if (!regA.isPhysical() || !regB.isPhysical())
return false;
// Regunits are numerically ordered. Find a common unit.
- MCRegUnitIterator RUA(regA.asMCReg(), this);
- MCRegUnitIterator RUB(regB.asMCReg(), this);
+ MCRegUnitIterator RUA(regA.asMCReg(), this);
+ MCRegUnitIterator RUB(regB.asMCReg(), this);
do {
if (*RUA == *RUB) return true;
if (*RUA < *RUB) ++RUA;
@@ -408,9 +408,9 @@ public:
}
/// Returns true if Reg contains RegUnit.
- bool hasRegUnit(MCRegister Reg, Register RegUnit) const {
+ bool hasRegUnit(MCRegister Reg, Register RegUnit) const {
for (MCRegUnitIterator Units(Reg, this); Units.isValid(); ++Units)
- if (Register(*Units) == RegUnit)
+ if (Register(*Units) == RegUnit)
return true;
return false;
}
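
regsOverlap above is a merge-style intersection of two sorted regunit lists; the same pattern over any two sorted ranges, as a sketch:

    bool sortedRangesIntersect(ArrayRef<unsigned> A, ArrayRef<unsigned> B) {
      size_t I = 0, J = 0;
      while (I < A.size() && J < B.size()) {
        if (A[I] == B[J])
          return true;
        A[I] < B[J] ? ++I : ++J; // advance the side with the smaller unit
      }
      return false;
    }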
@@ -422,16 +422,16 @@ public:
virtual Register lookThruCopyLike(Register SrcReg,
const MachineRegisterInfo *MRI) const;
- /// Find the original SrcReg unless it is the target of a copy-like operation,
- /// in which case we chain backwards through all such operations to the
- /// ultimate source register. If a physical register is encountered, we stop
- /// the search.
- /// Return the original SrcReg if all the definitions in the chain only have
- /// one user and not a physical register.
- virtual Register
- lookThruSingleUseCopyChain(Register SrcReg,
- const MachineRegisterInfo *MRI) const;
-
+ /// Find the original SrcReg unless it is the target of a copy-like operation,
+ /// in which case we chain backwards through all such operations to the
+ /// ultimate source register. If a physical register is encountered, we stop
+ /// the search.
+  /// Return the original SrcReg if every definition in the chain has exactly
+  /// one user and is not a physical register.
+ virtual Register
+ lookThruSingleUseCopyChain(Register SrcReg,
+ const MachineRegisterInfo *MRI) const;
+
/// Return a null-terminated list of all of the callee-saved registers on
  /// this target. The registers should be in the order of desired callee-save
  /// stack frame offsets. The first register is closest to the incoming stack
@@ -466,13 +466,13 @@ public:
return nullptr;
}
- /// Return a register mask for the registers preserved by the unwinder,
- /// or nullptr if no custom mask is needed.
- virtual const uint32_t *
- getCustomEHPadPreservedMask(const MachineFunction &MF) const {
- return nullptr;
- }
-
+ /// Return a register mask for the registers preserved by the unwinder,
+ /// or nullptr if no custom mask is needed.
+ virtual const uint32_t *
+ getCustomEHPadPreservedMask(const MachineFunction &MF) const {
+ return nullptr;
+ }
+
/// Return a register mask that clobbers everything.
virtual const uint32_t *getNoPreservedMask() const {
llvm_unreachable("target does not provide no preserved mask");
@@ -918,11 +918,11 @@ public:
return false;
}
- /// Insert defining instruction(s) for a pointer to FrameIdx before
- /// insertion point I. Return materialized frame pointer.
- virtual Register materializeFrameBaseRegister(MachineBasicBlock *MBB,
- int FrameIdx,
- int64_t Offset) const {
+ /// Insert defining instruction(s) for a pointer to FrameIdx before
+  /// insertion point I. Return the materialized frame base register.
+ virtual Register materializeFrameBaseRegister(MachineBasicBlock *MBB,
+ int FrameIdx,
+ int64_t Offset) const {
llvm_unreachable("materializeFrameBaseRegister does not exist on this "
"target");
}
@@ -941,15 +941,15 @@ public:
llvm_unreachable("isFrameOffsetLegal does not exist on this target");
}
- /// Gets the DWARF expression opcodes for \p Offset.
- virtual void getOffsetOpcodes(const StackOffset &Offset,
- SmallVectorImpl<uint64_t> &Ops) const;
-
- /// Prepends a DWARF expression for \p Offset to DIExpression \p Expr.
- DIExpression *
- prependOffsetExpression(const DIExpression *Expr, unsigned PrependFlags,
- const StackOffset &Offset) const;
-
+ /// Gets the DWARF expression opcodes for \p Offset.
+ virtual void getOffsetOpcodes(const StackOffset &Offset,
+ SmallVectorImpl<uint64_t> &Ops) const;
+
+ /// Prepends a DWARF expression for \p Offset to DIExpression \p Expr.
+ DIExpression *
+ prependOffsetExpression(const DIExpression *Expr, unsigned PrependFlags,
+ const StackOffset &Offset) const;
+
/// Spill the register so it can be used by the register scavenger.
/// Return true if the register was spilled, false otherwise.
/// If this function does not spill the register, the scavenger
@@ -1003,36 +1003,36 @@ public:
virtual bool shouldRegionSplitForVirtReg(const MachineFunction &MF,
const LiveInterval &VirtReg) const;
- /// Last chance recoloring has a high compile time cost especially for
- /// targets with a lot of registers.
- /// This method is used to decide whether or not \p VirtReg should
- /// go through this expensive heuristic.
- /// When this target hook is hit, by returning false, there is a high
- /// chance that the register allocation will fail altogether (usually with
- /// "ran out of registers").
- /// That said, this error usually points to another problem in the
- /// optimization pipeline.
- virtual bool
- shouldUseLastChanceRecoloringForVirtReg(const MachineFunction &MF,
- const LiveInterval &VirtReg) const {
- return true;
- }
-
- /// Deferred spilling delays the spill insertion of a virtual register
- /// after every other allocation. By deferring the spilling, it is
- /// sometimes possible to eliminate that spilling altogether because
- /// something else could have been eliminated, thus leaving some space
- /// for the virtual register.
- /// However, this comes with a compile time impact because it adds one
- /// more stage to the greedy register allocator.
- /// This method is used to decide whether \p VirtReg should use the deferred
- /// spilling stage instead of being spilled right away.
- virtual bool
- shouldUseDeferredSpillingForVirtReg(const MachineFunction &MF,
- const LiveInterval &VirtReg) const {
- return false;
- }
-
+  /// Last chance recoloring has a high compile time cost, especially for
+  /// targets with a lot of registers.
+  /// This method is used to decide whether or not \p VirtReg should
+  /// go through this expensive heuristic.
+  /// If this hook returns false, there is a high chance that
+  /// register allocation will fail altogether (usually with
+  /// "ran out of registers").
+  /// That said, this error usually points to another problem in the
+  /// optimization pipeline.
+ virtual bool
+ shouldUseLastChanceRecoloringForVirtReg(const MachineFunction &MF,
+ const LiveInterval &VirtReg) const {
+ return true;
+ }
+
+ /// Deferred spilling delays the spill insertion of a virtual register
+ /// after every other allocation. By deferring the spilling, it is
+ /// sometimes possible to eliminate that spilling altogether because
+ /// something else could have been eliminated, thus leaving some space
+ /// for the virtual register.
+ /// However, this comes with a compile time impact because it adds one
+ /// more stage to the greedy register allocator.
+ /// This method is used to decide whether \p VirtReg should use the deferred
+ /// spilling stage instead of being spilled right away.
+ virtual bool
+ shouldUseDeferredSpillingForVirtReg(const MachineFunction &MF,
+ const LiveInterval &VirtReg) const {
+ return false;
+ }
+
//===--------------------------------------------------------------------===//
/// Debug information queries.
@@ -1057,7 +1057,7 @@ public:
/// Returns the physical register number of sub-register "Index"
/// for physical register RegNo. Return zero if the sub-register does not
/// exist.
- inline MCRegister getSubReg(MCRegister Reg, unsigned Idx) const {
+ inline MCRegister getSubReg(MCRegister Reg, unsigned Idx) const {
return static_cast<const MCRegisterInfo *>(this)->getSubReg(Reg, Idx);
}
};
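A usage sketch; the register and sub-register index names below are target-specific examples, not part of this interface:

// Query the 32-bit sub-register of a 64-bit GPR (X86 names as examples).
MCRegister Sub = TRI->getSubReg(X86::RAX, X86::sub_32bit); // X86::EAX
assert(Sub.isValid() && "sub-register does not exist for this register");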
@@ -1209,8 +1209,8 @@ public:
// This is useful when building IndexedMaps keyed on virtual registers
struct VirtReg2IndexFunctor {
- using argument_type = Register;
- unsigned operator()(Register Reg) const {
+ using argument_type = Register;
+ unsigned operator()(Register Reg) const {
return Register::virtReg2Index(Reg);
}
};
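A minimal sketch of the IndexedMap use the comment above alludes to (MRI and the weight values are assumed context):

// IndexedMap keyed on virtual registers: the functor maps each vreg to its
// dense index, so storage is a flat vector.
IndexedMap<unsigned, VirtReg2IndexFunctor> SpillWeights;
SpillWeights.resize(MRI.getNumVirtRegs());
Register VReg = Register::index2VirtReg(0);  // first virtual register
SpillWeights[VReg] = 42;                     // stored at index 0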
diff --git a/contrib/libs/llvm12/include/llvm/CodeGen/TargetSubtargetInfo.h b/contrib/libs/llvm12/include/llvm/CodeGen/TargetSubtargetInfo.h
index 25d84d7cf4..9d7c5055c4 100644
--- a/contrib/libs/llvm12/include/llvm/CodeGen/TargetSubtargetInfo.h
+++ b/contrib/libs/llvm12/include/llvm/CodeGen/TargetSubtargetInfo.h
@@ -65,8 +65,8 @@ class Triple;
///
class TargetSubtargetInfo : public MCSubtargetInfo {
protected: // Can only create subclasses...
- TargetSubtargetInfo(const Triple &TT, StringRef CPU, StringRef TuneCPU,
- StringRef FS, ArrayRef<SubtargetFeatureKV> PF,
+ TargetSubtargetInfo(const Triple &TT, StringRef CPU, StringRef TuneCPU,
+ StringRef FS, ArrayRef<SubtargetFeatureKV> PF,
ArrayRef<SubtargetSubTypeKV> PD,
const MCWriteProcResEntry *WPR,
const MCWriteLatencyEntry *WL,
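In practice a target's TableGen'erated subclass forwards its feature and scheduling tables to this constructor. A trimmed, hypothetical sketch; all MyTarget names are illustrative, and the trailing table pointers follow MCSubtargetInfo's parameter order, which is an assumption here:

// Hypothetical subtarget ctor forwarding TableGen'erated tables.
MyTargetSubtargetInfo::MyTargetSubtargetInfo(const Triple &TT, StringRef CPU,
                                             StringRef TuneCPU, StringRef FS)
    : TargetSubtargetInfo(TT, CPU, TuneCPU, FS, MyTargetFeatureKV,
                          MyTargetSubTypeKV, MyTargetWriteProcResTable,
                          MyTargetWriteLatencyTable, MyTargetReadAdvanceTable,
                          nullptr, nullptr, nullptr) {}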
diff --git a/contrib/libs/llvm12/include/llvm/CodeGen/TileShapeInfo.h b/contrib/libs/llvm12/include/llvm/CodeGen/TileShapeInfo.h
index c1d21411f8..083a3bd4e9 100644
--- a/contrib/libs/llvm12/include/llvm/CodeGen/TileShapeInfo.h
+++ b/contrib/libs/llvm12/include/llvm/CodeGen/TileShapeInfo.h
@@ -1,108 +1,108 @@
-#pragma once
-
-#ifdef __GNUC__
-#pragma GCC diagnostic push
-#pragma GCC diagnostic ignored "-Wunused-parameter"
-#endif
-
-//===- llvm/CodeGen/TileShapeInfo.h -----------------------------*- C++ -*-===//
-//
-// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
-// See https://llvm.org/LICENSE.txt for license information.
-// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
-//
-//===----------------------------------------------------------------------===//
-//
-/// \file Shape utility for AMX.
-/// AMX hardware requires the shape of a tile data register to be configured
-/// before use. The 2D shape consists of a row and a column. In the AMX
-/// intrinsics interface the shape is passed as the 1st and 2nd parameters and
-/// lowered to the 1st and 2nd machine operands of AMX pseudo instructions.
-/// The ShapeT class facilitates tile configuration and register allocation.
-/// The row and column are machine operands of AMX pseudo instructions.
-//
-//===----------------------------------------------------------------------===//
-
-#ifndef LLVM_CODEGEN_TILESHAPEINFO_H
-#define LLVM_CODEGEN_TILESHAPEINFO_H
-
-#include "llvm/ADT/DenseMapInfo.h"
-#include "llvm/CodeGen/MachineInstr.h"
-#include "llvm/CodeGen/MachineOperand.h"
-#include "llvm/CodeGen/MachineRegisterInfo.h"
-#include "llvm/CodeGen/Register.h"
-#include <utility>
-
-namespace llvm {
-
-class ShapeT {
-public:
- ShapeT(MachineOperand *Row, MachineOperand *Col,
- const MachineRegisterInfo *MRI = nullptr)
- : Row(Row), Col(Col) {
- if (MRI)
- deduceImm(MRI);
- }
- ShapeT()
- : Row(nullptr), Col(nullptr), RowImm(InvalidImmShape),
- ColImm(InvalidImmShape) {}
- bool operator==(const ShapeT &Shape) {
- MachineOperand *R = Shape.Row;
- MachineOperand *C = Shape.Col;
- if (!R || !C)
- return false;
- if (!Row || !Col)
- return false;
- if (Row->getReg() == R->getReg() && Col->getReg() == C->getReg())
- return true;
- if ((RowImm != InvalidImmShape) && (ColImm != InvalidImmShape))
- return RowImm == Shape.getRowImm() && ColImm == Shape.getColImm();
- return false;
- }
-
- bool operator!=(const ShapeT &Shape) { return !(*this == Shape); }
-
- MachineOperand *getRow() const { return Row; }
-
- MachineOperand *getCol() const { return Col; }
-
- int64_t getRowImm() const { return RowImm; }
-
- int64_t getColImm() const { return ColImm; }
-
- bool isValid() { return (Row != nullptr) && (Col != nullptr); }
-
- void deduceImm(const MachineRegisterInfo *MRI) {
- // All defs must be the same value; otherwise the MIs are invalid.
- // Find the immediate.
- // TODO: copy propagation.
- auto GetImm = [&](Register Reg) {
- int64_t Imm = InvalidImmShape;
- for (const MachineOperand &DefMO : MRI->def_operands(Reg)) {
- const auto *MI = DefMO.getParent();
- if (MI->isMoveImmediate()) {
- Imm = MI->getOperand(1).getImm();
- break;
- }
- }
- return Imm;
- };
- RowImm = GetImm(Row->getReg());
- ColImm = GetImm(Col->getReg());
- }
-
-private:
- static constexpr int64_t InvalidImmShape = -1;
- MachineOperand *Row;
- MachineOperand *Col;
- int64_t RowImm;
- int64_t ColImm;
-};
-
-} // namespace llvm
-
-#endif
-
-#ifdef __GNUC__
-#pragma GCC diagnostic pop
-#endif
+#pragma once
+
+#ifdef __GNUC__
+#pragma GCC diagnostic push
+#pragma GCC diagnostic ignored "-Wunused-parameter"
+#endif
+
+//===- llvm/CodeGen/TileShapeInfo.h -----------------------------*- C++ -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+/// \file Shape utility for AMX.
+/// AMX hardware requires the shape of a tile data register to be configured
+/// before use. The 2D shape consists of a row and a column. In the AMX
+/// intrinsics interface the shape is passed as the 1st and 2nd parameters and
+/// lowered to the 1st and 2nd machine operands of AMX pseudo instructions.
+/// The ShapeT class facilitates tile configuration and register allocation.
+/// The row and column are machine operands of AMX pseudo instructions.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_CODEGEN_TILESHAPEINFO_H
+#define LLVM_CODEGEN_TILESHAPEINFO_H
+
+#include "llvm/ADT/DenseMapInfo.h"
+#include "llvm/CodeGen/MachineInstr.h"
+#include "llvm/CodeGen/MachineOperand.h"
+#include "llvm/CodeGen/MachineRegisterInfo.h"
+#include "llvm/CodeGen/Register.h"
+#include <utility>
+
+namespace llvm {
+
+class ShapeT {
+public:
+ ShapeT(MachineOperand *Row, MachineOperand *Col,
+ const MachineRegisterInfo *MRI = nullptr)
+ : Row(Row), Col(Col) {
+ if (MRI)
+ deduceImm(MRI);
+ }
+ ShapeT()
+ : Row(nullptr), Col(nullptr), RowImm(InvalidImmShape),
+ ColImm(InvalidImmShape) {}
+ bool operator==(const ShapeT &Shape) {
+ MachineOperand *R = Shape.Row;
+ MachineOperand *C = Shape.Col;
+ if (!R || !C)
+ return false;
+ if (!Row || !Col)
+ return false;
+ if (Row->getReg() == R->getReg() && Col->getReg() == C->getReg())
+ return true;
+ if ((RowImm != InvalidImmShape) && (ColImm != InvalidImmShape))
+ return RowImm == Shape.getRowImm() && ColImm == Shape.getColImm();
+ return false;
+ }
+
+ bool operator!=(const ShapeT &Shape) { return !(*this == Shape); }
+
+ MachineOperand *getRow() const { return Row; }
+
+ MachineOperand *getCol() const { return Col; }
+
+ int64_t getRowImm() const { return RowImm; }
+
+ int64_t getColImm() const { return ColImm; }
+
+ bool isValid() { return (Row != nullptr) && (Col != nullptr); }
+
+ void deduceImm(const MachineRegisterInfo *MRI) {
+ // All defs must be the same value; otherwise the MIs are invalid.
+ // Find the immediate.
+ // TODO: copy propagation.
+ auto GetImm = [&](Register Reg) {
+ int64_t Imm = InvalidImmShape;
+ for (const MachineOperand &DefMO : MRI->def_operands(Reg)) {
+ const auto *MI = DefMO.getParent();
+ if (MI->isMoveImmediate()) {
+ Imm = MI->getOperand(1).getImm();
+ break;
+ }
+ }
+ return Imm;
+ };
+ RowImm = GetImm(Row->getReg());
+ ColImm = GetImm(Col->getReg());
+ }
+
+private:
+ static constexpr int64_t InvalidImmShape = -1;
+ MachineOperand *Row;
+ MachineOperand *Col;
+ int64_t RowImm;
+ int64_t ColImm;
+};
+
+} // namespace llvm
+
+#endif
+
+#ifdef __GNUC__
+#pragma GCC diagnostic pop
+#endif
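A usage sketch for ShapeT: build it from the row/column operands of an AMX pseudo and let deduceImm() recover constant shapes. MI, MRI, and the operand positions are assumptions about the surrounding code:

// Row/column are assumed to be operands 1 and 2 of the pseudo (operand 0
// being the tile def). deduceImm() walks the vreg defs looking for
// move-immediates; getRowImm()/getColImm() stay at -1 if none is found.
ShapeT Shape(&MI.getOperand(1), &MI.getOperand(2), &MRI);
if (Shape.isValid() && Shape.getRowImm() >= 0 && Shape.getColImm() >= 0) {
  // Both dimensions are compile-time constants; a tile configuration
  // could be emitted directly here.
}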
diff --git a/contrib/libs/llvm12/include/llvm/CodeGen/ValueTypes.h b/contrib/libs/llvm12/include/llvm/CodeGen/ValueTypes.h
index a442a4cc2d..08d16a7a1d 100644
--- a/contrib/libs/llvm12/include/llvm/CodeGen/ValueTypes.h
+++ b/contrib/libs/llvm12/include/llvm/CodeGen/ValueTypes.h
@@ -99,17 +99,17 @@ namespace llvm {
/// with the element type converted to an integer type with the same
/// bitwidth.
EVT changeVectorElementTypeToInteger() const {
- if (isSimple())
- return getSimpleVT().changeVectorElementTypeToInteger();
- return changeExtendedVectorElementTypeToInteger();
+ if (isSimple())
+ return getSimpleVT().changeVectorElementTypeToInteger();
+ return changeExtendedVectorElementTypeToInteger();
}
/// Return a VT for a vector type whose attributes match this type, except
/// for the element type, which is chosen by the caller.
EVT changeVectorElementType(EVT EltVT) const {
- if (isSimple() && EltVT.isSimple())
- return getSimpleVT().changeVectorElementType(EltVT.getSimpleVT());
- return changeExtendedVectorElementType(EltVT);
+ if (isSimple() && EltVT.isSimple())
+ return getSimpleVT().changeVectorElementType(EltVT.getSimpleVT());
+ return changeExtendedVectorElementType(EltVT);
}
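Two quick sketches of these conversions (Ctx is an assumed LLVMContext):

// v4f32 -> v4i32: element count and width preserved, element type swapped
// for a same-width integer.
EVT VT = EVT::getVectorVT(Ctx, MVT::f32, 4);
EVT IntVT = VT.changeVectorElementTypeToInteger();
assert(IntVT == EVT(MVT::v4i32));

// v4f32 -> v4f64 via a caller-chosen element type.
EVT WideVT = VT.changeVectorElementType(EVT(MVT::f64));
assert(WideVT == EVT(MVT::v4f64));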
/// Return the type converted to an equivalently sized integer or vector
@@ -120,7 +120,7 @@ namespace llvm {
return changeVectorElementTypeToInteger();
if (isSimple())
- return getSimpleVT().changeTypeToInteger();
+ return getSimpleVT().changeTypeToInteger();
return changeExtendedTypeToInteger();
}
@@ -211,7 +211,7 @@ namespace llvm {
}
/// Return true if the bit size is a multiple of 8.
- bool isByteSized() const { return getSizeInBits().isKnownMultipleOf(8); }
+ bool isByteSized() const { return getSizeInBits().isKnownMultipleOf(8); }
/// Return true if the size is a power-of-two number of bytes.
bool isRound() const {
@@ -227,58 +227,58 @@ namespace llvm {
return getSizeInBits() == VT.getSizeInBits();
}
- /// Return true if we know at compile time this has more bits than VT.
- bool knownBitsGT(EVT VT) const {
- return TypeSize::isKnownGT(getSizeInBits(), VT.getSizeInBits());
- }
-
- /// Return true if we know at compile time that this has at least as many
- /// bits as VT.
- bool knownBitsGE(EVT VT) const {
- return TypeSize::isKnownGE(getSizeInBits(), VT.getSizeInBits());
- }
-
- /// Return true if we know at compile time this has fewer bits than VT.
- bool knownBitsLT(EVT VT) const {
- return TypeSize::isKnownLT(getSizeInBits(), VT.getSizeInBits());
- }
-
- /// Return true if we know at compile time that this has at most as many
- /// bits as VT.
- bool knownBitsLE(EVT VT) const {
- return TypeSize::isKnownLE(getSizeInBits(), VT.getSizeInBits());
- }
-
+ /// Return true if we know at compile time this has more bits than VT.
+ bool knownBitsGT(EVT VT) const {
+ return TypeSize::isKnownGT(getSizeInBits(), VT.getSizeInBits());
+ }
+
+ /// Return true if we know at compile time that this has at least as many
+ /// bits as VT.
+ bool knownBitsGE(EVT VT) const {
+ return TypeSize::isKnownGE(getSizeInBits(), VT.getSizeInBits());
+ }
+
+ /// Return true if we know at compile time this has fewer bits than VT.
+ bool knownBitsLT(EVT VT) const {
+ return TypeSize::isKnownLT(getSizeInBits(), VT.getSizeInBits());
+ }
+
+ /// Return true if we know at compile time that this has at most as many
+ /// bits as VT.
+ bool knownBitsLE(EVT VT) const {
+ return TypeSize::isKnownLE(getSizeInBits(), VT.getSizeInBits());
+ }
+
/// Return true if this has more bits than VT.
bool bitsGT(EVT VT) const {
if (EVT::operator==(VT)) return false;
- assert(isScalableVector() == VT.isScalableVector() &&
- "Comparison between scalable and fixed types");
- return knownBitsGT(VT);
+ assert(isScalableVector() == VT.isScalableVector() &&
+ "Comparison between scalable and fixed types");
+ return knownBitsGT(VT);
}
/// Return true if this has no fewer bits than VT.
bool bitsGE(EVT VT) const {
if (EVT::operator==(VT)) return true;
- assert(isScalableVector() == VT.isScalableVector() &&
- "Comparison between scalable and fixed types");
- return knownBitsGE(VT);
+ assert(isScalableVector() == VT.isScalableVector() &&
+ "Comparison between scalable and fixed types");
+ return knownBitsGE(VT);
}
/// Return true if this has less bits than VT.
bool bitsLT(EVT VT) const {
if (EVT::operator==(VT)) return false;
- assert(isScalableVector() == VT.isScalableVector() &&
- "Comparison between scalable and fixed types");
- return knownBitsLT(VT);
+ assert(isScalableVector() == VT.isScalableVector() &&
+ "Comparison between scalable and fixed types");
+ return knownBitsLT(VT);
}
/// Return true if this has no more bits than VT.
bool bitsLE(EVT VT) const {
if (EVT::operator==(VT)) return true;
- assert(isScalableVector() == VT.isScalableVector() &&
- "Comparison between scalable and fixed types");
- return knownBitsLE(VT);
+ assert(isScalableVector() == VT.isScalableVector() &&
+ "Comparison between scalable and fixed types");
+ return knownBitsLE(VT);
}
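The knownBits* predicates are the safe form when scalable and fixed types may meet; the bits* forms assert that both sides have the same scalability. A short sketch (Ctx is an assumed LLVMContext):

// Comparing scalable against fixed: bitsGT() would assert here, while
// knownBitsGT() answers conservatively.
EVT Fixed    = EVT::getVectorVT(Ctx, MVT::i32, 4);                 // v4i32
EVT Scalable = EVT::getVectorVT(Ctx, MVT::i32,
                                ElementCount::getScalable(4));     // nxv4i32
bool B = Scalable.knownBitsGT(Fixed); // false: not provable for all vscale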
/// Return the SimpleValueType held in the specified simple EVT.
@@ -310,7 +310,7 @@ namespace llvm {
if (isScalableVector())
WithColor::warning()
<< "Possible incorrect use of EVT::getVectorNumElements() for "
- "scalable vector. Scalable flag may be dropped, use "
+ "scalable vector. Scalable flag may be dropped, use "
"EVT::getVectorElementCount() instead\n";
#endif
if (isSimple())
@@ -329,7 +329,7 @@ namespace llvm {
/// Given a vector type, return the minimum number of elements it contains.
unsigned getVectorMinNumElements() const {
- return getVectorElementCount().getKnownMinValue();
+ return getVectorElementCount().getKnownMinValue();
}
/// Return the size of the specified value type in bits.
@@ -343,16 +343,16 @@ namespace llvm {
return getExtendedSizeInBits();
}
- /// Return the size of the specified fixed width value type in bits. The
- /// function will assert if the type is scalable.
- uint64_t getFixedSizeInBits() const {
- return getSizeInBits().getFixedSize();
- }
-
- uint64_t getScalarSizeInBits() const {
- return getScalarType().getSizeInBits().getFixedSize();
+ /// Return the size of the specified fixed width value type in bits. The
+ /// function will assert if the type is scalable.
+ uint64_t getFixedSizeInBits() const {
+ return getSizeInBits().getFixedSize();
}
+ uint64_t getScalarSizeInBits() const {
+ return getScalarType().getSizeInBits().getFixedSize();
+ }
+
/// Return the number of bytes overwritten by a store of the specified value
/// type.
///
@@ -414,19 +414,19 @@ namespace llvm {
EVT getHalfNumVectorElementsVT(LLVMContext &Context) const {
EVT EltVT = getVectorElementType();
auto EltCnt = getVectorElementCount();
- assert(EltCnt.isKnownEven() && "Splitting vector, but not in half!");
- return EVT::getVectorVT(Context, EltVT, EltCnt.divideCoefficientBy(2));
- }
-
- // Return a VT for a vector type with the same element type but
- // double the number of elements. The type returned may be an
- // extended type.
- EVT getDoubleNumVectorElementsVT(LLVMContext &Context) const {
- EVT EltVT = getVectorElementType();
- auto EltCnt = getVectorElementCount();
- return EVT::getVectorVT(Context, EltVT, EltCnt * 2);
- }
-
+ assert(EltCnt.isKnownEven() && "Splitting vector, but not in half!");
+ return EVT::getVectorVT(Context, EltVT, EltCnt.divideCoefficientBy(2));
+ }
+
+ // Return a VT for a vector type with the same element type but
+ // double the number of elements. The type returned may be an
+ // extended type.
+ EVT getDoubleNumVectorElementsVT(LLVMContext &Context) const {
+ EVT EltVT = getVectorElementType();
+ auto EltCnt = getVectorElementCount();
+ return EVT::getVectorVT(Context, EltVT, EltCnt * 2);
+ }
+
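A sketch of halving and doubling (Ctx is an assumed LLVMContext):

EVT V8  = EVT::getVectorVT(Ctx, MVT::i16, 8);   // v8i16
EVT V4  = V8.getHalfNumVectorElementsVT(Ctx);   // v4i16
EVT V16 = V8.getDoubleNumVectorElementsVT(Ctx); // v16i16
assert(V4.getVectorNumElements() == 4 && V16.getVectorNumElements() == 16);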
/// Returns true if the given vector is a power of 2.
bool isPow2VectorType() const {
unsigned NElts = getVectorMinNumElements();
@@ -438,8 +438,8 @@ namespace llvm {
EVT getPow2VectorType(LLVMContext &Context) const {
if (!isPow2VectorType()) {
ElementCount NElts = getVectorElementCount();
- unsigned NewMinCount = 1 << Log2_32_Ceil(NElts.getKnownMinValue());
- NElts = ElementCount::get(NewMinCount, NElts.isScalable());
+ unsigned NewMinCount = 1 << Log2_32_Ceil(NElts.getKnownMinValue());
+ NElts = ElementCount::get(NewMinCount, NElts.isScalable());
return EVT::getVectorVT(Context, getVectorElementType(), NElts);
}
else {
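Rounding up a non-power-of-2 vector, as the method above implements; a minimal sketch (Ctx is an assumed LLVMContext):

// v3f32 (3 elements) is widened to the next power of 2, v4f32; a type
// that is already a power of 2 is returned unchanged.
EVT V3 = EVT::getVectorVT(Ctx, MVT::f32, 3);
assert(V3.getPow2VectorType(Ctx) == EVT(MVT::v4f32));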
diff --git a/contrib/libs/llvm12/include/llvm/CodeGen/ValueTypes.td b/contrib/libs/llvm12/include/llvm/CodeGen/ValueTypes.td
index d13d0a7772..c138d52700 100644
--- a/contrib/libs/llvm12/include/llvm/CodeGen/ValueTypes.td
+++ b/contrib/libs/llvm12/include/llvm/CodeGen/ValueTypes.td
@@ -87,117 +87,117 @@ def v4i64 : ValueType<256, 60>; // 4 x i64 vector value
def v8i64 : ValueType<512, 61>; // 8 x i64 vector value
def v16i64 : ValueType<1024,62>; // 16 x i64 vector value
def v32i64 : ValueType<2048,63>; // 32 x i64 vector value
-def v64i64 : ValueType<4096,64>; // 64 x i64 vector value
-def v128i64: ValueType<8192,65>; // 128 x i64 vector value
-def v256i64: ValueType<16384,66>; // 256 x i64 vector value
-
-def v1i128 : ValueType<128, 67>; // 1 x i128 vector value
-
-def v2f16 : ValueType<32 , 68>; // 2 x f16 vector value
-def v3f16 : ValueType<48 , 69>; // 3 x f16 vector value
-def v4f16 : ValueType<64 , 70>; // 4 x f16 vector value
-def v8f16 : ValueType<128, 71>; // 8 x f16 vector value
-def v16f16 : ValueType<256, 72>; // 16 x f16 vector value
-def v32f16 : ValueType<512, 73>; // 32 x f16 vector value
-def v64f16 : ValueType<1024, 74>; // 64 x f16 vector value
-def v128f16 : ValueType<2048, 75>; // 128 x f16 vector value
-def v2bf16 : ValueType<32 , 76>; // 2 x bf16 vector value
-def v3bf16 : ValueType<48 , 77>; // 3 x bf16 vector value
-def v4bf16 : ValueType<64 , 78>; // 4 x bf16 vector value
-def v8bf16 : ValueType<128, 79>; // 8 x bf16 vector value
-def v16bf16 : ValueType<256, 80>; // 16 x bf16 vector value
-def v32bf16 : ValueType<512, 81>; // 32 x bf16 vector value
-def v64bf16 : ValueType<1024, 82>; // 64 x bf16 vector value
-def v128bf16 : ValueType<2048, 83>; // 128 x bf16 vector value
-def v1f32 : ValueType<32 , 84>; // 1 x f32 vector value
-def v2f32 : ValueType<64 , 85>; // 2 x f32 vector value
-def v3f32 : ValueType<96 , 86>; // 3 x f32 vector value
-def v4f32 : ValueType<128, 87>; // 4 x f32 vector value
-def v5f32 : ValueType<160, 88>; // 5 x f32 vector value
-def v8f32 : ValueType<256, 89>; // 8 x f32 vector value
-def v16f32 : ValueType<512, 90>; // 16 x f32 vector value
-def v32f32 : ValueType<1024, 91>; // 32 x f32 vector value
-def v64f32 : ValueType<2048, 92>; // 64 x f32 vector value
-def v128f32 : ValueType<4096, 93>; // 128 x f32 vector value
-def v256f32 : ValueType<8192, 94>; // 256 x f32 vector value
-def v512f32 : ValueType<16384, 95>; // 512 x f32 vector value
-def v1024f32 : ValueType<32768, 96>; // 1024 x f32 vector value
-def v2048f32 : ValueType<65536, 97>; // 2048 x f32 vector value
-def v1f64 : ValueType<64, 98>; // 1 x f64 vector value
-def v2f64 : ValueType<128, 99>; // 2 x f64 vector value
-def v4f64 : ValueType<256, 100>; // 4 x f64 vector value
-def v8f64 : ValueType<512, 101>; // 8 x f64 vector value
-def v16f64 : ValueType<1024, 102>; // 16 x f64 vector value
-def v32f64 : ValueType<2048, 103>; // 32 x f64 vector value
-def v64f64 : ValueType<4096, 104>; // 64 x f64 vector value
-def v128f64 : ValueType<8192, 105>; // 128 x f64 vector value
-def v256f64 : ValueType<16384, 106>; // 256 x f64 vector value
-
-def nxv1i1 : ValueType<1, 107>; // n x 1 x i1 vector value
-def nxv2i1 : ValueType<2, 108>; // n x 2 x i1 vector value
-def nxv4i1 : ValueType<4, 109>; // n x 4 x i1 vector value
-def nxv8i1 : ValueType<8, 110>; // n x 8 x i1 vector value
-def nxv16i1 : ValueType<16, 111>; // n x 16 x i1 vector value
-def nxv32i1 : ValueType<32, 112>; // n x 32 x i1 vector value
-def nxv64i1 : ValueType<64,113>; // n x 64 x i1 vector value
-
-def nxv1i8 : ValueType<8, 114>; // n x 1 x i8 vector value
-def nxv2i8 : ValueType<16, 115>; // n x 2 x i8 vector value
-def nxv4i8 : ValueType<32, 116>; // n x 4 x i8 vector value
-def nxv8i8 : ValueType<64, 117>; // n x 8 x i8 vector value
-def nxv16i8 : ValueType<128, 118>; // n x 16 x i8 vector value
-def nxv32i8 : ValueType<256, 119>; // n x 32 x i8 vector value
-def nxv64i8 : ValueType<512, 120>; // n x 64 x i8 vector value
-
-def nxv1i16 : ValueType<16, 121>; // n x 1 x i16 vector value
-def nxv2i16 : ValueType<32, 122>; // n x 2 x i16 vector value
-def nxv4i16 : ValueType<64, 123>; // n x 4 x i16 vector value
-def nxv8i16 : ValueType<128, 124>; // n x 8 x i16 vector value
-def nxv16i16: ValueType<256, 125>; // n x 16 x i16 vector value
-def nxv32i16: ValueType<512, 126>; // n x 32 x i16 vector value
-
-def nxv1i32 : ValueType<32, 127>; // n x 1 x i32 vector value
-def nxv2i32 : ValueType<64, 128>; // n x 2 x i32 vector value
-def nxv4i32 : ValueType<128, 129>; // n x 4 x i32 vector value
-def nxv8i32 : ValueType<256, 130>; // n x 8 x i32 vector value
-def nxv16i32: ValueType<512, 131>; // n x 16 x i32 vector value
-def nxv32i32: ValueType<1024,132>; // n x 32 x i32 vector value
-
-def nxv1i64 : ValueType<64, 133>; // n x 1 x i64 vector value
-def nxv2i64 : ValueType<128, 134>; // n x 2 x i64 vector value
-def nxv4i64 : ValueType<256, 135>; // n x 4 x i64 vector value
-def nxv8i64 : ValueType<512, 136>; // n x 8 x i64 vector value
-def nxv16i64: ValueType<1024,137>; // n x 16 x i64 vector value
-def nxv32i64: ValueType<2048,138>; // n x 32 x i64 vector value
-
-def nxv1f16 : ValueType<16, 139>; // n x 1 x f16 vector value
-def nxv2f16 : ValueType<32 , 140>; // n x 2 x f16 vector value
-def nxv4f16 : ValueType<64 , 141>; // n x 4 x f16 vector value
-def nxv8f16 : ValueType<128, 142>; // n x 8 x f16 vector value
-def nxv16f16 : ValueType<256,143>; // n x 16 x f16 vector value
-def nxv32f16 : ValueType<512,144>; // n x 32 x f16 vector value
-def nxv2bf16 : ValueType<32 , 145>; // n x 2 x bf16 vector value
-def nxv4bf16 : ValueType<64 , 146>; // n x 4 x bf16 vector value
-def nxv8bf16 : ValueType<128, 147>; // n x 8 x bf16 vector value
-def nxv1f32 : ValueType<32 , 148>; // n x 1 x f32 vector value
-def nxv2f32 : ValueType<64 , 149>; // n x 2 x f32 vector value
-def nxv4f32 : ValueType<128, 150>; // n x 4 x f32 vector value
-def nxv8f32 : ValueType<256, 151>; // n x 8 x f32 vector value
-def nxv16f32 : ValueType<512, 152>; // n x 16 x f32 vector value
-def nxv1f64 : ValueType<64, 153>; // n x 1 x f64 vector value
-def nxv2f64 : ValueType<128, 154>; // n x 2 x f64 vector value
-def nxv4f64 : ValueType<256, 155>; // n x 4 x f64 vector value
-def nxv8f64 : ValueType<512, 156>; // n x 8 x f64 vector value
-
-def x86mmx : ValueType<64 , 157>; // X86 MMX value
-def FlagVT : ValueType<0 , 158>; // Pre-RA sched glue
-def isVoid : ValueType<0 , 159>; // Produces no value
-def untyped: ValueType<8 , 160>; // Produces an untyped value
-def funcref : ValueType<0 , 161>; // WebAssembly's funcref type
-def externref : ValueType<0 , 162>; // WebAssembly's externref type
-def x86amx : ValueType<8192, 163>; // X86 AMX value
-
-
+def v64i64 : ValueType<4096,64>; // 64 x i64 vector value
+def v128i64: ValueType<8192,65>; // 128 x i64 vector value
+def v256i64: ValueType<16384,66>; // 256 x i64 vector value
+
+def v1i128 : ValueType<128, 67>; // 1 x i128 vector value
+
+def v2f16 : ValueType<32 , 68>; // 2 x f16 vector value
+def v3f16 : ValueType<48 , 69>; // 3 x f16 vector value
+def v4f16 : ValueType<64 , 70>; // 4 x f16 vector value
+def v8f16 : ValueType<128, 71>; // 8 x f16 vector value
+def v16f16 : ValueType<256, 72>; // 16 x f16 vector value
+def v32f16 : ValueType<512, 73>; // 32 x f16 vector value
+def v64f16 : ValueType<1024, 74>; // 64 x f16 vector value
+def v128f16 : ValueType<2048, 75>; // 128 x f16 vector value
+def v2bf16 : ValueType<32 , 76>; // 2 x bf16 vector value
+def v3bf16 : ValueType<48 , 77>; // 3 x bf16 vector value
+def v4bf16 : ValueType<64 , 78>; // 4 x bf16 vector value
+def v8bf16 : ValueType<128, 79>; // 8 x bf16 vector value
+def v16bf16 : ValueType<256, 80>; // 16 x bf16 vector value
+def v32bf16 : ValueType<512, 81>; // 32 x bf16 vector value
+def v64bf16 : ValueType<1024, 82>; // 64 x bf16 vector value
+def v128bf16 : ValueType<2048, 83>; // 128 x bf16 vector value
+def v1f32 : ValueType<32 , 84>; // 1 x f32 vector value
+def v2f32 : ValueType<64 , 85>; // 2 x f32 vector value
+def v3f32 : ValueType<96 , 86>; // 3 x f32 vector value
+def v4f32 : ValueType<128, 87>; // 4 x f32 vector value
+def v5f32 : ValueType<160, 88>; // 5 x f32 vector value
+def v8f32 : ValueType<256, 89>; // 8 x f32 vector value
+def v16f32 : ValueType<512, 90>; // 16 x f32 vector value
+def v32f32 : ValueType<1024, 91>; // 32 x f32 vector value
+def v64f32 : ValueType<2048, 92>; // 64 x f32 vector value
+def v128f32 : ValueType<4096, 93>; // 128 x f32 vector value
+def v256f32 : ValueType<8192, 94>; // 256 x f32 vector value
+def v512f32 : ValueType<16384, 95>; // 512 x f32 vector value
+def v1024f32 : ValueType<32768, 96>; // 1024 x f32 vector value
+def v2048f32 : ValueType<65536, 97>; // 2048 x f32 vector value
+def v1f64 : ValueType<64, 98>; // 1 x f64 vector value
+def v2f64 : ValueType<128, 99>; // 2 x f64 vector value
+def v4f64 : ValueType<256, 100>; // 4 x f64 vector value
+def v8f64 : ValueType<512, 101>; // 8 x f64 vector value
+def v16f64 : ValueType<1024, 102>; // 16 x f64 vector value
+def v32f64 : ValueType<2048, 103>; // 32 x f64 vector value
+def v64f64 : ValueType<4096, 104>; // 64 x f64 vector value
+def v128f64 : ValueType<8192, 105>; // 128 x f64 vector value
+def v256f64 : ValueType<16384, 106>; // 256 x f64 vector value
+
+def nxv1i1 : ValueType<1, 107>; // n x 1 x i1 vector value
+def nxv2i1 : ValueType<2, 108>; // n x 2 x i1 vector value
+def nxv4i1 : ValueType<4, 109>; // n x 4 x i1 vector value
+def nxv8i1 : ValueType<8, 110>; // n x 8 x i1 vector value
+def nxv16i1 : ValueType<16, 111>; // n x 16 x i1 vector value
+def nxv32i1 : ValueType<32, 112>; // n x 32 x i1 vector value
+def nxv64i1 : ValueType<64,113>; // n x 64 x i1 vector value
+
+def nxv1i8 : ValueType<8, 114>; // n x 1 x i8 vector value
+def nxv2i8 : ValueType<16, 115>; // n x 2 x i8 vector value
+def nxv4i8 : ValueType<32, 116>; // n x 4 x i8 vector value
+def nxv8i8 : ValueType<64, 117>; // n x 8 x i8 vector value
+def nxv16i8 : ValueType<128, 118>; // n x 16 x i8 vector value
+def nxv32i8 : ValueType<256, 119>; // n x 32 x i8 vector value
+def nxv64i8 : ValueType<512, 120>; // n x 64 x i8 vector value
+
+def nxv1i16 : ValueType<16, 121>; // n x 1 x i16 vector value
+def nxv2i16 : ValueType<32, 122>; // n x 2 x i16 vector value
+def nxv4i16 : ValueType<64, 123>; // n x 4 x i16 vector value
+def nxv8i16 : ValueType<128, 124>; // n x 8 x i16 vector value
+def nxv16i16: ValueType<256, 125>; // n x 16 x i16 vector value
+def nxv32i16: ValueType<512, 126>; // n x 32 x i16 vector value
+
+def nxv1i32 : ValueType<32, 127>; // n x 1 x i32 vector value
+def nxv2i32 : ValueType<64, 128>; // n x 2 x i32 vector value
+def nxv4i32 : ValueType<128, 129>; // n x 4 x i32 vector value
+def nxv8i32 : ValueType<256, 130>; // n x 8 x i32 vector value
+def nxv16i32: ValueType<512, 131>; // n x 16 x i32 vector value
+def nxv32i32: ValueType<1024,132>; // n x 32 x i32 vector value
+
+def nxv1i64 : ValueType<64, 133>; // n x 1 x i64 vector value
+def nxv2i64 : ValueType<128, 134>; // n x 2 x i64 vector value
+def nxv4i64 : ValueType<256, 135>; // n x 4 x i64 vector value
+def nxv8i64 : ValueType<512, 136>; // n x 8 x i64 vector value
+def nxv16i64: ValueType<1024,137>; // n x 16 x i64 vector value
+def nxv32i64: ValueType<2048,138>; // n x 32 x i64 vector value
+
+def nxv1f16 : ValueType<16, 139>; // n x 1 x f16 vector value
+def nxv2f16 : ValueType<32 , 140>; // n x 2 x f16 vector value
+def nxv4f16 : ValueType<64 , 141>; // n x 4 x f16 vector value
+def nxv8f16 : ValueType<128, 142>; // n x 8 x f16 vector value
+def nxv16f16 : ValueType<256,143>; // n x 16 x f16 vector value
+def nxv32f16 : ValueType<512,144>; // n x 32 x f16 vector value
+def nxv2bf16 : ValueType<32 , 145>; // n x 2 x bf16 vector value
+def nxv4bf16 : ValueType<64 , 146>; // n x 4 x bf16 vector value
+def nxv8bf16 : ValueType<128, 147>; // n x 8 x bf16 vector value
+def nxv1f32 : ValueType<32 , 148>; // n x 1 x f32 vector value
+def nxv2f32 : ValueType<64 , 149>; // n x 2 x f32 vector value
+def nxv4f32 : ValueType<128, 150>; // n x 4 x f32 vector value
+def nxv8f32 : ValueType<256, 151>; // n x 8 x f32 vector value
+def nxv16f32 : ValueType<512, 152>; // n x 16 x f32 vector value
+def nxv1f64 : ValueType<64, 153>; // n x 1 x f64 vector value
+def nxv2f64 : ValueType<128, 154>; // n x 2 x f64 vector value
+def nxv4f64 : ValueType<256, 155>; // n x 4 x f64 vector value
+def nxv8f64 : ValueType<512, 156>; // n x 8 x f64 vector value
+
+def x86mmx : ValueType<64 , 157>; // X86 MMX value
+def FlagVT : ValueType<0 , 158>; // Pre-RA sched glue
+def isVoid : ValueType<0 , 159>; // Produces no value
+def untyped: ValueType<8 , 160>; // Produces an untyped value
+def funcref : ValueType<0 , 161>; // WebAssembly's funcref type
+def externref : ValueType<0 , 162>; // WebAssembly's externref type
+def x86amx : ValueType<8192, 163>; // X86 AMX value
+
+
def token : ValueType<0 , 248>; // TokenTy
def MetadataVT: ValueType<0, 249>; // Metadata
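Each ValueType<SizeInBits, EnumValue> record above surfaces in C++ as an MVT::SimpleValueType enumerator whose size MVT reports back; a quick sketch:

// MVT::v4f32 corresponds to the ValueType<128, 87> record above.
MVT VT = MVT::v4f32;
assert(VT.getSizeInBits() == 128);
assert(VT.getVectorNumElements() == 4);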
diff --git a/contrib/libs/llvm12/include/llvm/CodeGen/VirtRegMap.h b/contrib/libs/llvm12/include/llvm/CodeGen/VirtRegMap.h
index b4e39214c9..bd0ff64222 100644
--- a/contrib/libs/llvm12/include/llvm/CodeGen/VirtRegMap.h
+++ b/contrib/libs/llvm12/include/llvm/CodeGen/VirtRegMap.h
@@ -26,7 +26,7 @@
#include "llvm/ADT/IndexedMap.h"
#include "llvm/CodeGen/MachineFunctionPass.h"
#include "llvm/CodeGen/TargetRegisterInfo.h"
-#include "llvm/CodeGen/TileShapeInfo.h"
+#include "llvm/CodeGen/TileShapeInfo.h"
#include "llvm/Pass.h"
#include <cassert>
@@ -68,10 +68,10 @@ class TargetInstrInfo;
/// mapping.
IndexedMap<unsigned, VirtReg2IndexFunctor> Virt2SplitMap;
- /// Virt2ShapeMap - Maps an X86 AMX virtual register to the shape
- /// information bound to it.
- DenseMap<unsigned, ShapeT> Virt2ShapeMap;
-
+ /// Virt2ShapeMap - Maps an X86 AMX virtual register to the shape
+ /// information bound to it.
+ DenseMap<unsigned, ShapeT> Virt2ShapeMap;
+
/// createSpillSlot - Allocate a spill slot for RC from MFI.
unsigned createSpillSlot(const TargetRegisterClass *RC);
@@ -110,30 +110,30 @@ class TargetInstrInfo;
/// returns the physical register mapped to the specified
/// virtual register
- MCRegister getPhys(Register virtReg) const {
+ MCRegister getPhys(Register virtReg) const {
assert(virtReg.isVirtual());
- return MCRegister::from(Virt2PhysMap[virtReg.id()]);
+ return MCRegister::from(Virt2PhysMap[virtReg.id()]);
}
/// creates a mapping for the specified virtual register to
/// the specified physical register
void assignVirt2Phys(Register virtReg, MCPhysReg physReg);
- bool isShapeMapEmpty() const { return Virt2ShapeMap.empty(); }
-
- bool hasShape(Register virtReg) const {
- return getShape(virtReg).isValid();
- }
-
- ShapeT getShape(Register virtReg) const {
- assert(virtReg.isVirtual());
- return Virt2ShapeMap.lookup(virtReg);
- }
-
- void assignVirt2Shape(Register virtReg, ShapeT shape) {
- Virt2ShapeMap[virtReg.id()] = shape;
- }
-
+ bool isShapeMapEmpty() const { return Virt2ShapeMap.empty(); }
+
+ bool hasShape(Register virtReg) const {
+ return getShape(virtReg).isValid();
+ }
+
+ ShapeT getShape(Register virtReg) const {
+ assert(virtReg.isVirtual());
+ return Virt2ShapeMap.lookup(virtReg);
+ }
+
+ void assignVirt2Shape(Register virtReg, ShapeT shape) {
+ Virt2ShapeMap[virtReg.id()] = shape;
+ }
+
/// clears the specified virtual register's physical
/// register mapping
void clearVirt(Register virtReg) {
@@ -158,15 +158,15 @@ class TargetInstrInfo;
bool hasKnownPreference(Register VirtReg);
/// records that virtReg is a live interval split from SReg.
- void setIsSplitFromReg(Register virtReg, Register SReg) {
+ void setIsSplitFromReg(Register virtReg, Register SReg) {
Virt2SplitMap[virtReg.id()] = SReg;
- if (hasShape(SReg)) {
- Virt2ShapeMap[virtReg.id()] = getShape(SReg);
- }
+ if (hasShape(SReg)) {
+ Virt2ShapeMap[virtReg.id()] = getShape(SReg);
+ }
}
/// returns the live interval virtReg is split from.
- Register getPreSplitReg(Register virtReg) const {
+ Register getPreSplitReg(Register virtReg) const {
return Virt2SplitMap[virtReg.id()];
}
@@ -174,8 +174,8 @@ class TargetInstrInfo;
/// from through splitting.
/// A register that was not created by splitting is its own original.
/// This operation is idempotent.
- Register getOriginal(Register VirtReg) const {
- Register Orig = getPreSplitReg(VirtReg);
+ Register getOriginal(Register VirtReg) const {
+ Register Orig = getPreSplitReg(VirtReg);
return Orig ? Orig : VirtReg;
}
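A sketch of how allocator-side code might consult the shape map (VRM and VReg are assumed context):

// AMX tile vregs carry a ShapeT; non-AMX targets leave the map empty.
if (!VRM.isShapeMapEmpty() && VRM.hasShape(VReg)) {
  ShapeT Shape = VRM.getShape(VReg);
  // The row/column operands constrain which physical tile register and
  // tile configuration are legal for VReg.
  (void)Shape;
}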
diff --git a/contrib/libs/llvm12/include/llvm/CodeGen/WasmEHFuncInfo.h b/contrib/libs/llvm12/include/llvm/CodeGen/WasmEHFuncInfo.h
index 1e5377d30f..7ecb43d534 100644
--- a/contrib/libs/llvm12/include/llvm/CodeGen/WasmEHFuncInfo.h
+++ b/contrib/libs/llvm12/include/llvm/CodeGen/WasmEHFuncInfo.h
@@ -29,9 +29,9 @@ class BasicBlock;
class Function;
class MachineBasicBlock;
-namespace WebAssembly {
+namespace WebAssembly {
enum EventTag { CPP_EXCEPTION = 0, C_LONGJMP = 1 };
-}
+}
using BBOrMBB = PointerUnion<const BasicBlock *, MachineBasicBlock *>;
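BBOrMBB lets the same unwind-destination maps be used before and after instruction selection; a sketch, where getUnwindDest() stands in for a hypothetical accessor:

BBOrMBB Dest = getUnwindDest();                       // hypothetical
if (const auto *BB = Dest.dyn_cast<const BasicBlock *>()) {
  (void)BB;   // still at the IR level
} else {
  MachineBasicBlock *MBB = Dest.get<MachineBasicBlock *>();
  (void)MBB;  // already lowered to MIR
}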