aboutsummaryrefslogtreecommitdiffstats
path: root/contrib/libs/llvm12/include/llvm/Analysis
diff options
context:
space:
mode:
authorshadchin <shadchin@yandex-team.ru>2022-02-10 16:44:30 +0300
committerDaniil Cherednik <dcherednik@yandex-team.ru>2022-02-10 16:44:30 +0300
commit2598ef1d0aee359b4b6d5fdd1758916d5907d04f (patch)
tree012bb94d777798f1f56ac1cec429509766d05181 /contrib/libs/llvm12/include/llvm/Analysis
parent6751af0b0c1b952fede40b19b71da8025b5d8bcf (diff)
downloadydb-2598ef1d0aee359b4b6d5fdd1758916d5907d04f.tar.gz
Restoring authorship annotation for <shadchin@yandex-team.ru>. Commit 1 of 2.
Diffstat (limited to 'contrib/libs/llvm12/include/llvm/Analysis')
-rw-r--r--contrib/libs/llvm12/include/llvm/Analysis/AliasAnalysis.h96
-rw-r--r--contrib/libs/llvm12/include/llvm/Analysis/AliasSetTracker.h42
-rw-r--r--contrib/libs/llvm12/include/llvm/Analysis/AssumptionCache.h2
-rw-r--r--contrib/libs/llvm12/include/llvm/Analysis/BasicAliasAnalysis.h94
-rw-r--r--contrib/libs/llvm12/include/llvm/Analysis/BlockFrequencyInfoImpl.h12
-rw-r--r--contrib/libs/llvm12/include/llvm/Analysis/BranchProbabilityInfo.h490
-rw-r--r--contrib/libs/llvm12/include/llvm/Analysis/CFGPrinter.h32
-rw-r--r--contrib/libs/llvm12/include/llvm/Analysis/CGSCCPassManager.h110
-rw-r--r--contrib/libs/llvm12/include/llvm/Analysis/CaptureTracking.h16
-rw-r--r--contrib/libs/llvm12/include/llvm/Analysis/CodeMetrics.h4
-rw-r--r--contrib/libs/llvm12/include/llvm/Analysis/ConstantFolding.h10
-rw-r--r--contrib/libs/llvm12/include/llvm/Analysis/ConstraintSystem.h198
-rw-r--r--contrib/libs/llvm12/include/llvm/Analysis/DDG.h54
-rw-r--r--contrib/libs/llvm12/include/llvm/Analysis/DDGPrinter.h204
-rw-r--r--contrib/libs/llvm12/include/llvm/Analysis/Delinearization.h88
-rw-r--r--contrib/libs/llvm12/include/llvm/Analysis/DemandedBits.h28
-rw-r--r--contrib/libs/llvm12/include/llvm/Analysis/DivergenceAnalysis.h62
-rw-r--r--contrib/libs/llvm12/include/llvm/Analysis/EHPersonalities.h12
-rw-r--r--contrib/libs/llvm12/include/llvm/Analysis/FunctionPropertiesAnalysis.h194
-rw-r--r--contrib/libs/llvm12/include/llvm/Analysis/IRSimilarityIdentifier.h1600
-rw-r--r--contrib/libs/llvm12/include/llvm/Analysis/IVDescriptors.h160
-rw-r--r--contrib/libs/llvm12/include/llvm/Analysis/InlineAdvisor.h130
-rw-r--r--contrib/libs/llvm12/include/llvm/Analysis/InlineSizeEstimatorAnalysis.h22
-rw-r--r--contrib/libs/llvm12/include/llvm/Analysis/InstCount.h78
-rw-r--r--contrib/libs/llvm12/include/llvm/Analysis/InstructionSimplify.h56
-rw-r--r--contrib/libs/llvm12/include/llvm/Analysis/IntervalIterator.h4
-rw-r--r--contrib/libs/llvm12/include/llvm/Analysis/IteratedDominanceFrontier.h2
-rw-r--r--contrib/libs/llvm12/include/llvm/Analysis/LazyBranchProbabilityInfo.h2
-rw-r--r--contrib/libs/llvm12/include/llvm/Analysis/LazyCallGraph.h56
-rw-r--r--contrib/libs/llvm12/include/llvm/Analysis/LazyValueInfo.h18
-rw-r--r--contrib/libs/llvm12/include/llvm/Analysis/Lint.h28
-rw-r--r--contrib/libs/llvm12/include/llvm/Analysis/Loads.h18
-rw-r--r--contrib/libs/llvm12/include/llvm/Analysis/LoopAccessAnalysis.h26
-rw-r--r--contrib/libs/llvm12/include/llvm/Analysis/LoopAnalysisManager.h2
-rw-r--r--contrib/libs/llvm12/include/llvm/Analysis/LoopCacheAnalysis.h30
-rw-r--r--contrib/libs/llvm12/include/llvm/Analysis/LoopInfo.h38
-rw-r--r--contrib/libs/llvm12/include/llvm/Analysis/LoopInfoImpl.h30
-rw-r--r--contrib/libs/llvm12/include/llvm/Analysis/LoopNestAnalysis.h30
-rw-r--r--contrib/libs/llvm12/include/llvm/Analysis/MLInlineAdvisor.h14
-rw-r--r--contrib/libs/llvm12/include/llvm/Analysis/MemDerefPrinter.h70
-rw-r--r--contrib/libs/llvm12/include/llvm/Analysis/MemoryDependenceAnalysis.h2
-rw-r--r--contrib/libs/llvm12/include/llvm/Analysis/MemoryLocation.h104
-rw-r--r--contrib/libs/llvm12/include/llvm/Analysis/MemorySSA.h82
-rw-r--r--contrib/libs/llvm12/include/llvm/Analysis/MemorySSAUpdater.h10
-rw-r--r--contrib/libs/llvm12/include/llvm/Analysis/ModuleDebugInfoPrinter.h80
-rw-r--r--contrib/libs/llvm12/include/llvm/Analysis/MustExecute.h38
-rw-r--r--contrib/libs/llvm12/include/llvm/Analysis/ObjCARCAnalysisUtils.h18
-rw-r--r--contrib/libs/llvm12/include/llvm/Analysis/OptimizationRemarkEmitter.h16
-rw-r--r--contrib/libs/llvm12/include/llvm/Analysis/PhiValues.h6
-rw-r--r--contrib/libs/llvm12/include/llvm/Analysis/ProfileSummaryInfo.h6
-rw-r--r--contrib/libs/llvm12/include/llvm/Analysis/RegionInfoImpl.h8
-rw-r--r--contrib/libs/llvm12/include/llvm/Analysis/ReplayInlineAdvisor.h104
-rw-r--r--contrib/libs/llvm12/include/llvm/Analysis/ScalarEvolution.h418
-rw-r--r--contrib/libs/llvm12/include/llvm/Analysis/ScalarEvolutionDivision.h2
-rw-r--r--contrib/libs/llvm12/include/llvm/Analysis/ScalarEvolutionExpressions.h192
-rw-r--r--contrib/libs/llvm12/include/llvm/Analysis/SparsePropagation.h2
-rw-r--r--contrib/libs/llvm12/include/llvm/Analysis/StackLifetime.h12
-rw-r--r--contrib/libs/llvm12/include/llvm/Analysis/StackSafetyAnalysis.h4
-rw-r--r--contrib/libs/llvm12/include/llvm/Analysis/SyncDependenceAnalysis.h56
-rw-r--r--contrib/libs/llvm12/include/llvm/Analysis/TargetLibraryInfo.def42
-rw-r--r--contrib/libs/llvm12/include/llvm/Analysis/TargetLibraryInfo.h2
-rw-r--r--contrib/libs/llvm12/include/llvm/Analysis/TargetTransformInfo.h488
-rw-r--r--contrib/libs/llvm12/include/llvm/Analysis/TargetTransformInfoImpl.h412
-rw-r--r--contrib/libs/llvm12/include/llvm/Analysis/Utils/ImportedFunctionsInliningStatistics.h246
-rw-r--r--contrib/libs/llvm12/include/llvm/Analysis/Utils/Local.h78
-rw-r--r--contrib/libs/llvm12/include/llvm/Analysis/Utils/TFUtils.h356
-rw-r--r--contrib/libs/llvm12/include/llvm/Analysis/ValueLattice.h22
-rw-r--r--contrib/libs/llvm12/include/llvm/Analysis/ValueTracking.h150
-rw-r--r--contrib/libs/llvm12/include/llvm/Analysis/VecFuncs.def292
-rw-r--r--contrib/libs/llvm12/include/llvm/Analysis/VectorUtils.h60
70 files changed, 3735 insertions, 3735 deletions
diff --git a/contrib/libs/llvm12/include/llvm/Analysis/AliasAnalysis.h b/contrib/libs/llvm12/include/llvm/Analysis/AliasAnalysis.h
index adf34c62d7..bfd269831d 100644
--- a/contrib/libs/llvm12/include/llvm/Analysis/AliasAnalysis.h
+++ b/contrib/libs/llvm12/include/llvm/Analysis/AliasAnalysis.h
@@ -59,17 +59,17 @@
namespace llvm {
class AnalysisUsage;
-class AtomicCmpXchgInst;
+class AtomicCmpXchgInst;
class BasicAAResult;
class BasicBlock;
-class CatchPadInst;
-class CatchReturnInst;
+class CatchPadInst;
+class CatchReturnInst;
class DominatorTree;
-class FenceInst;
-class Function;
-class InvokeInst;
-class PreservedAnalyses;
-class TargetLibraryInfo;
+class FenceInst;
+class Function;
+class InvokeInst;
+class PreservedAnalyses;
+class TargetLibraryInfo;
class Value;
/// The possible results of an alias query.
@@ -353,28 +353,28 @@ createModRefInfo(const FunctionModRefBehavior FMRB) {
class AAQueryInfo {
public:
using LocPair = std::pair<MemoryLocation, MemoryLocation>;
- struct CacheEntry {
- AliasResult Result;
- /// Number of times a NoAlias assumption has been used.
- /// 0 for assumptions that have not been used, -1 for definitive results.
- int NumAssumptionUses;
- /// Whether this is a definitive (non-assumption) result.
- bool isDefinitive() const { return NumAssumptionUses < 0; }
- };
- using AliasCacheT = SmallDenseMap<LocPair, CacheEntry, 8>;
+ struct CacheEntry {
+ AliasResult Result;
+ /// Number of times a NoAlias assumption has been used.
+ /// 0 for assumptions that have not been used, -1 for definitive results.
+ int NumAssumptionUses;
+ /// Whether this is a definitive (non-assumption) result.
+ bool isDefinitive() const { return NumAssumptionUses < 0; }
+ };
+ using AliasCacheT = SmallDenseMap<LocPair, CacheEntry, 8>;
AliasCacheT AliasCache;
using IsCapturedCacheT = SmallDenseMap<const Value *, bool, 8>;
IsCapturedCacheT IsCapturedCache;
- /// How many active NoAlias assumption uses there are.
- int NumAssumptionUses = 0;
-
- /// Location pairs for which an assumption based result is currently stored.
- /// Used to remove all potentially incorrect results from the cache if an
- /// assumption is disproven.
- SmallVector<AAQueryInfo::LocPair, 4> AssumptionBasedResults;
-
+ /// How many active NoAlias assumption uses there are.
+ int NumAssumptionUses = 0;
+
+ /// Location pairs for which an assumption based result is currently stored.
+ /// Used to remove all potentially incorrect results from the cache if an
+ /// assumption is disproven.
+ SmallVector<AAQueryInfo::LocPair, 4> AssumptionBasedResults;
+
AAQueryInfo() : AliasCache(), IsCapturedCache() {}
};
@@ -428,8 +428,8 @@ public:
/// A convenience wrapper around the primary \c alias interface.
AliasResult alias(const Value *V1, const Value *V2) {
- return alias(MemoryLocation::getBeforeOrAfter(V1),
- MemoryLocation::getBeforeOrAfter(V2));
+ return alias(MemoryLocation::getBeforeOrAfter(V1),
+ MemoryLocation::getBeforeOrAfter(V2));
}
/// A trivial helper function to check to see if the specified pointers are
@@ -446,8 +446,8 @@ public:
/// A convenience wrapper around the \c isNoAlias helper interface.
bool isNoAlias(const Value *V1, const Value *V2) {
- return isNoAlias(MemoryLocation::getBeforeOrAfter(V1),
- MemoryLocation::getBeforeOrAfter(V2));
+ return isNoAlias(MemoryLocation::getBeforeOrAfter(V1),
+ MemoryLocation::getBeforeOrAfter(V2));
}
/// A trivial helper function to check to see if the specified pointers are
@@ -469,7 +469,7 @@ public:
/// A convenience wrapper around the primary \c pointsToConstantMemory
/// interface.
bool pointsToConstantMemory(const Value *P, bool OrLocal = false) {
- return pointsToConstantMemory(MemoryLocation::getBeforeOrAfter(P), OrLocal);
+ return pointsToConstantMemory(MemoryLocation::getBeforeOrAfter(P), OrLocal);
}
/// @}
@@ -562,7 +562,7 @@ public:
/// write at most from objects pointed to by their pointer-typed arguments
/// (with arbitrary offsets).
static bool onlyAccessesArgPointees(FunctionModRefBehavior MRB) {
- return !((unsigned)MRB & FMRL_Anywhere & ~FMRL_ArgumentPointees);
+ return !((unsigned)MRB & FMRL_Anywhere & ~FMRL_ArgumentPointees);
}
/// Checks if functions with the specified behavior are known to potentially
@@ -570,27 +570,27 @@ public:
/// (with arbitrary offsets).
static bool doesAccessArgPointees(FunctionModRefBehavior MRB) {
return isModOrRefSet(createModRefInfo(MRB)) &&
- ((unsigned)MRB & FMRL_ArgumentPointees);
+ ((unsigned)MRB & FMRL_ArgumentPointees);
}
/// Checks if functions with the specified behavior are known to read and
/// write at most from memory that is inaccessible from LLVM IR.
static bool onlyAccessesInaccessibleMem(FunctionModRefBehavior MRB) {
- return !((unsigned)MRB & FMRL_Anywhere & ~FMRL_InaccessibleMem);
+ return !((unsigned)MRB & FMRL_Anywhere & ~FMRL_InaccessibleMem);
}
/// Checks if functions with the specified behavior are known to potentially
/// read or write from memory that is inaccessible from LLVM IR.
static bool doesAccessInaccessibleMem(FunctionModRefBehavior MRB) {
- return isModOrRefSet(createModRefInfo(MRB)) &&
- ((unsigned)MRB & FMRL_InaccessibleMem);
+ return isModOrRefSet(createModRefInfo(MRB)) &&
+ ((unsigned)MRB & FMRL_InaccessibleMem);
}
/// Checks if functions with the specified behavior are known to read and
/// write at most from memory that is inaccessible from LLVM IR or objects
/// pointed to by their pointer-typed arguments (with arbitrary offsets).
static bool onlyAccessesInaccessibleOrArgMem(FunctionModRefBehavior MRB) {
- return !((unsigned)MRB & FMRL_Anywhere &
+ return !((unsigned)MRB & FMRL_Anywhere &
~(FMRL_InaccessibleMem | FMRL_ArgumentPointees));
}
@@ -790,7 +790,7 @@ private:
AAQueryInfo &AAQI);
ModRefInfo getModRefInfo(const Instruction *I,
const Optional<MemoryLocation> &OptLoc,
- AAQueryInfo &AAQIP);
+ AAQueryInfo &AAQIP);
class Concept;
@@ -804,9 +804,9 @@ private:
std::vector<AnalysisKey *> AADeps;
- /// Query depth used to distinguish recursive queries.
- unsigned Depth = 0;
-
+ /// Query depth used to distinguish recursive queries.
+ unsigned Depth = 0;
+
friend class BatchAAResults;
};
@@ -847,13 +847,13 @@ public:
FunctionModRefBehavior getModRefBehavior(const CallBase *Call) {
return AA.getModRefBehavior(Call);
}
- bool isMustAlias(const MemoryLocation &LocA, const MemoryLocation &LocB) {
- return alias(LocA, LocB) == MustAlias;
- }
- bool isMustAlias(const Value *V1, const Value *V2) {
- return alias(MemoryLocation(V1, LocationSize::precise(1)),
- MemoryLocation(V2, LocationSize::precise(1))) == MustAlias;
- }
+ bool isMustAlias(const MemoryLocation &LocA, const MemoryLocation &LocB) {
+ return alias(LocA, LocB) == MustAlias;
+ }
+ bool isMustAlias(const Value *V1, const Value *V2) {
+ return alias(MemoryLocation(V1, LocationSize::precise(1)),
+ MemoryLocation(V2, LocationSize::precise(1))) == MustAlias;
+ }
};
/// Temporary typedef for legacy code that uses a generic \c AliasAnalysis
@@ -1161,7 +1161,7 @@ public:
ResultGetters.push_back(&getModuleAAResultImpl<AnalysisT>);
}
- Result run(Function &F, FunctionAnalysisManager &AM);
+ Result run(Function &F, FunctionAnalysisManager &AM);
private:
friend AnalysisInfoMixin<AAManager>;
diff --git a/contrib/libs/llvm12/include/llvm/Analysis/AliasSetTracker.h b/contrib/libs/llvm12/include/llvm/Analysis/AliasSetTracker.h
index cdb54d310d..fbd2e49f90 100644
--- a/contrib/libs/llvm12/include/llvm/Analysis/AliasSetTracker.h
+++ b/contrib/libs/llvm12/include/llvm/Analysis/AliasSetTracker.h
@@ -27,10 +27,10 @@
#include "llvm/ADT/DenseMapInfo.h"
#include "llvm/ADT/ilist.h"
#include "llvm/ADT/ilist_node.h"
-#include "llvm/Analysis/MemoryLocation.h"
+#include "llvm/Analysis/MemoryLocation.h"
#include "llvm/IR/Instruction.h"
#include "llvm/IR/Metadata.h"
-#include "llvm/IR/PassManager.h"
+#include "llvm/IR/PassManager.h"
#include "llvm/IR/ValueHandle.h"
#include "llvm/Support/Casting.h"
#include <cassert>
@@ -41,7 +41,7 @@
namespace llvm {
-class AAResults;
+class AAResults;
class AliasSetTracker;
class BasicBlock;
class LoadInst;
@@ -54,8 +54,8 @@ class StoreInst;
class VAArgInst;
class Value;
-enum AliasResult : uint8_t;
-
+enum AliasResult : uint8_t;
+
class AliasSet : public ilist_node<AliasSet> {
friend class AliasSetTracker;
@@ -304,7 +304,7 @@ private:
void addPointer(AliasSetTracker &AST, PointerRec &Entry, LocationSize Size,
const AAMDNodes &AAInfo, bool KnownMustAlias = false,
bool SkipSizeUpdate = false);
- void addUnknownInst(Instruction *I, AAResults &AA);
+ void addUnknownInst(Instruction *I, AAResults &AA);
void removeUnknownInst(AliasSetTracker &AST, Instruction *I) {
bool WasEmpty = UnknownInsts.empty();
@@ -322,8 +322,8 @@ public:
/// If the specified pointer "may" (or must) alias one of the members in the
/// set return the appropriate AliasResult. Otherwise return NoAlias.
AliasResult aliasesPointer(const Value *Ptr, LocationSize Size,
- const AAMDNodes &AAInfo, AAResults &AA) const;
- bool aliasesUnknownInst(const Instruction *Inst, AAResults &AA) const;
+ const AAMDNodes &AAInfo, AAResults &AA) const;
+ bool aliasesUnknownInst(const Instruction *Inst, AAResults &AA) const;
};
inline raw_ostream& operator<<(raw_ostream &OS, const AliasSet &AS) {
@@ -349,7 +349,7 @@ class AliasSetTracker {
/// handle.
struct ASTCallbackVHDenseMapInfo : public DenseMapInfo<Value *> {};
- AAResults &AA;
+ AAResults &AA;
MemorySSA *MSSA = nullptr;
Loop *L = nullptr;
ilist<AliasSet> AliasSets;
@@ -363,9 +363,9 @@ class AliasSetTracker {
public:
/// Create an empty collection of AliasSets, and use the specified alias
/// analysis object to disambiguate load and store addresses.
- explicit AliasSetTracker(AAResults &AA) : AA(AA) {}
- explicit AliasSetTracker(AAResults &AA, MemorySSA *MSSA, Loop *L)
- : AA(AA), MSSA(MSSA), L(L) {}
+ explicit AliasSetTracker(AAResults &AA) : AA(AA) {}
+ explicit AliasSetTracker(AAResults &AA, MemorySSA *MSSA, Loop *L)
+ : AA(AA), MSSA(MSSA), L(L) {}
~AliasSetTracker() { clear(); }
/// These methods are used to add different types of instructions to the alias
@@ -404,7 +404,7 @@ public:
AliasSet &getAliasSetFor(const MemoryLocation &MemLoc);
/// Return the underlying alias analysis object used by this tracker.
- AAResults &getAliasAnalysis() const { return AA; }
+ AAResults &getAliasAnalysis() const { return AA; }
/// This method is used to remove a pointer value from the AliasSetTracker
/// entirely. It should be used when an instruction is deleted from the
@@ -468,14 +468,14 @@ inline raw_ostream& operator<<(raw_ostream &OS, const AliasSetTracker &AST) {
return OS;
}
-class AliasSetsPrinterPass : public PassInfoMixin<AliasSetsPrinterPass> {
- raw_ostream &OS;
-
-public:
- explicit AliasSetsPrinterPass(raw_ostream &OS);
- PreservedAnalyses run(Function &F, FunctionAnalysisManager &AM);
-};
-
+class AliasSetsPrinterPass : public PassInfoMixin<AliasSetsPrinterPass> {
+ raw_ostream &OS;
+
+public:
+ explicit AliasSetsPrinterPass(raw_ostream &OS);
+ PreservedAnalyses run(Function &F, FunctionAnalysisManager &AM);
+};
+
} // end namespace llvm
#endif // LLVM_ANALYSIS_ALIASSETTRACKER_H
diff --git a/contrib/libs/llvm12/include/llvm/Analysis/AssumptionCache.h b/contrib/libs/llvm12/include/llvm/Analysis/AssumptionCache.h
index 86dced7f30..906b4613a6 100644
--- a/contrib/libs/llvm12/include/llvm/Analysis/AssumptionCache.h
+++ b/contrib/libs/llvm12/include/llvm/Analysis/AssumptionCache.h
@@ -52,7 +52,7 @@ public:
enum : unsigned { ExprResultIdx = std::numeric_limits<unsigned>::max() };
struct ResultElem {
- WeakVH Assume;
+ WeakVH Assume;
/// contains either ExprResultIdx or the index of the operand bundle
/// containing the knowledge.
diff --git a/contrib/libs/llvm12/include/llvm/Analysis/BasicAliasAnalysis.h b/contrib/libs/llvm12/include/llvm/Analysis/BasicAliasAnalysis.h
index 7557bd687b..206cc1f395 100644
--- a/contrib/libs/llvm12/include/llvm/Analysis/BasicAliasAnalysis.h
+++ b/contrib/libs/llvm12/include/llvm/Analysis/BasicAliasAnalysis.h
@@ -124,9 +124,9 @@ private:
APInt Scale;
- // Context instruction to use when querying information about this index.
- const Instruction *CxtI;
-
+ // Context instruction to use when querying information about this index.
+ const Instruction *CxtI;
+
bool operator==(const VariableGEPIndex &Other) const {
return V == Other.V && ZExtBits == Other.ZExtBits &&
SExtBits == Other.SExtBits && Scale == Other.Scale;
@@ -135,17 +135,17 @@ private:
bool operator!=(const VariableGEPIndex &Other) const {
return !operator==(Other);
}
-
- void dump() const {
- print(dbgs());
- dbgs() << "\n";
- }
- void print(raw_ostream &OS) const {
- OS << "(V=" << V->getName()
- << ", zextbits=" << ZExtBits
- << ", sextbits=" << SExtBits
- << ", scale=" << Scale << ")";
- }
+
+ void dump() const {
+ print(dbgs());
+ dbgs() << "\n";
+ }
+ void print(raw_ostream &OS) const {
+ OS << "(V=" << V->getName()
+ << ", zextbits=" << ZExtBits
+ << ", sextbits=" << SExtBits
+ << ", scale=" << Scale << ")";
+ }
};
// Represents the internal structure of a GEP, decomposed into a base pointer,
@@ -153,29 +153,29 @@ private:
struct DecomposedGEP {
// Base pointer of the GEP
const Value *Base;
- // Total constant offset from base.
- APInt Offset;
+ // Total constant offset from base.
+ APInt Offset;
// Scaled variable (non-constant) indices.
SmallVector<VariableGEPIndex, 4> VarIndices;
// Is GEP index scale compile-time constant.
bool HasCompileTimeConstantScale;
-
- void dump() const {
- print(dbgs());
- dbgs() << "\n";
- }
- void print(raw_ostream &OS) const {
- OS << "(DecomposedGEP Base=" << Base->getName()
- << ", Offset=" << Offset
- << ", VarIndices=[";
- for (size_t i = 0; i < VarIndices.size(); i++) {
- if (i != 0)
- OS << ", ";
- VarIndices[i].print(OS);
- }
- OS << "], HasCompileTimeConstantScale=" << HasCompileTimeConstantScale
- << ")";
- }
+
+ void dump() const {
+ print(dbgs());
+ dbgs() << "\n";
+ }
+ void print(raw_ostream &OS) const {
+ OS << "(DecomposedGEP Base=" << Base->getName()
+ << ", Offset=" << Offset
+ << ", VarIndices=[";
+ for (size_t i = 0; i < VarIndices.size(); i++) {
+ if (i != 0)
+ OS << ", ";
+ VarIndices[i].print(OS);
+ }
+ OS << "], HasCompileTimeConstantScale=" << HasCompileTimeConstantScale
+ << ")";
+ }
};
/// Tracks phi nodes we have visited.
@@ -203,9 +203,9 @@ private:
const DataLayout &DL, unsigned Depth, AssumptionCache *AC,
DominatorTree *DT, bool &NSW, bool &NUW);
- static DecomposedGEP
- DecomposeGEPExpression(const Value *V, const DataLayout &DL,
- AssumptionCache *AC, DominatorTree *DT);
+ static DecomposedGEP
+ DecomposeGEPExpression(const Value *V, const DataLayout &DL,
+ AssumptionCache *AC, DominatorTree *DT);
static bool isGEPBaseAtNegativeOffset(const GEPOperator *GEPOp,
const DecomposedGEP &DecompGEP, const DecomposedGEP &DecompObject,
@@ -239,23 +239,23 @@ private:
AliasResult aliasPHI(const PHINode *PN, LocationSize PNSize,
const AAMDNodes &PNAAInfo, const Value *V2,
LocationSize V2Size, const AAMDNodes &V2AAInfo,
- AAQueryInfo &AAQI);
+ AAQueryInfo &AAQI);
AliasResult aliasSelect(const SelectInst *SI, LocationSize SISize,
const AAMDNodes &SIAAInfo, const Value *V2,
LocationSize V2Size, const AAMDNodes &V2AAInfo,
- AAQueryInfo &AAQI);
+ AAQueryInfo &AAQI);
AliasResult aliasCheck(const Value *V1, LocationSize V1Size,
- const AAMDNodes &V1AATag, const Value *V2,
- LocationSize V2Size, const AAMDNodes &V2AATag,
- AAQueryInfo &AAQI);
-
- AliasResult aliasCheckRecursive(const Value *V1, LocationSize V1Size,
- const AAMDNodes &V1AATag, const Value *V2,
- LocationSize V2Size, const AAMDNodes &V2AATag,
- AAQueryInfo &AAQI, const Value *O1,
- const Value *O2);
+ const AAMDNodes &V1AATag, const Value *V2,
+ LocationSize V2Size, const AAMDNodes &V2AATag,
+ AAQueryInfo &AAQI);
+
+ AliasResult aliasCheckRecursive(const Value *V1, LocationSize V1Size,
+ const AAMDNodes &V1AATag, const Value *V2,
+ LocationSize V2Size, const AAMDNodes &V2AATag,
+ AAQueryInfo &AAQI, const Value *O1,
+ const Value *O2);
};
/// Analysis pass providing a never-invalidated alias analysis result.
diff --git a/contrib/libs/llvm12/include/llvm/Analysis/BlockFrequencyInfoImpl.h b/contrib/libs/llvm12/include/llvm/Analysis/BlockFrequencyInfoImpl.h
index f5c9294263..daf4db72b8 100644
--- a/contrib/libs/llvm12/include/llvm/Analysis/BlockFrequencyInfoImpl.h
+++ b/contrib/libs/llvm12/include/llvm/Analysis/BlockFrequencyInfoImpl.h
@@ -176,7 +176,7 @@ inline raw_ostream &operator<<(raw_ostream &OS, BlockMass X) {
/// algorithms for BlockFrequencyInfoImplBase. Only algorithms that depend on
/// the block type (or that call such algorithms) are skipped here.
///
-/// Nevertheless, the majority of the overall algorithm documentation lives with
+/// Nevertheless, the majority of the overall algorithm documentation lives with
/// BlockFrequencyInfoImpl. See there for details.
class BlockFrequencyInfoImplBase {
public:
@@ -465,7 +465,7 @@ public:
/// Analyze irreducible SCCs.
///
- /// Separate irreducible SCCs from \c G, which is an explicit graph of \c
+ /// Separate irreducible SCCs from \c G, which is an explicit graph of \c
/// OuterLoop (or the top-level function, if \c OuterLoop is \c nullptr).
/// Insert them into \a Loops before \c Insert.
///
@@ -713,7 +713,7 @@ void IrreducibleGraph::addEdges(const BlockNode &Node,
///
/// In addition to loops, this algorithm has limited support for irreducible
/// SCCs, which are SCCs with multiple entry blocks. Irreducible SCCs are
-/// discovered on the fly, and modelled as loops with multiple headers.
+/// discovered on the fly, and modelled as loops with multiple headers.
///
/// The headers of irreducible sub-SCCs consist of its entry blocks and all
/// nodes that are targets of a backedge within it (excluding backedges within
@@ -1253,7 +1253,7 @@ bool BlockFrequencyInfoImpl<BT>::computeMassInLoop(LoopData &Loop) {
}
}
// As a heuristic, if some headers don't have a weight, give them the
- // minimum weight seen (not to disrupt the existing trends too much by
+ // minimum weight seen (not to disrupt the existing trends too much by
// using a weight that's in the general range of the other headers' weights,
// and the minimum seems to perform better than the average.)
// FIXME: better update in the passes that drop the header weight.
@@ -1456,8 +1456,8 @@ void BlockFrequencyInfoImpl<BT>::verifyMatch(
BlockNode Node = Entry.second;
if (OtherValidNodes.count(BB)) {
BlockNode OtherNode = OtherValidNodes[BB];
- const auto &Freq = Freqs[Node.Index];
- const auto &OtherFreq = Other.Freqs[OtherNode.Index];
+ const auto &Freq = Freqs[Node.Index];
+ const auto &OtherFreq = Other.Freqs[OtherNode.Index];
if (Freq.Integer != OtherFreq.Integer) {
Match = false;
dbgs() << "Freq mismatch: " << bfi_detail::getBlockName(BB) << " "
diff --git a/contrib/libs/llvm12/include/llvm/Analysis/BranchProbabilityInfo.h b/contrib/libs/llvm12/include/llvm/Analysis/BranchProbabilityInfo.h
index beabd622a9..e4a8f76b59 100644
--- a/contrib/libs/llvm12/include/llvm/Analysis/BranchProbabilityInfo.h
+++ b/contrib/libs/llvm12/include/llvm/Analysis/BranchProbabilityInfo.h
@@ -34,16 +34,16 @@
#include <algorithm>
#include <cassert>
#include <cstdint>
-#include <memory>
+#include <memory>
#include <utility>
namespace llvm {
class Function;
-class Loop;
+class Loop;
class LoopInfo;
class raw_ostream;
-class DominatorTree;
+class DominatorTree;
class PostDominatorTree;
class TargetLibraryInfo;
class Value;
@@ -60,79 +60,79 @@ class Value;
/// identify an edge, since we can have multiple edges from Src to Dst.
/// As an example, we can have a switch which jumps to Dst with value 0 and
/// value 10.
-///
-/// Process of computing branch probabilities can be logically viewed as three
-/// step process:
-///
-/// First, if there is a profile information associated with the branch then
-/// it is trivially translated to branch probabilities. There is one exception
-/// from this rule though. Probabilities for edges leading to "unreachable"
-/// blocks (blocks with the estimated weight not greater than
-/// UNREACHABLE_WEIGHT) are evaluated according to static estimation and
-/// override profile information. If no branch probabilities were calculated
-/// on this step then take the next one.
-///
-/// Second, estimate absolute execution weights for each block based on
-/// statically known information. Roots of such information are "cold",
-/// "unreachable", "noreturn" and "unwind" blocks. Those blocks get their
-/// weights set to BlockExecWeight::COLD, BlockExecWeight::UNREACHABLE,
-/// BlockExecWeight::NORETURN and BlockExecWeight::UNWIND respectively. Then the
-/// weights are propagated to the other blocks up the domination line. In
-/// addition, if all successors have estimated weights set then maximum of these
-/// weights assigned to the block itself (while this is not ideal heuristic in
-/// theory it's simple and works reasonably well in most cases) and the process
-/// repeats. Once the process of weights propagation converges branch
-/// probabilities are set for all such branches that have at least one successor
-/// with the weight set. Default execution weight (BlockExecWeight::DEFAULT) is
-/// used for any successors which doesn't have its weight set. For loop back
-/// branches we use their weights scaled by loop trip count equal to
-/// 'LBH_TAKEN_WEIGHT/LBH_NOTTAKEN_WEIGHT'.
-///
-/// Here is a simple example demonstrating how the described algorithm works.
-///
-/// BB1
-/// / \
-/// v v
-/// BB2 BB3
-/// / \
-/// v v
-/// ColdBB UnreachBB
-///
-/// Initially, ColdBB is associated with COLD_WEIGHT and UnreachBB with
-/// UNREACHABLE_WEIGHT. COLD_WEIGHT is set to BB2 as maximum between its
-/// successors. BB1 and BB3 has no explicit estimated weights and assumed to
-/// have DEFAULT_WEIGHT. Based on assigned weights branches will have the
-/// following probabilities:
-/// P(BB1->BB2) = COLD_WEIGHT/(COLD_WEIGHT + DEFAULT_WEIGHT) =
-/// 0xffff / (0xffff + 0xfffff) = 0.0588(5.9%)
-/// P(BB1->BB3) = DEFAULT_WEIGHT_WEIGHT/(COLD_WEIGHT + DEFAULT_WEIGHT) =
-/// 0xfffff / (0xffff + 0xfffff) = 0.941(94.1%)
-/// P(BB2->ColdBB) = COLD_WEIGHT/(COLD_WEIGHT + UNREACHABLE_WEIGHT) = 1(100%)
-/// P(BB2->UnreachBB) =
-/// UNREACHABLE_WEIGHT/(COLD_WEIGHT+UNREACHABLE_WEIGHT) = 0(0%)
-///
-/// If no branch probabilities were calculated on this step then take the next
-/// one.
-///
-/// Third, apply different kinds of local heuristics for each individual
-/// branch until first match. For example probability of a pointer to be null is
-/// estimated as PH_TAKEN_WEIGHT/(PH_TAKEN_WEIGHT + PH_NONTAKEN_WEIGHT). If
-/// no local heuristic has been matched then branch is left with no explicit
-/// probability set and assumed to have default probability.
+///
+/// Process of computing branch probabilities can be logically viewed as three
+/// step process:
+///
+/// First, if there is a profile information associated with the branch then
+/// it is trivially translated to branch probabilities. There is one exception
+/// from this rule though. Probabilities for edges leading to "unreachable"
+/// blocks (blocks with the estimated weight not greater than
+/// UNREACHABLE_WEIGHT) are evaluated according to static estimation and
+/// override profile information. If no branch probabilities were calculated
+/// on this step then take the next one.
+///
+/// Second, estimate absolute execution weights for each block based on
+/// statically known information. Roots of such information are "cold",
+/// "unreachable", "noreturn" and "unwind" blocks. Those blocks get their
+/// weights set to BlockExecWeight::COLD, BlockExecWeight::UNREACHABLE,
+/// BlockExecWeight::NORETURN and BlockExecWeight::UNWIND respectively. Then the
+/// weights are propagated to the other blocks up the domination line. In
+/// addition, if all successors have estimated weights set then maximum of these
+/// weights assigned to the block itself (while this is not ideal heuristic in
+/// theory it's simple and works reasonably well in most cases) and the process
+/// repeats. Once the process of weights propagation converges branch
+/// probabilities are set for all such branches that have at least one successor
+/// with the weight set. Default execution weight (BlockExecWeight::DEFAULT) is
+/// used for any successors which doesn't have its weight set. For loop back
+/// branches we use their weights scaled by loop trip count equal to
+/// 'LBH_TAKEN_WEIGHT/LBH_NOTTAKEN_WEIGHT'.
+///
+/// Here is a simple example demonstrating how the described algorithm works.
+///
+/// BB1
+/// / \
+/// v v
+/// BB2 BB3
+/// / \
+/// v v
+/// ColdBB UnreachBB
+///
+/// Initially, ColdBB is associated with COLD_WEIGHT and UnreachBB with
+/// UNREACHABLE_WEIGHT. COLD_WEIGHT is set to BB2 as maximum between its
+/// successors. BB1 and BB3 has no explicit estimated weights and assumed to
+/// have DEFAULT_WEIGHT. Based on assigned weights branches will have the
+/// following probabilities:
+/// P(BB1->BB2) = COLD_WEIGHT/(COLD_WEIGHT + DEFAULT_WEIGHT) =
+/// 0xffff / (0xffff + 0xfffff) = 0.0588(5.9%)
+/// P(BB1->BB3) = DEFAULT_WEIGHT_WEIGHT/(COLD_WEIGHT + DEFAULT_WEIGHT) =
+/// 0xfffff / (0xffff + 0xfffff) = 0.941(94.1%)
+/// P(BB2->ColdBB) = COLD_WEIGHT/(COLD_WEIGHT + UNREACHABLE_WEIGHT) = 1(100%)
+/// P(BB2->UnreachBB) =
+/// UNREACHABLE_WEIGHT/(COLD_WEIGHT+UNREACHABLE_WEIGHT) = 0(0%)
+///
+/// If no branch probabilities were calculated on this step then take the next
+/// one.
+///
+/// Third, apply different kinds of local heuristics for each individual
+/// branch until first match. For example probability of a pointer to be null is
+/// estimated as PH_TAKEN_WEIGHT/(PH_TAKEN_WEIGHT + PH_NONTAKEN_WEIGHT). If
+/// no local heuristic has been matched then branch is left with no explicit
+/// probability set and assumed to have default probability.
class BranchProbabilityInfo {
public:
BranchProbabilityInfo() = default;
BranchProbabilityInfo(const Function &F, const LoopInfo &LI,
const TargetLibraryInfo *TLI = nullptr,
- DominatorTree *DT = nullptr,
+ DominatorTree *DT = nullptr,
PostDominatorTree *PDT = nullptr) {
- calculate(F, LI, TLI, DT, PDT);
+ calculate(F, LI, TLI, DT, PDT);
}
BranchProbabilityInfo(BranchProbabilityInfo &&Arg)
: Probs(std::move(Arg.Probs)), LastF(Arg.LastF),
- EstimatedBlockWeight(std::move(Arg.EstimatedBlockWeight)) {}
+ EstimatedBlockWeight(std::move(Arg.EstimatedBlockWeight)) {}
BranchProbabilityInfo(const BranchProbabilityInfo &) = delete;
BranchProbabilityInfo &operator=(const BranchProbabilityInfo &) = delete;
@@ -140,7 +140,7 @@ public:
BranchProbabilityInfo &operator=(BranchProbabilityInfo &&RHS) {
releaseMemory();
Probs = std::move(RHS.Probs);
- EstimatedBlockWeight = std::move(RHS.EstimatedBlockWeight);
+ EstimatedBlockWeight = std::move(RHS.EstimatedBlockWeight);
return *this;
}
@@ -198,85 +198,85 @@ public:
void setEdgeProbability(const BasicBlock *Src,
const SmallVectorImpl<BranchProbability> &Probs);
- /// Copy outgoing edge probabilities from \p Src to \p Dst.
- ///
- /// This allows to keep probabilities unset for the destination if they were
- /// unset for source.
- void copyEdgeProbabilities(BasicBlock *Src, BasicBlock *Dst);
-
+ /// Copy outgoing edge probabilities from \p Src to \p Dst.
+ ///
+ /// This allows to keep probabilities unset for the destination if they were
+ /// unset for source.
+ void copyEdgeProbabilities(BasicBlock *Src, BasicBlock *Dst);
+
static BranchProbability getBranchProbStackProtector(bool IsLikely) {
static const BranchProbability LikelyProb((1u << 20) - 1, 1u << 20);
return IsLikely ? LikelyProb : LikelyProb.getCompl();
}
void calculate(const Function &F, const LoopInfo &LI,
- const TargetLibraryInfo *TLI, DominatorTree *DT,
- PostDominatorTree *PDT);
+ const TargetLibraryInfo *TLI, DominatorTree *DT,
+ PostDominatorTree *PDT);
/// Forget analysis results for the given basic block.
void eraseBlock(const BasicBlock *BB);
- // Data structure to track SCCs for handling irreducible loops.
- class SccInfo {
- // Enum of types to classify basic blocks in SCC. Basic block belonging to
- // SCC is 'Inner' until it is either 'Header' or 'Exiting'. Note that a
- // basic block can be 'Header' and 'Exiting' at the same time.
- enum SccBlockType {
- Inner = 0x0,
- Header = 0x1,
- Exiting = 0x2,
- };
- // Map of basic blocks to SCC IDs they belong to. If basic block doesn't
- // belong to any SCC it is not in the map.
- using SccMap = DenseMap<const BasicBlock *, int>;
- // Each basic block in SCC is attributed with one or several types from
- // SccBlockType. Map value has uint32_t type (instead of SccBlockType)
- // since basic block may be for example "Header" and "Exiting" at the same
- // time and we need to be able to keep more than one value from
- // SccBlockType.
- using SccBlockTypeMap = DenseMap<const BasicBlock *, uint32_t>;
- // Vector containing classification of basic blocks for all SCCs where i'th
- // vector element corresponds to SCC with ID equal to i.
- using SccBlockTypeMaps = std::vector<SccBlockTypeMap>;
-
+ // Data structure to track SCCs for handling irreducible loops.
+ class SccInfo {
+ // Enum of types to classify basic blocks in SCC. Basic block belonging to
+ // SCC is 'Inner' until it is either 'Header' or 'Exiting'. Note that a
+ // basic block can be 'Header' and 'Exiting' at the same time.
+ enum SccBlockType {
+ Inner = 0x0,
+ Header = 0x1,
+ Exiting = 0x2,
+ };
+ // Map of basic blocks to SCC IDs they belong to. If basic block doesn't
+ // belong to any SCC it is not in the map.
+ using SccMap = DenseMap<const BasicBlock *, int>;
+ // Each basic block in SCC is attributed with one or several types from
+ // SccBlockType. Map value has uint32_t type (instead of SccBlockType)
+ // since basic block may be for example "Header" and "Exiting" at the same
+ // time and we need to be able to keep more than one value from
+ // SccBlockType.
+ using SccBlockTypeMap = DenseMap<const BasicBlock *, uint32_t>;
+ // Vector containing classification of basic blocks for all SCCs where i'th
+ // vector element corresponds to SCC with ID equal to i.
+ using SccBlockTypeMaps = std::vector<SccBlockTypeMap>;
+
SccMap SccNums;
- SccBlockTypeMaps SccBlocks;
-
- public:
- explicit SccInfo(const Function &F);
-
- /// If \p BB belongs to some SCC then ID of that SCC is returned, otherwise
- /// -1 is returned. If \p BB belongs to more than one SCC at the same time
- /// result is undefined.
- int getSCCNum(const BasicBlock *BB) const;
- /// Returns true if \p BB is a 'header' block in SCC with \p SccNum ID,
- /// false otherwise.
- bool isSCCHeader(const BasicBlock *BB, int SccNum) const {
- return getSccBlockType(BB, SccNum) & Header;
- }
- /// Returns true if \p BB is an 'exiting' block in SCC with \p SccNum ID,
- /// false otherwise.
- bool isSCCExitingBlock(const BasicBlock *BB, int SccNum) const {
- return getSccBlockType(BB, SccNum) & Exiting;
- }
- /// Fills in \p Enters vector with all such blocks that don't belong to
- /// SCC with \p SccNum ID but there is an edge to a block belonging to the
- /// SCC.
- void getSccEnterBlocks(int SccNum,
- SmallVectorImpl<BasicBlock *> &Enters) const;
- /// Fills in \p Exits vector with all such blocks that don't belong to
- /// SCC with \p SccNum ID but there is an edge from a block belonging to the
- /// SCC.
- void getSccExitBlocks(int SccNum,
- SmallVectorImpl<BasicBlock *> &Exits) const;
-
- private:
- /// Returns \p BB's type according to classification given by SccBlockType
- /// enum. Please note that \p BB must belong to SSC with \p SccNum ID.
- uint32_t getSccBlockType(const BasicBlock *BB, int SccNum) const;
- /// Calculates \p BB's type and stores it in internal data structures for
- /// future use. Please note that \p BB must belong to SSC with \p SccNum ID.
- void calculateSccBlockType(const BasicBlock *BB, int SccNum);
+ SccBlockTypeMaps SccBlocks;
+
+ public:
+ explicit SccInfo(const Function &F);
+
+ /// If \p BB belongs to some SCC then ID of that SCC is returned, otherwise
+ /// -1 is returned. If \p BB belongs to more than one SCC at the same time
+ /// result is undefined.
+ int getSCCNum(const BasicBlock *BB) const;
+ /// Returns true if \p BB is a 'header' block in SCC with \p SccNum ID,
+ /// false otherwise.
+ bool isSCCHeader(const BasicBlock *BB, int SccNum) const {
+ return getSccBlockType(BB, SccNum) & Header;
+ }
+ /// Returns true if \p BB is an 'exiting' block in SCC with \p SccNum ID,
+ /// false otherwise.
+ bool isSCCExitingBlock(const BasicBlock *BB, int SccNum) const {
+ return getSccBlockType(BB, SccNum) & Exiting;
+ }
+ /// Fills in \p Enters vector with all such blocks that don't belong to
+ /// SCC with \p SccNum ID but there is an edge to a block belonging to the
+ /// SCC.
+ void getSccEnterBlocks(int SccNum,
+ SmallVectorImpl<BasicBlock *> &Enters) const;
+ /// Fills in \p Exits vector with all such blocks that don't belong to
+ /// SCC with \p SccNum ID but there is an edge from a block belonging to the
+ /// SCC.
+ void getSccExitBlocks(int SccNum,
+ SmallVectorImpl<BasicBlock *> &Exits) const;
+
+ private:
+ /// Returns \p BB's type according to classification given by SccBlockType
+ /// enum. Please note that \p BB must belong to SSC with \p SccNum ID.
+ uint32_t getSccBlockType(const BasicBlock *BB, int SccNum) const;
+ /// Calculates \p BB's type and stores it in internal data structures for
+ /// future use. Please note that \p BB must belong to SSC with \p SccNum ID.
+ void calculateSccBlockType(const BasicBlock *BB, int SccNum);
};
private:
@@ -295,35 +295,35 @@ private:
: CallbackVH(const_cast<Value *>(V)), BPI(BPI) {}
};
- /// Pair of Loop and SCC ID number. Used to unify handling of normal and
- /// SCC based loop representations.
- using LoopData = std::pair<Loop *, int>;
- /// Helper class to keep basic block along with its loop data information.
- class LoopBlock {
- public:
- explicit LoopBlock(const BasicBlock *BB, const LoopInfo &LI,
- const SccInfo &SccI);
-
- const BasicBlock *getBlock() const { return BB; }
- BasicBlock *getBlock() { return const_cast<BasicBlock *>(BB); }
- LoopData getLoopData() const { return LD; }
- Loop *getLoop() const { return LD.first; }
- int getSccNum() const { return LD.second; }
-
- bool belongsToLoop() const { return getLoop() || getSccNum() != -1; }
- bool belongsToSameLoop(const LoopBlock &LB) const {
- return (LB.getLoop() && getLoop() == LB.getLoop()) ||
- (LB.getSccNum() != -1 && getSccNum() == LB.getSccNum());
- }
-
- private:
- const BasicBlock *const BB = nullptr;
- LoopData LD = {nullptr, -1};
- };
-
- // Pair of LoopBlocks representing an edge from first to second block.
- using LoopEdge = std::pair<const LoopBlock &, const LoopBlock &>;
-
+ /// Pair of Loop and SCC ID number. Used to unify handling of normal and
+ /// SCC based loop representations.
+ using LoopData = std::pair<Loop *, int>;
+ /// Helper class to keep basic block along with its loop data information.
+ class LoopBlock {
+ public:
+ explicit LoopBlock(const BasicBlock *BB, const LoopInfo &LI,
+ const SccInfo &SccI);
+
+ const BasicBlock *getBlock() const { return BB; }
+ BasicBlock *getBlock() { return const_cast<BasicBlock *>(BB); }
+ LoopData getLoopData() const { return LD; }
+ Loop *getLoop() const { return LD.first; }
+ int getSccNum() const { return LD.second; }
+
+ bool belongsToLoop() const { return getLoop() || getSccNum() != -1; }
+ bool belongsToSameLoop(const LoopBlock &LB) const {
+ return (LB.getLoop() && getLoop() == LB.getLoop()) ||
+ (LB.getSccNum() != -1 && getSccNum() == LB.getSccNum());
+ }
+
+ private:
+ const BasicBlock *const BB = nullptr;
+ LoopData LD = {nullptr, -1};
+ };
+
+ // Pair of LoopBlocks representing an edge from first to second block.
+ using LoopEdge = std::pair<const LoopBlock &, const LoopBlock &>;
+
DenseSet<BasicBlockCallbackVH, DenseMapInfo<Value*>> Handles;
// Since we allow duplicate edges from one basic block to another, we use
@@ -335,88 +335,88 @@ private:
/// Track the last function we run over for printing.
const Function *LastF = nullptr;
- const LoopInfo *LI = nullptr;
-
- /// Keeps information about all SCCs in a function.
- std::unique_ptr<const SccInfo> SccI;
-
- /// Keeps mapping of a basic block to its estimated weight.
- SmallDenseMap<const BasicBlock *, uint32_t> EstimatedBlockWeight;
-
- /// Keeps mapping of a loop to estimated weight to enter the loop.
- SmallDenseMap<LoopData, uint32_t> EstimatedLoopWeight;
-
- /// Helper to construct LoopBlock for \p BB.
- LoopBlock getLoopBlock(const BasicBlock *BB) const {
- return LoopBlock(BB, *LI, *SccI.get());
- }
-
- /// Returns true if destination block belongs to some loop and source block is
- /// either doesn't belong to any loop or belongs to a loop which is not inner
- /// relative to the destination block.
- bool isLoopEnteringEdge(const LoopEdge &Edge) const;
- /// Returns true if source block belongs to some loop and destination block is
- /// either doesn't belong to any loop or belongs to a loop which is not inner
- /// relative to the source block.
- bool isLoopExitingEdge(const LoopEdge &Edge) const;
- /// Returns true if \p Edge is either enters to or exits from some loop, false
- /// in all other cases.
- bool isLoopEnteringExitingEdge(const LoopEdge &Edge) const;
- /// Returns true if source and destination blocks belongs to the same loop and
- /// destination block is loop header.
- bool isLoopBackEdge(const LoopEdge &Edge) const;
- // Fills in \p Enters vector with all "enter" blocks to a loop \LB belongs to.
- void getLoopEnterBlocks(const LoopBlock &LB,
- SmallVectorImpl<BasicBlock *> &Enters) const;
- // Fills in \p Exits vector with all "exit" blocks from a loop \LB belongs to.
- void getLoopExitBlocks(const LoopBlock &LB,
- SmallVectorImpl<BasicBlock *> &Exits) const;
-
- /// Returns estimated weight for \p BB. None if \p BB has no estimated weight.
- Optional<uint32_t> getEstimatedBlockWeight(const BasicBlock *BB) const;
-
- /// Returns estimated weight to enter \p L. In other words it is weight of
- /// loop's header block not scaled by trip count. Returns None if \p L has no
- /// no estimated weight.
- Optional<uint32_t> getEstimatedLoopWeight(const LoopData &L) const;
-
- /// Return estimated weight for \p Edge. Returns None if estimated weight is
- /// unknown.
- Optional<uint32_t> getEstimatedEdgeWeight(const LoopEdge &Edge) const;
-
- /// Iterates over all edges leading from \p SrcBB to \p Successors and
- /// returns maximum of all estimated weights. If at least one edge has unknown
- /// estimated weight None is returned.
- template <class IterT>
- Optional<uint32_t>
- getMaxEstimatedEdgeWeight(const LoopBlock &SrcBB,
- iterator_range<IterT> Successors) const;
-
- /// If \p LoopBB has no estimated weight then set it to \p BBWeight and
- /// return true. Otherwise \p BB's weight remains unchanged and false is
- /// returned. In addition all blocks/loops that might need their weight to be
- /// re-estimated are put into BlockWorkList/LoopWorkList.
- bool updateEstimatedBlockWeight(LoopBlock &LoopBB, uint32_t BBWeight,
- SmallVectorImpl<BasicBlock *> &BlockWorkList,
- SmallVectorImpl<LoopBlock> &LoopWorkList);
-
- /// Starting from \p LoopBB (including \p LoopBB itself) propagate \p BBWeight
- /// up the domination tree.
- void propagateEstimatedBlockWeight(const LoopBlock &LoopBB, DominatorTree *DT,
- PostDominatorTree *PDT, uint32_t BBWeight,
- SmallVectorImpl<BasicBlock *> &WorkList,
- SmallVectorImpl<LoopBlock> &LoopWorkList);
-
- /// Returns block's weight encoded in the IR.
- Optional<uint32_t> getInitialEstimatedBlockWeight(const BasicBlock *BB);
-
- // Computes estimated weights for all blocks in \p F.
- void computeEestimateBlockWeight(const Function &F, DominatorTree *DT,
- PostDominatorTree *PDT);
-
- /// Based on computed weights by \p computeEstimatedBlockWeight set
- /// probabilities on branches.
- bool calcEstimatedHeuristics(const BasicBlock *BB);
+ const LoopInfo *LI = nullptr;
+
+ /// Keeps information about all SCCs in a function.
+ std::unique_ptr<const SccInfo> SccI;
+
+ /// Keeps mapping of a basic block to its estimated weight.
+ SmallDenseMap<const BasicBlock *, uint32_t> EstimatedBlockWeight;
+
+ /// Keeps mapping of a loop to estimated weight to enter the loop.
+ SmallDenseMap<LoopData, uint32_t> EstimatedLoopWeight;
+
+ /// Helper to construct LoopBlock for \p BB.
+ LoopBlock getLoopBlock(const BasicBlock *BB) const {
+ return LoopBlock(BB, *LI, *SccI.get());
+ }
+
+ /// Returns true if destination block belongs to some loop and source block is
+ /// either doesn't belong to any loop or belongs to a loop which is not inner
+ /// relative to the destination block.
+ bool isLoopEnteringEdge(const LoopEdge &Edge) const;
+ /// Returns true if source block belongs to some loop and destination block is
+ /// either doesn't belong to any loop or belongs to a loop which is not inner
+ /// relative to the source block.
+ bool isLoopExitingEdge(const LoopEdge &Edge) const;
+ /// Returns true if \p Edge is either enters to or exits from some loop, false
+ /// in all other cases.
+ bool isLoopEnteringExitingEdge(const LoopEdge &Edge) const;
+ /// Returns true if source and destination blocks belongs to the same loop and
+ /// destination block is loop header.
+ bool isLoopBackEdge(const LoopEdge &Edge) const;
+ // Fills in \p Enters vector with all "enter" blocks to a loop \LB belongs to.
+ void getLoopEnterBlocks(const LoopBlock &LB,
+ SmallVectorImpl<BasicBlock *> &Enters) const;
+ // Fills in \p Exits vector with all "exit" blocks from a loop \LB belongs to.
+ void getLoopExitBlocks(const LoopBlock &LB,
+ SmallVectorImpl<BasicBlock *> &Exits) const;
+
+ /// Returns estimated weight for \p BB. None if \p BB has no estimated weight.
+ Optional<uint32_t> getEstimatedBlockWeight(const BasicBlock *BB) const;
+
+ /// Returns estimated weight to enter \p L. In other words it is weight of
+ /// loop's header block not scaled by trip count. Returns None if \p L has no
+ /// no estimated weight.
+ Optional<uint32_t> getEstimatedLoopWeight(const LoopData &L) const;
+
+ /// Return estimated weight for \p Edge. Returns None if estimated weight is
+ /// unknown.
+ Optional<uint32_t> getEstimatedEdgeWeight(const LoopEdge &Edge) const;
+
+ /// Iterates over all edges leading from \p SrcBB to \p Successors and
+ /// returns maximum of all estimated weights. If at least one edge has unknown
+ /// estimated weight None is returned.
+ template <class IterT>
+ Optional<uint32_t>
+ getMaxEstimatedEdgeWeight(const LoopBlock &SrcBB,
+ iterator_range<IterT> Successors) const;
+
+ /// If \p LoopBB has no estimated weight then set it to \p BBWeight and
+ /// return true. Otherwise \p BB's weight remains unchanged and false is
+ /// returned. In addition all blocks/loops that might need their weight to be
+ /// re-estimated are put into BlockWorkList/LoopWorkList.
+ bool updateEstimatedBlockWeight(LoopBlock &LoopBB, uint32_t BBWeight,
+ SmallVectorImpl<BasicBlock *> &BlockWorkList,
+ SmallVectorImpl<LoopBlock> &LoopWorkList);
+
+ /// Starting from \p LoopBB (including \p LoopBB itself) propagate \p BBWeight
+ /// up the domination tree.
+ void propagateEstimatedBlockWeight(const LoopBlock &LoopBB, DominatorTree *DT,
+ PostDominatorTree *PDT, uint32_t BBWeight,
+ SmallVectorImpl<BasicBlock *> &WorkList,
+ SmallVectorImpl<LoopBlock> &LoopWorkList);
+
+ /// Returns block's weight encoded in the IR.
+ Optional<uint32_t> getInitialEstimatedBlockWeight(const BasicBlock *BB);
+
+ // Computes estimated weights for all blocks in \p F.
+ void computeEestimateBlockWeight(const Function &F, DominatorTree *DT,
+ PostDominatorTree *PDT);
+
+ /// Based on computed weights by \p computeEstimatedBlockWeight set
+ /// probabilities on branches.
+ bool calcEstimatedHeuristics(const BasicBlock *BB);
bool calcMetadataWeights(const BasicBlock *BB);
bool calcPointerHeuristics(const BasicBlock *BB);
bool calcZeroHeuristics(const BasicBlock *BB, const TargetLibraryInfo *TLI);
diff --git a/contrib/libs/llvm12/include/llvm/Analysis/CFGPrinter.h b/contrib/libs/llvm12/include/llvm/Analysis/CFGPrinter.h
index 6547ac800d..a790adc529 100644
--- a/contrib/libs/llvm12/include/llvm/Analysis/CFGPrinter.h
+++ b/contrib/libs/llvm12/include/llvm/Analysis/CFGPrinter.h
@@ -25,7 +25,7 @@
#ifndef LLVM_ANALYSIS_CFGPRINTER_H
#define LLVM_ANALYSIS_CFGPRINTER_H
-#include "llvm/ADT/STLExtras.h"
+#include "llvm/ADT/STLExtras.h"
#include "llvm/Analysis/BlockFrequencyInfo.h"
#include "llvm/Analysis/BranchProbabilityInfo.h"
#include "llvm/Analysis/HeatUtils.h"
@@ -149,18 +149,18 @@ struct DOTGraphTraits<DOTFuncInfo *> : public DefaultDOTGraphTraits {
return OS.str();
}
- static void eraseComment(std::string &OutStr, unsigned &I, unsigned Idx) {
- OutStr.erase(OutStr.begin() + I, OutStr.begin() + Idx);
- --I;
- }
-
- static std::string getCompleteNodeLabel(
- const BasicBlock *Node, DOTFuncInfo *,
- llvm::function_ref<void(raw_string_ostream &, const BasicBlock &)>
- HandleBasicBlock = [](raw_string_ostream &OS,
- const BasicBlock &Node) -> void { OS << Node; },
- llvm::function_ref<void(std::string &, unsigned &, unsigned)>
- HandleComment = eraseComment) {
+ static void eraseComment(std::string &OutStr, unsigned &I, unsigned Idx) {
+ OutStr.erase(OutStr.begin() + I, OutStr.begin() + Idx);
+ --I;
+ }
+
+ static std::string getCompleteNodeLabel(
+ const BasicBlock *Node, DOTFuncInfo *,
+ llvm::function_ref<void(raw_string_ostream &, const BasicBlock &)>
+ HandleBasicBlock = [](raw_string_ostream &OS,
+ const BasicBlock &Node) -> void { OS << Node; },
+ llvm::function_ref<void(std::string &, unsigned &, unsigned)>
+ HandleComment = eraseComment) {
enum { MaxColumns = 80 };
std::string Str;
raw_string_ostream OS(Str);
@@ -170,7 +170,7 @@ struct DOTGraphTraits<DOTFuncInfo *> : public DefaultDOTGraphTraits {
OS << ":";
}
- HandleBasicBlock(OS, *Node);
+ HandleBasicBlock(OS, *Node);
std::string OutStr = OS.str();
if (OutStr[0] == '\n')
OutStr.erase(OutStr.begin());
@@ -186,7 +186,7 @@ struct DOTGraphTraits<DOTFuncInfo *> : public DefaultDOTGraphTraits {
LastSpace = 0;
} else if (OutStr[i] == ';') { // Delete comments!
unsigned Idx = OutStr.find('\n', i + 1); // Find end of line
- HandleComment(OutStr, i, Idx);
+ HandleComment(OutStr, i, Idx);
} else if (ColNum == MaxColumns) { // Wrap lines.
// Wrap very long names even though we can't find a space.
if (!LastSpace)
@@ -302,7 +302,7 @@ struct DOTGraphTraits<DOTFuncInfo *> : public DefaultDOTGraphTraits {
" fillcolor=\"" + Color + "70\"";
return Attrs;
}
- bool isNodeHidden(const BasicBlock *Node, const DOTFuncInfo *CFGInfo);
+ bool isNodeHidden(const BasicBlock *Node, const DOTFuncInfo *CFGInfo);
void computeHiddenNodes(const Function *F);
};
} // End llvm namespace
diff --git a/contrib/libs/llvm12/include/llvm/Analysis/CGSCCPassManager.h b/contrib/libs/llvm12/include/llvm/Analysis/CGSCCPassManager.h
index 5ef05f350a..b5154cae9b 100644
--- a/contrib/libs/llvm12/include/llvm/Analysis/CGSCCPassManager.h
+++ b/contrib/libs/llvm12/include/llvm/Analysis/CGSCCPassManager.h
@@ -97,7 +97,7 @@
#include "llvm/ADT/DenseMap.h"
#include "llvm/ADT/DenseSet.h"
-#include "llvm/ADT/MapVector.h"
+#include "llvm/ADT/MapVector.h"
#include "llvm/ADT/PriorityWorklist.h"
#include "llvm/ADT/STLExtras.h"
#include "llvm/ADT/SmallPtrSet.h"
@@ -322,16 +322,16 @@ struct CGSCCUpdateResult {
/// for a better technique.
SmallDenseSet<std::pair<LazyCallGraph::Node *, LazyCallGraph::SCC *>, 4>
&InlinedInternalEdges;
-
- /// Weak VHs to keep track of indirect calls for the purposes of detecting
- /// devirtualization.
- ///
- /// This is a map to avoid having duplicate entries. If a Value is
- /// deallocated, its corresponding WeakTrackingVH will be nulled out. When
- /// checking if a Value is in the map or not, also check if the corresponding
- /// WeakTrackingVH is null to avoid issues with a new Value sharing the same
- /// address as a deallocated one.
- SmallMapVector<Value *, WeakTrackingVH, 16> IndirectVHs;
+
+ /// Weak VHs to keep track of indirect calls for the purposes of detecting
+ /// devirtualization.
+ ///
+ /// This is a map to avoid having duplicate entries. If a Value is
+ /// deallocated, its corresponding WeakTrackingVH will be nulled out. When
+ /// checking if a Value is in the map or not, also check if the corresponding
+ /// WeakTrackingVH is null to avoid issues with a new Value sharing the same
+ /// address as a deallocated one.
+ SmallMapVector<Value *, WeakTrackingVH, 16> IndirectVHs;
};
/// The core module pass which does a post-order walk of the SCCs and
@@ -344,13 +344,13 @@ struct CGSCCUpdateResult {
/// pass over the module to enable a \c FunctionAnalysisManager to be used
/// within this run safely.
class ModuleToPostOrderCGSCCPassAdaptor
- : public PassInfoMixin<ModuleToPostOrderCGSCCPassAdaptor> {
+ : public PassInfoMixin<ModuleToPostOrderCGSCCPassAdaptor> {
public:
- using PassConceptT =
- detail::PassConcept<LazyCallGraph::SCC, CGSCCAnalysisManager,
- LazyCallGraph &, CGSCCUpdateResult &>;
-
- explicit ModuleToPostOrderCGSCCPassAdaptor(std::unique_ptr<PassConceptT> Pass)
+ using PassConceptT =
+ detail::PassConcept<LazyCallGraph::SCC, CGSCCAnalysisManager,
+ LazyCallGraph &, CGSCCUpdateResult &>;
+
+ explicit ModuleToPostOrderCGSCCPassAdaptor(std::unique_ptr<PassConceptT> Pass)
: Pass(std::move(Pass)) {}
ModuleToPostOrderCGSCCPassAdaptor(ModuleToPostOrderCGSCCPassAdaptor &&Arg)
@@ -370,22 +370,22 @@ public:
/// Runs the CGSCC pass across every SCC in the module.
PreservedAnalyses run(Module &M, ModuleAnalysisManager &AM);
- static bool isRequired() { return true; }
-
+ static bool isRequired() { return true; }
+
private:
- std::unique_ptr<PassConceptT> Pass;
+ std::unique_ptr<PassConceptT> Pass;
};
/// A function to deduce a function pass type and wrap it in the
/// templated adaptor.
template <typename CGSCCPassT>
-ModuleToPostOrderCGSCCPassAdaptor
+ModuleToPostOrderCGSCCPassAdaptor
createModuleToPostOrderCGSCCPassAdaptor(CGSCCPassT Pass) {
- using PassModelT = detail::PassModel<LazyCallGraph::SCC, CGSCCPassT,
- PreservedAnalyses, CGSCCAnalysisManager,
- LazyCallGraph &, CGSCCUpdateResult &>;
- return ModuleToPostOrderCGSCCPassAdaptor(
- std::make_unique<PassModelT>(std::move(Pass)));
+ using PassModelT = detail::PassModel<LazyCallGraph::SCC, CGSCCPassT,
+ PreservedAnalyses, CGSCCAnalysisManager,
+ LazyCallGraph &, CGSCCUpdateResult &>;
+ return ModuleToPostOrderCGSCCPassAdaptor(
+ std::make_unique<PassModelT>(std::move(Pass)));
}
/// A proxy from a \c FunctionAnalysisManager to an \c SCC.
@@ -464,11 +464,11 @@ LazyCallGraph::SCC &updateCGAndAnalysisManagerForCGSCCPass(
/// pass over the SCC to enable a \c FunctionAnalysisManager to be used
/// within this run safely.
class CGSCCToFunctionPassAdaptor
- : public PassInfoMixin<CGSCCToFunctionPassAdaptor> {
+ : public PassInfoMixin<CGSCCToFunctionPassAdaptor> {
public:
- using PassConceptT = detail::PassConcept<Function, FunctionAnalysisManager>;
-
- explicit CGSCCToFunctionPassAdaptor(std::unique_ptr<PassConceptT> Pass)
+ using PassConceptT = detail::PassConcept<Function, FunctionAnalysisManager>;
+
+ explicit CGSCCToFunctionPassAdaptor(std::unique_ptr<PassConceptT> Pass)
: Pass(std::move(Pass)) {}
CGSCCToFunctionPassAdaptor(CGSCCToFunctionPassAdaptor &&Arg)
@@ -486,24 +486,24 @@ public:
/// Runs the function pass across every function in the module.
PreservedAnalyses run(LazyCallGraph::SCC &C, CGSCCAnalysisManager &AM,
- LazyCallGraph &CG, CGSCCUpdateResult &UR);
+ LazyCallGraph &CG, CGSCCUpdateResult &UR);
- static bool isRequired() { return true; }
+ static bool isRequired() { return true; }
private:
- std::unique_ptr<PassConceptT> Pass;
+ std::unique_ptr<PassConceptT> Pass;
};
/// A function to deduce a function pass type and wrap it in the
/// templated adaptor.
template <typename FunctionPassT>
-CGSCCToFunctionPassAdaptor
+CGSCCToFunctionPassAdaptor
createCGSCCToFunctionPassAdaptor(FunctionPassT Pass) {
- using PassModelT =
- detail::PassModel<Function, FunctionPassT, PreservedAnalyses,
- FunctionAnalysisManager>;
- return CGSCCToFunctionPassAdaptor(
- std::make_unique<PassModelT>(std::move(Pass)));
+ using PassModelT =
+ detail::PassModel<Function, FunctionPassT, PreservedAnalyses,
+ FunctionAnalysisManager>;
+ return CGSCCToFunctionPassAdaptor(
+ std::make_unique<PassModelT>(std::move(Pass)));
}
/// A helper that repeats an SCC pass each time an indirect call is refined to
@@ -520,36 +520,36 @@ createCGSCCToFunctionPassAdaptor(FunctionPassT Pass) {
/// This repetition has the potential to be very large however, as each one
/// might refine a single call site. As a consequence, in practice we use an
/// upper bound on the number of repetitions to limit things.
-class DevirtSCCRepeatedPass : public PassInfoMixin<DevirtSCCRepeatedPass> {
+class DevirtSCCRepeatedPass : public PassInfoMixin<DevirtSCCRepeatedPass> {
public:
- using PassConceptT =
- detail::PassConcept<LazyCallGraph::SCC, CGSCCAnalysisManager,
- LazyCallGraph &, CGSCCUpdateResult &>;
-
- explicit DevirtSCCRepeatedPass(std::unique_ptr<PassConceptT> Pass,
- int MaxIterations)
+ using PassConceptT =
+ detail::PassConcept<LazyCallGraph::SCC, CGSCCAnalysisManager,
+ LazyCallGraph &, CGSCCUpdateResult &>;
+
+ explicit DevirtSCCRepeatedPass(std::unique_ptr<PassConceptT> Pass,
+ int MaxIterations)
: Pass(std::move(Pass)), MaxIterations(MaxIterations) {}
/// Runs the wrapped pass up to \c MaxIterations on the SCC, iterating
/// whenever an indirect call is refined.
PreservedAnalyses run(LazyCallGraph::SCC &InitialC, CGSCCAnalysisManager &AM,
- LazyCallGraph &CG, CGSCCUpdateResult &UR);
+ LazyCallGraph &CG, CGSCCUpdateResult &UR);
private:
- std::unique_ptr<PassConceptT> Pass;
+ std::unique_ptr<PassConceptT> Pass;
int MaxIterations;
};
/// A function to deduce a function pass type and wrap it in the
/// templated adaptor.
template <typename CGSCCPassT>
-DevirtSCCRepeatedPass createDevirtSCCRepeatedPass(CGSCCPassT Pass,
- int MaxIterations) {
- using PassModelT = detail::PassModel<LazyCallGraph::SCC, CGSCCPassT,
- PreservedAnalyses, CGSCCAnalysisManager,
- LazyCallGraph &, CGSCCUpdateResult &>;
- return DevirtSCCRepeatedPass(std::make_unique<PassModelT>(std::move(Pass)),
- MaxIterations);
+DevirtSCCRepeatedPass createDevirtSCCRepeatedPass(CGSCCPassT Pass,
+ int MaxIterations) {
+ using PassModelT = detail::PassModel<LazyCallGraph::SCC, CGSCCPassT,
+ PreservedAnalyses, CGSCCAnalysisManager,
+ LazyCallGraph &, CGSCCUpdateResult &>;
+ return DevirtSCCRepeatedPass(std::make_unique<PassModelT>(std::move(Pass)),
+ MaxIterations);
}
// Clear out the debug logging macro.
diff --git a/contrib/libs/llvm12/include/llvm/Analysis/CaptureTracking.h b/contrib/libs/llvm12/include/llvm/Analysis/CaptureTracking.h
index ed752e0e53..3fa0c98703 100644
--- a/contrib/libs/llvm12/include/llvm/Analysis/CaptureTracking.h
+++ b/contrib/libs/llvm12/include/llvm/Analysis/CaptureTracking.h
@@ -20,8 +20,8 @@
#ifndef LLVM_ANALYSIS_CAPTURETRACKING_H
#define LLVM_ANALYSIS_CAPTURETRACKING_H
-#include "llvm/ADT/DenseMap.h"
-
+#include "llvm/ADT/DenseMap.h"
+
namespace llvm {
class Value;
@@ -103,12 +103,12 @@ namespace llvm {
/// is zero, a default value is assumed.
void PointerMayBeCaptured(const Value *V, CaptureTracker *Tracker,
unsigned MaxUsesToExplore = 0);
-
- /// Returns true if the pointer is to a function-local object that never
- /// escapes from the function.
- bool isNonEscapingLocalObject(
- const Value *V,
- SmallDenseMap<const Value *, bool, 8> *IsCapturedCache = nullptr);
+
+ /// Returns true if the pointer is to a function-local object that never
+ /// escapes from the function.
+ bool isNonEscapingLocalObject(
+ const Value *V,
+ SmallDenseMap<const Value *, bool, 8> *IsCapturedCache = nullptr);
} // end namespace llvm
#endif
diff --git a/contrib/libs/llvm12/include/llvm/Analysis/CodeMetrics.h b/contrib/libs/llvm12/include/llvm/Analysis/CodeMetrics.h
index c50218dc8b..3e39bad84e 100644
--- a/contrib/libs/llvm12/include/llvm/Analysis/CodeMetrics.h
+++ b/contrib/libs/llvm12/include/llvm/Analysis/CodeMetrics.h
@@ -82,8 +82,8 @@ struct CodeMetrics {
/// Add information about a block to the current state.
void analyzeBasicBlock(const BasicBlock *BB, const TargetTransformInfo &TTI,
- const SmallPtrSetImpl<const Value *> &EphValues,
- bool PrepareForLTO = false);
+ const SmallPtrSetImpl<const Value *> &EphValues,
+ bool PrepareForLTO = false);
/// Collect a loop's ephemeral values (those used only by an assume
/// or similar intrinsics in the loop).
diff --git a/contrib/libs/llvm12/include/llvm/Analysis/ConstantFolding.h b/contrib/libs/llvm12/include/llvm/Analysis/ConstantFolding.h
index aecf333a1d..d2e598ab9a 100644
--- a/contrib/libs/llvm12/include/llvm/Analysis/ConstantFolding.h
+++ b/contrib/libs/llvm12/include/llvm/Analysis/ConstantFolding.h
@@ -32,7 +32,7 @@ template <typename T> class ArrayRef;
class CallBase;
class Constant;
class ConstantExpr;
-class DSOLocalEquivalent;
+class DSOLocalEquivalent;
class DataLayout;
class Function;
class GlobalValue;
@@ -42,11 +42,11 @@ class Type;
/// If this constant is a constant offset from a global, return the global and
/// the constant. Because of constantexprs, this function is recursive.
-/// If the global is part of a dso_local_equivalent constant, return it through
-/// `Equiv` if it is provided.
+/// If the global is part of a dso_local_equivalent constant, return it through
+/// `Equiv` if it is provided.
bool IsConstantOffsetFromGlobal(Constant *C, GlobalValue *&GV, APInt &Offset,
- const DataLayout &DL,
- DSOLocalEquivalent **DSOEquiv = nullptr);
+ const DataLayout &DL,
+ DSOLocalEquivalent **DSOEquiv = nullptr);
/// ConstantFoldInstruction - Try to constant fold the specified instruction.
/// If successful, the constant result is returned, if not, null is returned.
diff --git a/contrib/libs/llvm12/include/llvm/Analysis/ConstraintSystem.h b/contrib/libs/llvm12/include/llvm/Analysis/ConstraintSystem.h
index 11d353bb59..a2cd1b256d 100644
--- a/contrib/libs/llvm12/include/llvm/Analysis/ConstraintSystem.h
+++ b/contrib/libs/llvm12/include/llvm/Analysis/ConstraintSystem.h
@@ -1,99 +1,99 @@
-#pragma once
-
-#ifdef __GNUC__
-#pragma GCC diagnostic push
-#pragma GCC diagnostic ignored "-Wunused-parameter"
-#endif
-
-//===- ConstraintSystem.h - A system of linear constraints. --------------===//
-//
-// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
-// See https://llvm.org/LICENSE.txt for license information.
-// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
-//
-//===----------------------------------------------------------------------===//
-
-#ifndef LLVM_ANALYSIS_CONSTRAINTSYSTEM_H
-#define LLVM_ANALYSIS_CONSTRAINTSYSTEM_H
-
-#include "llvm/ADT/APInt.h"
-#include "llvm/ADT/ArrayRef.h"
-#include "llvm/ADT/STLExtras.h"
-#include "llvm/ADT/SmallVector.h"
-
-#include <string>
-
-namespace llvm {
-
-class ConstraintSystem {
- /// Current linear constraints in the system.
- /// An entry of the form c0, c1, ... cn represents the following constraint:
- /// c0 >= v0 * c1 + .... + v{n-1} * cn
- SmallVector<SmallVector<int64_t, 8>, 4> Constraints;
-
- /// Current greatest common divisor for all coefficients in the system.
- uint32_t GCD = 1;
-
- // Eliminate constraints from the system using Fourier–Motzkin elimination.
- bool eliminateUsingFM();
-
- /// Print the constraints in the system, using \p Names as variable names.
- void dump(ArrayRef<std::string> Names) const;
-
- /// Print the constraints in the system, using x0...xn as variable names.
- void dump() const;
-
- /// Returns true if there may be a solution for the constraints in the system.
- bool mayHaveSolutionImpl();
-
-public:
- bool addVariableRow(const SmallVector<int64_t, 8> &R) {
- assert(Constraints.empty() || R.size() == Constraints.back().size());
- // If all variable coefficients are 0, the constraint does not provide any
- // usable information.
- if (all_of(makeArrayRef(R).drop_front(1), [](int64_t C) { return C == 0; }))
- return false;
-
- for (const auto &C : R) {
- auto A = std::abs(C);
- GCD = APIntOps::GreatestCommonDivisor({32, (uint32_t)A}, {32, GCD})
- .getZExtValue();
- }
- Constraints.push_back(R);
- return true;
- }
-
- bool addVariableRowFill(const SmallVector<int64_t, 8> &R) {
- for (auto &CR : Constraints) {
- while (CR.size() != R.size())
- CR.push_back(0);
- }
- return addVariableRow(R);
- }
-
- /// Returns true if there may be a solution for the constraints in the system.
- bool mayHaveSolution();
-
- static SmallVector<int64_t, 8> negate(SmallVector<int64_t, 8> R) {
- // The negated constraint R is obtained by multiplying by -1 and adding 1 to
- // the constant.
- R[0] += 1;
- for (auto &C : R)
- C *= -1;
- return R;
- }
-
- bool isConditionImplied(SmallVector<int64_t, 8> R);
-
- void popLastConstraint() { Constraints.pop_back(); }
-
- /// Returns the number of rows in the constraint system.
- unsigned size() const { return Constraints.size(); }
-};
-} // namespace llvm
-
-#endif // LLVM_ANALYSIS_CONSTRAINTSYSTEM_H
-
-#ifdef __GNUC__
-#pragma GCC diagnostic pop
-#endif
+#pragma once
+
+#ifdef __GNUC__
+#pragma GCC diagnostic push
+#pragma GCC diagnostic ignored "-Wunused-parameter"
+#endif
+
+//===- ConstraintSystem.h - A system of linear constraints. --------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_ANALYSIS_CONSTRAINTSYSTEM_H
+#define LLVM_ANALYSIS_CONSTRAINTSYSTEM_H
+
+#include "llvm/ADT/APInt.h"
+#include "llvm/ADT/ArrayRef.h"
+#include "llvm/ADT/STLExtras.h"
+#include "llvm/ADT/SmallVector.h"
+
+#include <string>
+
+namespace llvm {
+
+class ConstraintSystem {
+ /// Current linear constraints in the system.
+ /// An entry of the form c0, c1, ... cn represents the following constraint:
+ /// c0 >= v0 * c1 + .... + v{n-1} * cn
+ SmallVector<SmallVector<int64_t, 8>, 4> Constraints;
+
+ /// Current greatest common divisor for all coefficients in the system.
+ uint32_t GCD = 1;
+
+ // Eliminate constraints from the system using Fourier–Motzkin elimination.
+ bool eliminateUsingFM();
+
+ /// Print the constraints in the system, using \p Names as variable names.
+ void dump(ArrayRef<std::string> Names) const;
+
+ /// Print the constraints in the system, using x0...xn as variable names.
+ void dump() const;
+
+ /// Returns true if there may be a solution for the constraints in the system.
+ bool mayHaveSolutionImpl();
+
+public:
+ bool addVariableRow(const SmallVector<int64_t, 8> &R) {
+ assert(Constraints.empty() || R.size() == Constraints.back().size());
+ // If all variable coefficients are 0, the constraint does not provide any
+ // usable information.
+ if (all_of(makeArrayRef(R).drop_front(1), [](int64_t C) { return C == 0; }))
+ return false;
+
+ for (const auto &C : R) {
+ auto A = std::abs(C);
+ GCD = APIntOps::GreatestCommonDivisor({32, (uint32_t)A}, {32, GCD})
+ .getZExtValue();
+ }
+ Constraints.push_back(R);
+ return true;
+ }
+
+ bool addVariableRowFill(const SmallVector<int64_t, 8> &R) {
+ for (auto &CR : Constraints) {
+ while (CR.size() != R.size())
+ CR.push_back(0);
+ }
+ return addVariableRow(R);
+ }
+
+ /// Returns true if there may be a solution for the constraints in the system.
+ bool mayHaveSolution();
+
+ static SmallVector<int64_t, 8> negate(SmallVector<int64_t, 8> R) {
+ // The negated constraint R is obtained by multiplying by -1 and adding 1 to
+ // the constant.
+ R[0] += 1;
+ for (auto &C : R)
+ C *= -1;
+ return R;
+ }
+
+ bool isConditionImplied(SmallVector<int64_t, 8> R);
+
+ void popLastConstraint() { Constraints.pop_back(); }
+
+ /// Returns the number of rows in the constraint system.
+ unsigned size() const { return Constraints.size(); }
+};
+} // namespace llvm
+
+#endif // LLVM_ANALYSIS_CONSTRAINTSYSTEM_H
+
+#ifdef __GNUC__
+#pragma GCC diagnostic pop
+#endif
diff --git a/contrib/libs/llvm12/include/llvm/Analysis/DDG.h b/contrib/libs/llvm12/include/llvm/Analysis/DDG.h
index 7629ceee4a..280a2d10a6 100644
--- a/contrib/libs/llvm12/include/llvm/Analysis/DDG.h
+++ b/contrib/libs/llvm12/include/llvm/Analysis/DDG.h
@@ -159,7 +159,7 @@ private:
setKind((InstList.size() == 0 && Input.size() == 1)
? NodeKind::SingleInstruction
: NodeKind::MultiInstruction);
- llvm::append_range(InstList, Input);
+ llvm::append_range(InstList, Input);
}
void appendInstructions(const SimpleDDGNode &Input) {
appendInstructions(Input.getInstructions());
@@ -297,12 +297,12 @@ public:
bool getDependencies(const NodeType &Src, const NodeType &Dst,
DependenceList &Deps) const;
- /// Return a string representing the type of dependence that the dependence
- /// analysis identified between the two given nodes. This function assumes
- /// that there is a memory dependence between the given two nodes.
- const std::string getDependenceString(const NodeType &Src,
- const NodeType &Dst) const;
-
+ /// Return a string representing the type of dependence that the dependence
+ /// analysis identified between the two given nodes. This function assumes
+ /// that there is a memory dependence between the given two nodes.
+ const std::string getDependenceString(const NodeType &Src,
+ const NodeType &Dst) const;
+
protected:
// Name of the graph.
std::string Name;
@@ -476,26 +476,26 @@ bool DependenceGraphInfo<NodeType>::getDependencies(
return !Deps.empty();
}
-template <typename NodeType>
-const std::string
-DependenceGraphInfo<NodeType>::getDependenceString(const NodeType &Src,
- const NodeType &Dst) const {
- std::string Str;
- raw_string_ostream OS(Str);
- DependenceList Deps;
- if (!getDependencies(Src, Dst, Deps))
- return OS.str();
- interleaveComma(Deps, OS, [&](const std::unique_ptr<Dependence> &D) {
- D->dump(OS);
- // Remove the extra new-line character printed by the dump
- // method
- if (OS.str().back() == '\n')
- OS.str().pop_back();
- });
-
- return OS.str();
-}
-
+template <typename NodeType>
+const std::string
+DependenceGraphInfo<NodeType>::getDependenceString(const NodeType &Src,
+ const NodeType &Dst) const {
+ std::string Str;
+ raw_string_ostream OS(Str);
+ DependenceList Deps;
+ if (!getDependencies(Src, Dst, Deps))
+ return OS.str();
+ interleaveComma(Deps, OS, [&](const std::unique_ptr<Dependence> &D) {
+ D->dump(OS);
+ // Remove the extra new-line character printed by the dump
+ // method
+ if (OS.str().back() == '\n')
+ OS.str().pop_back();
+ });
+
+ return OS.str();
+}
+
//===--------------------------------------------------------------------===//
// GraphTraits specializations for the DDG
//===--------------------------------------------------------------------===//
diff --git a/contrib/libs/llvm12/include/llvm/Analysis/DDGPrinter.h b/contrib/libs/llvm12/include/llvm/Analysis/DDGPrinter.h
index bb40d82d46..bffbefdab0 100644
--- a/contrib/libs/llvm12/include/llvm/Analysis/DDGPrinter.h
+++ b/contrib/libs/llvm12/include/llvm/Analysis/DDGPrinter.h
@@ -1,102 +1,102 @@
-#pragma once
-
-#ifdef __GNUC__
-#pragma GCC diagnostic push
-#pragma GCC diagnostic ignored "-Wunused-parameter"
-#endif
-
-//===- llvm/Analysis/DDGPrinter.h -------------------------------*- C++ -*-===//
-//
-// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
-// See https://llvm.org/LICENSE.txt for license information.
-// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
-//
-//===----------------------------------------------------------------------===//
-
-//===----------------------------------------------------------------------===//
-//
-// This file defines the DOT printer for the Data-Dependence Graph (DDG).
-//
-//===----------------------------------------------------------------------===//
-
-#ifndef LLVM_ANALYSIS_DDGPRINTER_H
-#define LLVM_ANALYSIS_DDGPRINTER_H
-
-#include "llvm/Analysis/DDG.h"
-#include "llvm/Pass.h"
-#include "llvm/Support/DOTGraphTraits.h"
-
-namespace llvm {
-
-//===--------------------------------------------------------------------===//
-// Implementation of DDG DOT Printer for a loop.
-//===--------------------------------------------------------------------===//
-class DDGDotPrinterPass : public PassInfoMixin<DDGDotPrinterPass> {
-public:
- PreservedAnalyses run(Loop &L, LoopAnalysisManager &AM,
- LoopStandardAnalysisResults &AR, LPMUpdater &U);
-};
-
-//===--------------------------------------------------------------------===//
-// Specialization of DOTGraphTraits.
-//===--------------------------------------------------------------------===//
-template <>
-struct DOTGraphTraits<const DataDependenceGraph *>
- : public DefaultDOTGraphTraits {
-
- DOTGraphTraits(bool IsSimple = false) : DefaultDOTGraphTraits(IsSimple) {}
-
- /// Generate a title for the graph in DOT format
- std::string getGraphName(const DataDependenceGraph *G) {
- assert(G && "expected a valid pointer to the graph.");
- return "DDG for '" + std::string(G->getName()) + "'";
- }
-
- /// Print a DDG node either in concise form (-ddg-dot-only) or
- /// verbose mode (-ddg-dot).
- std::string getNodeLabel(const DDGNode *Node,
- const DataDependenceGraph *Graph);
-
- /// Print attributes of an edge in the DDG graph. If the edge
- /// is a MemoryDependence edge, then detailed dependence info
- /// available from DependenceAnalysis is displayed.
- std::string
- getEdgeAttributes(const DDGNode *Node,
- GraphTraits<const DDGNode *>::ChildIteratorType I,
- const DataDependenceGraph *G);
-
- /// Do not print nodes that are part of a pi-block separately. They
- /// will be printed when their containing pi-block is being printed.
- bool isNodeHidden(const DDGNode *Node, const DataDependenceGraph *G);
-
-private:
- /// Print a DDG node in concise form.
- static std::string getSimpleNodeLabel(const DDGNode *Node,
- const DataDependenceGraph *G);
-
- /// Print a DDG node with more information including containing instructions
- /// and detailed information about the dependence edges.
- static std::string getVerboseNodeLabel(const DDGNode *Node,
- const DataDependenceGraph *G);
-
- /// Print a DDG edge in concise form.
- static std::string getSimpleEdgeAttributes(const DDGNode *Src,
- const DDGEdge *Edge,
- const DataDependenceGraph *G);
-
- /// Print a DDG edge with more information including detailed information
- /// about the dependence edges.
- static std::string getVerboseEdgeAttributes(const DDGNode *Src,
- const DDGEdge *Edge,
- const DataDependenceGraph *G);
-};
-
-using DDGDotGraphTraits = DOTGraphTraits<const DataDependenceGraph *>;
-
-} // namespace llvm
-
-#endif // LLVM_ANALYSIS_DDGPRINTER_H
-
-#ifdef __GNUC__
-#pragma GCC diagnostic pop
-#endif
+#pragma once
+
+#ifdef __GNUC__
+#pragma GCC diagnostic push
+#pragma GCC diagnostic ignored "-Wunused-parameter"
+#endif
+
+//===- llvm/Analysis/DDGPrinter.h -------------------------------*- C++ -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+//===----------------------------------------------------------------------===//
+//
+// This file defines the DOT printer for the Data-Dependence Graph (DDG).
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_ANALYSIS_DDGPRINTER_H
+#define LLVM_ANALYSIS_DDGPRINTER_H
+
+#include "llvm/Analysis/DDG.h"
+#include "llvm/Pass.h"
+#include "llvm/Support/DOTGraphTraits.h"
+
+namespace llvm {
+
+//===--------------------------------------------------------------------===//
+// Implementation of DDG DOT Printer for a loop.
+//===--------------------------------------------------------------------===//
+class DDGDotPrinterPass : public PassInfoMixin<DDGDotPrinterPass> {
+public:
+ PreservedAnalyses run(Loop &L, LoopAnalysisManager &AM,
+ LoopStandardAnalysisResults &AR, LPMUpdater &U);
+};
+
+//===--------------------------------------------------------------------===//
+// Specialization of DOTGraphTraits.
+//===--------------------------------------------------------------------===//
+template <>
+struct DOTGraphTraits<const DataDependenceGraph *>
+ : public DefaultDOTGraphTraits {
+
+ DOTGraphTraits(bool IsSimple = false) : DefaultDOTGraphTraits(IsSimple) {}
+
+ /// Generate a title for the graph in DOT format
+ std::string getGraphName(const DataDependenceGraph *G) {
+ assert(G && "expected a valid pointer to the graph.");
+ return "DDG for '" + std::string(G->getName()) + "'";
+ }
+
+ /// Print a DDG node either in concise form (-ddg-dot-only) or
+ /// verbose mode (-ddg-dot).
+ std::string getNodeLabel(const DDGNode *Node,
+ const DataDependenceGraph *Graph);
+
+ /// Print attributes of an edge in the DDG graph. If the edge
+ /// is a MemoryDependence edge, then detailed dependence info
+ /// available from DependenceAnalysis is displayed.
+ std::string
+ getEdgeAttributes(const DDGNode *Node,
+ GraphTraits<const DDGNode *>::ChildIteratorType I,
+ const DataDependenceGraph *G);
+
+ /// Do not print nodes that are part of a pi-block separately. They
+ /// will be printed when their containing pi-block is being printed.
+ bool isNodeHidden(const DDGNode *Node, const DataDependenceGraph *G);
+
+private:
+ /// Print a DDG node in concise form.
+ static std::string getSimpleNodeLabel(const DDGNode *Node,
+ const DataDependenceGraph *G);
+
+ /// Print a DDG node with more information including containing instructions
+ /// and detailed information about the dependence edges.
+ static std::string getVerboseNodeLabel(const DDGNode *Node,
+ const DataDependenceGraph *G);
+
+ /// Print a DDG edge in concise form.
+ static std::string getSimpleEdgeAttributes(const DDGNode *Src,
+ const DDGEdge *Edge,
+ const DataDependenceGraph *G);
+
+ /// Print a DDG edge with more information including detailed information
+ /// about the dependence edges.
+ static std::string getVerboseEdgeAttributes(const DDGNode *Src,
+ const DDGEdge *Edge,
+ const DataDependenceGraph *G);
+};
+
+using DDGDotGraphTraits = DOTGraphTraits<const DataDependenceGraph *>;
+
+} // namespace llvm
+
+#endif // LLVM_ANALYSIS_DDGPRINTER_H
+
+#ifdef __GNUC__
+#pragma GCC diagnostic pop
+#endif
diff --git a/contrib/libs/llvm12/include/llvm/Analysis/Delinearization.h b/contrib/libs/llvm12/include/llvm/Analysis/Delinearization.h
index 229eed3074..6fb0cb48c0 100644
--- a/contrib/libs/llvm12/include/llvm/Analysis/Delinearization.h
+++ b/contrib/libs/llvm12/include/llvm/Analysis/Delinearization.h
@@ -1,44 +1,44 @@
-#pragma once
-
-#ifdef __GNUC__
-#pragma GCC diagnostic push
-#pragma GCC diagnostic ignored "-Wunused-parameter"
-#endif
-
-//===---- Delinearization.h - MultiDimensional Index Delinearization ------===//
-//
-// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
-// See https://llvm.org/LICENSE.txt for license information.
-// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
-//
-//===----------------------------------------------------------------------===//
-//
-// This implements an analysis pass that tries to delinearize all GEP
-// instructions in all loops using the SCEV analysis functionality. This pass is
-// only used for testing purposes: if your pass needs delinearization, please
-// use the on-demand SCEVAddRecExpr::delinearize() function.
-//
-//===----------------------------------------------------------------------===//
-
-#ifndef LLVM_ANALYSIS_DELINEARIZATION_H
-#define LLVM_ANALYSIS_DELINEARIZATION_H
-
-#include "llvm/IR/PassManager.h"
-#include "llvm/Support/raw_ostream.h"
-
-namespace llvm {
-struct DelinearizationPrinterPass
- : public PassInfoMixin<DelinearizationPrinterPass> {
- explicit DelinearizationPrinterPass(raw_ostream &OS);
- PreservedAnalyses run(Function &F, FunctionAnalysisManager &AM);
-
-private:
- raw_ostream &OS;
-};
-} // namespace llvm
-
-#endif // LLVM_ANALYSIS_DELINEARIZATION_H
-
-#ifdef __GNUC__
-#pragma GCC diagnostic pop
-#endif
+#pragma once
+
+#ifdef __GNUC__
+#pragma GCC diagnostic push
+#pragma GCC diagnostic ignored "-Wunused-parameter"
+#endif
+
+//===---- Delinearization.h - MultiDimensional Index Delinearization ------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// This implements an analysis pass that tries to delinearize all GEP
+// instructions in all loops using the SCEV analysis functionality. This pass is
+// only used for testing purposes: if your pass needs delinearization, please
+// use the on-demand SCEVAddRecExpr::delinearize() function.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_ANALYSIS_DELINEARIZATION_H
+#define LLVM_ANALYSIS_DELINEARIZATION_H
+
+#include "llvm/IR/PassManager.h"
+#include "llvm/Support/raw_ostream.h"
+
+namespace llvm {
+struct DelinearizationPrinterPass
+ : public PassInfoMixin<DelinearizationPrinterPass> {
+ explicit DelinearizationPrinterPass(raw_ostream &OS);
+ PreservedAnalyses run(Function &F, FunctionAnalysisManager &AM);
+
+private:
+ raw_ostream &OS;
+};
+} // namespace llvm
+
+#endif // LLVM_ANALYSIS_DELINEARIZATION_H
+
+#ifdef __GNUC__
+#pragma GCC diagnostic pop
+#endif
diff --git a/contrib/libs/llvm12/include/llvm/Analysis/DemandedBits.h b/contrib/libs/llvm12/include/llvm/Analysis/DemandedBits.h
index eb3cd20a42..11046b7557 100644
--- a/contrib/libs/llvm12/include/llvm/Analysis/DemandedBits.h
+++ b/contrib/libs/llvm12/include/llvm/Analysis/DemandedBits.h
@@ -68,20 +68,20 @@ public:
void print(raw_ostream &OS);
- /// Compute alive bits of one addition operand from alive output and known
- /// operand bits
- static APInt determineLiveOperandBitsAdd(unsigned OperandNo,
- const APInt &AOut,
- const KnownBits &LHS,
- const KnownBits &RHS);
-
- /// Compute alive bits of one subtraction operand from alive output and known
- /// operand bits
- static APInt determineLiveOperandBitsSub(unsigned OperandNo,
- const APInt &AOut,
- const KnownBits &LHS,
- const KnownBits &RHS);
-
+ /// Compute alive bits of one addition operand from alive output and known
+ /// operand bits
+ static APInt determineLiveOperandBitsAdd(unsigned OperandNo,
+ const APInt &AOut,
+ const KnownBits &LHS,
+ const KnownBits &RHS);
+
+ /// Compute alive bits of one subtraction operand from alive output and known
+ /// operand bits
+ static APInt determineLiveOperandBitsSub(unsigned OperandNo,
+ const APInt &AOut,
+ const KnownBits &LHS,
+ const KnownBits &RHS);
+
private:
void performAnalysis();
void determineLiveOperandBits(const Instruction *UserI,
diff --git a/contrib/libs/llvm12/include/llvm/Analysis/DivergenceAnalysis.h b/contrib/libs/llvm12/include/llvm/Analysis/DivergenceAnalysis.h
index b92ae823de..6c84f9034b 100644
--- a/contrib/libs/llvm12/include/llvm/Analysis/DivergenceAnalysis.h
+++ b/contrib/libs/llvm12/include/llvm/Analysis/DivergenceAnalysis.h
@@ -66,10 +66,10 @@ public:
/// \brief Mark \p UniVal as a value that is always uniform.
void addUniformOverride(const Value &UniVal);
- /// \brief Mark \p DivVal as a value that is always divergent. Will not do so
- /// if `isAlwaysUniform(DivVal)`.
- /// \returns Whether the tracked divergence state of \p DivVal changed.
- bool markDivergent(const Value &DivVal);
+ /// \brief Mark \p DivVal as a value that is always divergent. Will not do so
+ /// if `isAlwaysUniform(DivVal)`.
+ /// \returns Whether the tracked divergence state of \p DivVal changed.
+ bool markDivergent(const Value &DivVal);
/// \brief Propagate divergence to all instructions in the region.
/// Divergence is seeded by calls to \p markDivergent.
@@ -85,36 +85,36 @@ public:
/// \brief Whether \p Val is divergent at its definition.
bool isDivergent(const Value &Val) const;
- /// \brief Whether \p U is divergent. Uses of a uniform value can be
- /// divergent.
+ /// \brief Whether \p U is divergent. Uses of a uniform value can be
+ /// divergent.
bool isDivergentUse(const Use &U) const;
void print(raw_ostream &OS, const Module *) const;
private:
- /// \brief Mark \p Term as divergent and push all Instructions that become
- /// divergent as a result on the worklist.
- void analyzeControlDivergence(const Instruction &Term);
- /// \brief Mark all phi nodes in \p JoinBlock as divergent and push them on
- /// the worklist.
- void taintAndPushPhiNodes(const BasicBlock &JoinBlock);
-
- /// \brief Identify all Instructions that become divergent because \p DivExit
- /// is a divergent loop exit of \p DivLoop. Mark those instructions as
- /// divergent and push them on the worklist.
- void propagateLoopExitDivergence(const BasicBlock &DivExit,
- const Loop &DivLoop);
-
- /// \brief Internal implementation function for propagateLoopExitDivergence.
- void analyzeLoopExitDivergence(const BasicBlock &DivExit,
- const Loop &OuterDivLoop);
-
- /// \brief Mark all instruction as divergent that use a value defined in \p
- /// OuterDivLoop. Push their users on the worklist.
- void analyzeTemporalDivergence(const Instruction &I,
- const Loop &OuterDivLoop);
-
- /// \brief Push all users of \p Val (in the region) to the worklist.
+ /// \brief Mark \p Term as divergent and push all Instructions that become
+ /// divergent as a result on the worklist.
+ void analyzeControlDivergence(const Instruction &Term);
+ /// \brief Mark all phi nodes in \p JoinBlock as divergent and push them on
+ /// the worklist.
+ void taintAndPushPhiNodes(const BasicBlock &JoinBlock);
+
+ /// \brief Identify all Instructions that become divergent because \p DivExit
+ /// is a divergent loop exit of \p DivLoop. Mark those instructions as
+ /// divergent and push them on the worklist.
+ void propagateLoopExitDivergence(const BasicBlock &DivExit,
+ const Loop &DivLoop);
+
+ /// \brief Internal implementation function for propagateLoopExitDivergence.
+ void analyzeLoopExitDivergence(const BasicBlock &DivExit,
+ const Loop &OuterDivLoop);
+
+ /// \brief Mark all instruction as divergent that use a value defined in \p
+ /// OuterDivLoop. Push their users on the worklist.
+ void analyzeTemporalDivergence(const Instruction &I,
+ const Loop &OuterDivLoop);
+
+ /// \brief Push all users of \p Val (in the region) to the worklist.
void pushUsers(const Value &I);
/// \brief Whether \p Val is divergent when read in \p ObservingBlock.
@@ -125,7 +125,7 @@ private:
///
/// (see markBlockJoinDivergent).
bool isJoinDivergent(const BasicBlock &Block) const {
- return DivergentJoinBlocks.contains(&Block);
+ return DivergentJoinBlocks.contains(&Block);
}
private:
@@ -150,7 +150,7 @@ private:
DenseSet<const Value *> UniformOverrides;
// Blocks with joining divergent control from different predecessors.
- DenseSet<const BasicBlock *> DivergentJoinBlocks; // FIXME Deprecated
+ DenseSet<const BasicBlock *> DivergentJoinBlocks; // FIXME Deprecated
// Detected/marked divergent values.
DenseSet<const Value *> DivergentValues;
diff --git a/contrib/libs/llvm12/include/llvm/Analysis/EHPersonalities.h b/contrib/libs/llvm12/include/llvm/Analysis/EHPersonalities.h
index 2961af091e..b294409778 100644
--- a/contrib/libs/llvm12/include/llvm/Analysis/EHPersonalities.h
+++ b/contrib/libs/llvm12/include/llvm/Analysis/EHPersonalities.h
@@ -35,12 +35,12 @@ enum class EHPersonality {
GNU_CXX_SjLj,
GNU_ObjC,
MSVC_X86SEH,
- MSVC_TableSEH,
+ MSVC_TableSEH,
MSVC_CXX,
CoreCLR,
Rust,
- Wasm_CXX,
- XL_CXX
+ Wasm_CXX,
+ XL_CXX
};
/// See if the given exception handling personality function is one
@@ -59,7 +59,7 @@ inline bool isAsynchronousEHPersonality(EHPersonality Pers) {
// unknown personalities don't catch asynch exceptions.
switch (Pers) {
case EHPersonality::MSVC_X86SEH:
- case EHPersonality::MSVC_TableSEH:
+ case EHPersonality::MSVC_TableSEH:
return true;
default:
return false;
@@ -73,7 +73,7 @@ inline bool isFuncletEHPersonality(EHPersonality Pers) {
switch (Pers) {
case EHPersonality::MSVC_CXX:
case EHPersonality::MSVC_X86SEH:
- case EHPersonality::MSVC_TableSEH:
+ case EHPersonality::MSVC_TableSEH:
case EHPersonality::CoreCLR:
return true;
default:
@@ -88,7 +88,7 @@ inline bool isScopedEHPersonality(EHPersonality Pers) {
switch (Pers) {
case EHPersonality::MSVC_CXX:
case EHPersonality::MSVC_X86SEH:
- case EHPersonality::MSVC_TableSEH:
+ case EHPersonality::MSVC_TableSEH:
case EHPersonality::CoreCLR:
case EHPersonality::Wasm_CXX:
return true;
diff --git a/contrib/libs/llvm12/include/llvm/Analysis/FunctionPropertiesAnalysis.h b/contrib/libs/llvm12/include/llvm/Analysis/FunctionPropertiesAnalysis.h
index 56a25af596..81bdf6fa7b 100644
--- a/contrib/libs/llvm12/include/llvm/Analysis/FunctionPropertiesAnalysis.h
+++ b/contrib/libs/llvm12/include/llvm/Analysis/FunctionPropertiesAnalysis.h
@@ -1,97 +1,97 @@
-#pragma once
-
-#ifdef __GNUC__
-#pragma GCC diagnostic push
-#pragma GCC diagnostic ignored "-Wunused-parameter"
-#endif
-
-//=- FunctionPropertiesAnalysis.h - Function Properties Analysis --*- C++ -*-=//
-//
-// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
-// See https://llvm.org/LICENSE.txt for license information.
-// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
-//
-//===----------------------------------------------------------------------===//
-//
-// This file defines the FunctionPropertiesInfo and FunctionPropertiesAnalysis
-// classes used to extract function properties.
-//
-//===----------------------------------------------------------------------===//
-
-#ifndef LLVM_FUNCTIONPROPERTIESANALYSIS_H_
-#define LLVM_FUNCTIONPROPERTIESANALYSIS_H_
-
-#include "llvm/Analysis/LoopInfo.h"
-#include "llvm/IR/PassManager.h"
-
-namespace llvm {
-class Function;
-
-class FunctionPropertiesInfo {
-public:
- static FunctionPropertiesInfo getFunctionPropertiesInfo(const Function &F,
- const LoopInfo &LI);
-
- void print(raw_ostream &OS) const;
-
- /// Number of basic blocks
- int64_t BasicBlockCount = 0;
-
- /// Number of blocks reached from a conditional instruction, or that are
- /// 'cases' of a SwitchInstr.
- // FIXME: We may want to replace this with a more meaningful metric, like
- // number of conditionally executed blocks:
- // 'if (a) s();' would be counted here as 2 blocks, just like
- // 'if (a) s(); else s2(); s3();' would.
- int64_t BlocksReachedFromConditionalInstruction = 0;
-
- /// Number of uses of this function, plus 1 if the function is callable
- /// outside the module.
- int64_t Uses = 0;
-
- /// Number of direct calls made from this function to other functions
- /// defined in this module.
- int64_t DirectCallsToDefinedFunctions = 0;
-
- // Load Instruction Count
- int64_t LoadInstCount = 0;
-
- // Store Instruction Count
- int64_t StoreInstCount = 0;
-
- // Maximum Loop Depth in the Function
- int64_t MaxLoopDepth = 0;
-
- // Number of Top Level Loops in the Function
- int64_t TopLevelLoopCount = 0;
-};
-
-// Analysis pass
-class FunctionPropertiesAnalysis
- : public AnalysisInfoMixin<FunctionPropertiesAnalysis> {
-
-public:
- static AnalysisKey Key;
-
- using Result = FunctionPropertiesInfo;
-
- Result run(Function &F, FunctionAnalysisManager &FAM);
-};
-
-/// Printer pass for the FunctionPropertiesAnalysis results.
-class FunctionPropertiesPrinterPass
- : public PassInfoMixin<FunctionPropertiesPrinterPass> {
- raw_ostream &OS;
-
-public:
- explicit FunctionPropertiesPrinterPass(raw_ostream &OS) : OS(OS) {}
-
- PreservedAnalyses run(Function &F, FunctionAnalysisManager &AM);
-};
-
-} // namespace llvm
-#endif // LLVM_FUNCTIONPROPERTIESANALYSIS_H_
-
-#ifdef __GNUC__
-#pragma GCC diagnostic pop
-#endif
+#pragma once
+
+#ifdef __GNUC__
+#pragma GCC diagnostic push
+#pragma GCC diagnostic ignored "-Wunused-parameter"
+#endif
+
+//=- FunctionPropertiesAnalysis.h - Function Properties Analysis --*- C++ -*-=//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// This file defines the FunctionPropertiesInfo and FunctionPropertiesAnalysis
+// classes used to extract function properties.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_FUNCTIONPROPERTIESANALYSIS_H_
+#define LLVM_FUNCTIONPROPERTIESANALYSIS_H_
+
+#include "llvm/Analysis/LoopInfo.h"
+#include "llvm/IR/PassManager.h"
+
+namespace llvm {
+class Function;
+
+class FunctionPropertiesInfo {
+public:
+ static FunctionPropertiesInfo getFunctionPropertiesInfo(const Function &F,
+ const LoopInfo &LI);
+
+ void print(raw_ostream &OS) const;
+
+ /// Number of basic blocks
+ int64_t BasicBlockCount = 0;
+
+ /// Number of blocks reached from a conditional instruction, or that are
+ /// 'cases' of a SwitchInstr.
+ // FIXME: We may want to replace this with a more meaningful metric, like
+ // number of conditionally executed blocks:
+ // 'if (a) s();' would be counted here as 2 blocks, just like
+ // 'if (a) s(); else s2(); s3();' would.
+ int64_t BlocksReachedFromConditionalInstruction = 0;
+
+ /// Number of uses of this function, plus 1 if the function is callable
+ /// outside the module.
+ int64_t Uses = 0;
+
+ /// Number of direct calls made from this function to other functions
+ /// defined in this module.
+ int64_t DirectCallsToDefinedFunctions = 0;
+
+ // Load Instruction Count
+ int64_t LoadInstCount = 0;
+
+ // Store Instruction Count
+ int64_t StoreInstCount = 0;
+
+ // Maximum Loop Depth in the Function
+ int64_t MaxLoopDepth = 0;
+
+ // Number of Top Level Loops in the Function
+ int64_t TopLevelLoopCount = 0;
+};
+
+// Analysis pass
+class FunctionPropertiesAnalysis
+ : public AnalysisInfoMixin<FunctionPropertiesAnalysis> {
+
+public:
+ static AnalysisKey Key;
+
+ using Result = FunctionPropertiesInfo;
+
+ Result run(Function &F, FunctionAnalysisManager &FAM);
+};
+
+/// Printer pass for the FunctionPropertiesAnalysis results.
+class FunctionPropertiesPrinterPass
+ : public PassInfoMixin<FunctionPropertiesPrinterPass> {
+ raw_ostream &OS;
+
+public:
+ explicit FunctionPropertiesPrinterPass(raw_ostream &OS) : OS(OS) {}
+
+ PreservedAnalyses run(Function &F, FunctionAnalysisManager &AM);
+};
+
+} // namespace llvm
+#endif // LLVM_FUNCTIONPROPERTIESANALYSIS_H_
+
+#ifdef __GNUC__
+#pragma GCC diagnostic pop
+#endif
diff --git a/contrib/libs/llvm12/include/llvm/Analysis/IRSimilarityIdentifier.h b/contrib/libs/llvm12/include/llvm/Analysis/IRSimilarityIdentifier.h
index 6c765c5c00..4544bfc8b9 100644
--- a/contrib/libs/llvm12/include/llvm/Analysis/IRSimilarityIdentifier.h
+++ b/contrib/libs/llvm12/include/llvm/Analysis/IRSimilarityIdentifier.h
@@ -1,800 +1,800 @@
-#pragma once
-
-#ifdef __GNUC__
-#pragma GCC diagnostic push
-#pragma GCC diagnostic ignored "-Wunused-parameter"
-#endif
-
-//===- IRSimilarityIdentifier.h - Find similarity in a module --------------==//
-//
-// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
-// See https://llvm.org/LICENSE.txt for license information.
-// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
-//
-//===----------------------------------------------------------------------===//
-//
-// \file
-// Interface file for the IRSimilarityIdentifier for identifying similarities in
-// IR including the IRInstructionMapper, which maps an Instruction to unsigned
-// integers.
-//
-// Two sequences of instructions are called "similar" if they perform the same
-// series of operations for all inputs.
-//
-// \code
-// %1 = add i32 %a, 10
-// %2 = add i32 %a, %1
-// %3 = icmp slt icmp %1, %2
-// \endcode
-//
-// and
-//
-// \code
-// %1 = add i32 11, %a
-// %2 = sub i32 %a, %1
-// %3 = icmp sgt icmp %2, %1
-// \endcode
-//
-// ultimately have the same result, even if the inputs, and structure are
-// slightly different.
-//
-// For instructions, we do not worry about operands that do not have fixed
-// semantic meaning to the program. We consider the opcode that the instruction
-// has, the types, parameters, and extra information such as the function name,
-// or comparison predicate. These are used to create a hash to map instructions
-// to integers to be used in similarity matching in sequences of instructions
-//
-// Terminology:
-// An IRSimilarityCandidate is a region of IRInstructionData (wrapped
-// Instructions), usually used to denote a region of similarity has been found.
-//
-// A SimilarityGroup is a set of IRSimilarityCandidates that are structurally
-// similar to one another.
-//
-//===----------------------------------------------------------------------===//
-
-#ifndef LLVM_ANALYSIS_IRSIMILARITYIDENTIFIER_H
-#define LLVM_ANALYSIS_IRSIMILARITYIDENTIFIER_H
-
-#include "llvm/IR/InstVisitor.h"
-#include "llvm/IR/Instructions.h"
-#include "llvm/IR/Module.h"
-#include "llvm/IR/PassManager.h"
-#include "llvm/Pass.h"
-#include "llvm/Support/Allocator.h"
-
-namespace llvm {
-namespace IRSimilarity {
-
-struct IRInstructionDataList;
-
-/// This represents what is and is not supported when finding similarity in
-/// Instructions.
-///
-/// Legal Instructions are considered when looking at similarity between
-/// Instructions.
-///
-/// Illegal Instructions cannot be considered when looking for similarity
-/// between Instructions. They act as boundaries between similarity regions.
-///
-/// Invisible Instructions are skipped over during analysis.
-// TODO: Shared with MachineOutliner
-enum InstrType { Legal, Illegal, Invisible };
-
-/// This provides the utilities for hashing an Instruction to an unsigned
-/// integer. Two IRInstructionDatas produce the same hash value when their
-/// underlying Instructions perform the same operation (even if they don't have
-/// the same input operands.)
-/// As a more concrete example, consider the following:
-///
-/// \code
-/// %add1 = add i32 %a, %b
-/// %add2 = add i32 %c, %d
-/// %add3 = add i64 %e, %f
-/// \endcode
-///
-// Then the IRInstructionData wrappers for these Instructions may be hashed like
-/// so:
-///
-/// \code
-/// ; These two adds have the same types and operand types, so they hash to the
-/// ; same number.
-/// %add1 = add i32 %a, %b ; Hash: 1
-/// %add2 = add i32 %c, %d ; Hash: 1
-/// ; This add produces an i64. This differentiates it from %add1 and %add2. So,
-/// ; it hashes to a different number.
-/// %add3 = add i64 %e, %f; Hash: 2
-/// \endcode
-///
-///
-/// This hashing scheme will be used to represent the program as a very long
-/// string. This string can then be placed in a data structure which can be used
-/// for similarity queries.
-///
-/// TODO: Handle types of Instructions which can be equal even with different
-/// operands. (E.g. comparisons with swapped predicates.)
-/// TODO: Handle CallInsts, which are only checked for function type
-/// by \ref isSameOperationAs.
-/// TODO: Handle GetElementPtrInsts, as some of the operands have to be the
-/// exact same, and some do not.
-struct IRInstructionData : ilist_node<IRInstructionData> {
-
- /// The source Instruction that is being wrapped.
- Instruction *Inst = nullptr;
- /// The values of the operands in the Instruction.
- SmallVector<Value *, 4> OperVals;
- /// The legality of the wrapped instruction. This is informed by InstrType,
- /// and is used when checking when two instructions are considered similar.
- /// If either instruction is not legal, the instructions are automatically not
- /// considered similar.
- bool Legal;
-
- /// This is only relevant if we are wrapping a CmpInst where we needed to
- /// change the predicate of a compare instruction from a greater than form
- /// to a less than form. It is None otherwise.
- Optional<CmpInst::Predicate> RevisedPredicate;
-
- /// Gather the information that is difficult to gather for an Instruction, or
- /// is changed. i.e. the operands of an Instruction and the Types of those
- /// operands. This extra information allows for similarity matching to make
- /// assertions that allow for more flexibility when checking for whether an
- /// Instruction performs the same operation.
- IRInstructionData(Instruction &I, bool Legality, IRInstructionDataList &IDL);
-
- /// Get the predicate that the compare instruction is using for hashing the
- /// instruction. the IRInstructionData must be wrapping a CmpInst.
- CmpInst::Predicate getPredicate() const;
-
- /// A function that swaps the predicates to their less than form if they are
- /// in a greater than form. Otherwise, the predicate is unchanged.
- ///
- /// \param CI - The comparison operation to find a consistent preidcate for.
- /// \return the consistent comparison predicate.
- static CmpInst::Predicate predicateForConsistency(CmpInst *CI);
-
- /// Hashes \p Value based on its opcode, types, and operand types.
- /// Two IRInstructionData instances produce the same hash when they perform
- /// the same operation.
- ///
- /// As a simple example, consider the following instructions.
- ///
- /// \code
- /// %add1 = add i32 %x1, %y1
- /// %add2 = add i32 %x2, %y2
- ///
- /// %sub = sub i32 %x1, %y1
- ///
- /// %add_i64 = add i64 %x2, %y2
- /// \endcode
- ///
- /// Because the first two adds operate the same types, and are performing the
- /// same action, they will be hashed to the same value.
- ///
- /// However, the subtraction instruction is not the same as an addition, and
- /// will be hashed to a different value.
- ///
- /// Finally, the last add has a different type compared to the first two add
- /// instructions, so it will also be hashed to a different value that any of
- /// the previous instructions.
- ///
- /// \param [in] ID - The IRInstructionData instance to be hashed.
- /// \returns A hash_value of the IRInstructionData.
- friend hash_code hash_value(const IRInstructionData &ID) {
- SmallVector<Type *, 4> OperTypes;
- for (Value *V : ID.OperVals)
- OperTypes.push_back(V->getType());
-
- if (isa<CmpInst>(ID.Inst))
- return llvm::hash_combine(
- llvm::hash_value(ID.Inst->getOpcode()),
- llvm::hash_value(ID.Inst->getType()),
- llvm::hash_value(ID.getPredicate()),
- llvm::hash_combine_range(OperTypes.begin(), OperTypes.end()));
- else if (CallInst *CI = dyn_cast<CallInst>(ID.Inst))
- return llvm::hash_combine(
- llvm::hash_value(ID.Inst->getOpcode()),
- llvm::hash_value(ID.Inst->getType()),
- llvm::hash_value(CI->getCalledFunction()->getName().str()),
- llvm::hash_combine_range(OperTypes.begin(), OperTypes.end()));
- return llvm::hash_combine(
- llvm::hash_value(ID.Inst->getOpcode()),
- llvm::hash_value(ID.Inst->getType()),
- llvm::hash_combine_range(OperTypes.begin(), OperTypes.end()));
- }
-
- IRInstructionDataList *IDL = nullptr;
-};
-
-struct IRInstructionDataList : simple_ilist<IRInstructionData> {};
-
-/// Compare one IRInstructionData class to another IRInstructionData class for
-/// whether they are performing a the same operation, and can mapped to the
-/// same value. For regular instructions if the hash value is the same, then
-/// they will also be close.
-///
-/// \param A - The first IRInstructionData class to compare
-/// \param B - The second IRInstructionData class to compare
-/// \returns true if \p A and \p B are similar enough to be mapped to the same
-/// value.
-bool isClose(const IRInstructionData &A, const IRInstructionData &B);
-
-struct IRInstructionDataTraits : DenseMapInfo<IRInstructionData *> {
- static inline IRInstructionData *getEmptyKey() { return nullptr; }
- static inline IRInstructionData *getTombstoneKey() {
- return reinterpret_cast<IRInstructionData *>(-1);
- }
-
- static unsigned getHashValue(const IRInstructionData *E) {
- using llvm::hash_value;
- assert(E && "IRInstructionData is a nullptr?");
- return hash_value(*E);
- }
-
- static bool isEqual(const IRInstructionData *LHS,
- const IRInstructionData *RHS) {
- if (RHS == getEmptyKey() || RHS == getTombstoneKey() ||
- LHS == getEmptyKey() || LHS == getTombstoneKey())
- return LHS == RHS;
-
- assert(LHS && RHS && "nullptr should have been caught by getEmptyKey?");
- return isClose(*LHS, *RHS);
- }
-};
-
-/// Helper struct for converting the Instructions in a Module into a vector of
-/// unsigned integers. This vector of unsigned integers can be thought of as a
-/// "numeric string". This numeric string can then be queried by, for example,
-/// data structures that find repeated substrings.
-///
-/// This hashing is done per BasicBlock in the module. To hash Instructions
-/// based off of their operations, each Instruction is wrapped in an
-/// IRInstructionData struct. The unsigned integer for an IRInstructionData
-/// depends on:
-/// - The hash provided by the IRInstructionData.
-/// - Which member of InstrType the IRInstructionData is classified as.
-// See InstrType for more details on the possible classifications, and how they
-// manifest in the numeric string.
-///
-/// The numeric string for an individual BasicBlock is terminated by an unique
-/// unsigned integer. This prevents data structures which rely on repetition
-/// from matching across BasicBlocks. (For example, the SuffixTree.)
-/// As a concrete example, if we have the following two BasicBlocks:
-/// \code
-/// bb0:
-/// %add1 = add i32 %a, %b
-/// %add2 = add i32 %c, %d
-/// %add3 = add i64 %e, %f
-/// bb1:
-/// %sub = sub i32 %c, %d
-/// \endcode
-/// We may hash the Instructions like this (via IRInstructionData):
-/// \code
-/// bb0:
-/// %add1 = add i32 %a, %b ; Hash: 1
-/// %add2 = add i32 %c, %d; Hash: 1
-/// %add3 = add i64 %e, %f; Hash: 2
-/// bb1:
-/// %sub = sub i32 %c, %d; Hash: 3
-/// %add4 = add i32 %c, %d ; Hash: 1
-/// \endcode
-/// And produce a "numeric string representation" like so:
-/// 1, 1, 2, unique_integer_1, 3, 1, unique_integer_2
-///
-/// TODO: This is very similar to the MachineOutliner, and should be
-/// consolidated into the same interface.
-struct IRInstructionMapper {
- /// The starting illegal instruction number to map to.
- ///
- /// Set to -3 for compatibility with DenseMapInfo<unsigned>.
- unsigned IllegalInstrNumber = static_cast<unsigned>(-3);
-
- /// The next available integer to assign to a legal Instruction to.
- unsigned LegalInstrNumber = 0;
-
- /// Correspondence from IRInstructionData to unsigned integers.
- DenseMap<IRInstructionData *, unsigned, IRInstructionDataTraits>
- InstructionIntegerMap;
-
- /// Set if we added an illegal number in the previous step.
- /// Since each illegal number is unique, we only need one of them between
- /// each range of legal numbers. This lets us make sure we don't add more
- /// than one illegal number per range.
- bool AddedIllegalLastTime = false;
-
- /// Marks whether we found a illegal instruction in the previous step.
- bool CanCombineWithPrevInstr = false;
-
- /// Marks whether we have found a set of instructions that is long enough
- /// to be considered for similarity.
- bool HaveLegalRange = false;
-
- /// This allocator pointer is in charge of holding on to the IRInstructionData
- /// so it is not deallocated until whatever external tool is using it is done
- /// with the information.
- SpecificBumpPtrAllocator<IRInstructionData> *InstDataAllocator = nullptr;
-
- /// This allocator pointer is in charge of creating the IRInstructionDataList
- /// so it is not deallocated until whatever external tool is using it is done
- /// with the information.
- SpecificBumpPtrAllocator<IRInstructionDataList> *IDLAllocator = nullptr;
-
- /// Get an allocated IRInstructionData struct using the InstDataAllocator.
- ///
- /// \param I - The Instruction to wrap with IRInstructionData.
- /// \param Legality - A boolean value that is true if the instruction is to
- /// be considered for similarity, and false if not.
- /// \param IDL - The InstructionDataList that the IRInstructionData is
- /// inserted into.
- /// \returns An allocated IRInstructionData struct.
- IRInstructionData *allocateIRInstructionData(Instruction &I, bool Legality,
- IRInstructionDataList &IDL);
-
- /// Get an allocated IRInstructionDataList object using the IDLAllocator.
- ///
- /// \returns An allocated IRInstructionDataList object.
- IRInstructionDataList *allocateIRInstructionDataList();
-
- IRInstructionDataList *IDL = nullptr;
-
- /// Maps the Instructions in a BasicBlock \p BB to legal or illegal integers
- /// determined by \p InstrType. Two Instructions are mapped to the same value
- /// if they are close as defined by the InstructionData class above.
- ///
- /// \param [in] BB - The BasicBlock to be mapped to integers.
- /// \param [in,out] InstrList - Vector of IRInstructionData to append to.
- /// \param [in,out] IntegerMapping - Vector of unsigned integers to append to.
- void convertToUnsignedVec(BasicBlock &BB,
- std::vector<IRInstructionData *> &InstrList,
- std::vector<unsigned> &IntegerMapping);
-
- /// Maps an Instruction to a legal integer.
- ///
- /// \param [in] It - The Instruction to be mapped to an integer.
- /// \param [in,out] IntegerMappingForBB - Vector of unsigned integers to
- /// append to.
- /// \param [in,out] InstrListForBB - Vector of InstructionData to append to.
- /// \returns The integer \p It was mapped to.
- unsigned mapToLegalUnsigned(BasicBlock::iterator &It,
- std::vector<unsigned> &IntegerMappingForBB,
- std::vector<IRInstructionData *> &InstrListForBB);
-
- /// Maps an Instruction to an illegal integer.
- ///
- /// \param [in] It - The \p Instruction to be mapped to an integer.
- /// \param [in,out] IntegerMappingForBB - Vector of unsigned integers to
- /// append to.
- /// \param [in,out] InstrListForBB - Vector of IRInstructionData to append to.
- /// \param End - true if creating a dummy IRInstructionData at the end of a
- /// basic block.
- /// \returns The integer \p It was mapped to.
- unsigned mapToIllegalUnsigned(
- BasicBlock::iterator &It, std::vector<unsigned> &IntegerMappingForBB,
- std::vector<IRInstructionData *> &InstrListForBB, bool End = false);
-
- IRInstructionMapper(SpecificBumpPtrAllocator<IRInstructionData> *IDA,
- SpecificBumpPtrAllocator<IRInstructionDataList> *IDLA)
- : InstDataAllocator(IDA), IDLAllocator(IDLA) {
- // Make sure that the implementation of DenseMapInfo<unsigned> hasn't
- // changed.
- assert(DenseMapInfo<unsigned>::getEmptyKey() == static_cast<unsigned>(-1) &&
- "DenseMapInfo<unsigned>'s empty key isn't -1!");
- assert(DenseMapInfo<unsigned>::getTombstoneKey() ==
- static_cast<unsigned>(-2) &&
- "DenseMapInfo<unsigned>'s tombstone key isn't -2!");
-
- IDL = new (IDLAllocator->Allocate())
- IRInstructionDataList();
- }
-
- /// Custom InstVisitor to classify different instructions for whether it can
- /// be analyzed for similarity.
- struct InstructionClassification
- : public InstVisitor<InstructionClassification, InstrType> {
- InstructionClassification() {}
-
- // TODO: Determine a scheme to resolve when the label is similar enough.
- InstrType visitBranchInst(BranchInst &BI) { return Illegal; }
- // TODO: Determine a scheme to resolve when the labels are similar enough.
- InstrType visitPHINode(PHINode &PN) { return Illegal; }
- // TODO: Handle allocas.
- InstrType visitAllocaInst(AllocaInst &AI) { return Illegal; }
- // We exclude variable argument instructions since variable arguments
- // requires extra checking of the argument list.
- InstrType visitVAArgInst(VAArgInst &VI) { return Illegal; }
- // We exclude all exception handling cases since they are so context
- // dependent.
- InstrType visitLandingPadInst(LandingPadInst &LPI) { return Illegal; }
- InstrType visitFuncletPadInst(FuncletPadInst &FPI) { return Illegal; }
- // DebugInfo should be included in the regions, but should not be
- // analyzed for similarity as it has no bearing on the outcome of the
- // program.
- InstrType visitDbgInfoIntrinsic(DbgInfoIntrinsic &DII) { return Invisible; }
- // TODO: Handle specific intrinsics.
- InstrType visitIntrinsicInst(IntrinsicInst &II) { return Illegal; }
- // We only allow call instructions where the function has a name and
- // is not an indirect call.
- InstrType visitCallInst(CallInst &CI) {
- Function *F = CI.getCalledFunction();
- if (!F || CI.isIndirectCall() || !F->hasName())
- return Illegal;
- return Legal;
- }
- // TODO: We do not current handle similarity that changes the control flow.
- InstrType visitInvokeInst(InvokeInst &II) { return Illegal; }
- // TODO: We do not current handle similarity that changes the control flow.
- InstrType visitCallBrInst(CallBrInst &CBI) { return Illegal; }
- // TODO: Handle interblock similarity.
- InstrType visitTerminator(Instruction &I) { return Illegal; }
- InstrType visitInstruction(Instruction &I) { return Legal; }
- };
-
- /// Maps an Instruction to a member of InstrType.
- InstructionClassification InstClassifier;
-};
-
-/// This is a class that wraps a range of IRInstructionData from one point to
-/// another in the vector of IRInstructionData, which is a region of the
-/// program. It is also responsible for defining the structure within this
-/// region of instructions.
-///
-/// The structure of a region is defined through a value numbering system
-/// assigned to each unique value in a region at the creation of the
-/// IRSimilarityCandidate.
-///
-/// For example, for each Instruction we add a mapping for each new
-/// value seen in that Instruction.
-/// IR: Mapping Added:
-/// %add1 = add i32 %a, c1 %add1 -> 3, %a -> 1, c1 -> 2
-/// %add2 = add i32 %a, %1 %add2 -> 4
-/// %add3 = add i32 c2, c1 %add3 -> 6, c2 -> 5
-///
-/// We can compare IRSimilarityCandidates against one another.
-/// The \ref isSimilar function compares each IRInstructionData against one
-/// another and if we have the same sequences of IRInstructionData that would
-/// create the same hash, we have similar IRSimilarityCandidates.
-///
-/// We can also compare the structure of IRSimilarityCandidates. If we can
-/// create a mapping of registers in the region contained by one
-/// IRSimilarityCandidate to the region contained by different
-/// IRSimilarityCandidate, they can be considered structurally similar.
-///
-/// IRSimilarityCandidate1: IRSimilarityCandidate2:
-/// %add1 = add i32 %a, %b %add1 = add i32 %d, %e
-/// %add2 = add i32 %a, %c %add2 = add i32 %d, %f
-/// %add3 = add i32 c1, c2 %add3 = add i32 c3, c4
-///
-/// Can have the following mapping from candidate to candidate of:
-/// %a -> %d, %b -> %e, %c -> %f, c1 -> c3, c2 -> c4
-/// and can be considered similar.
-///
-/// IRSimilarityCandidate1: IRSimilarityCandidate2:
-/// %add1 = add i32 %a, %b %add1 = add i32 %d, c4
-/// %add2 = add i32 %a, %c %add2 = add i32 %d, %f
-/// %add3 = add i32 c1, c2 %add3 = add i32 c3, c4
-///
-/// We cannot create the same mapping since the use of c4 is not used in the
-/// same way as %b or c2.
-class IRSimilarityCandidate {
-private:
- /// The start index of this IRSimilarityCandidate in the instruction list.
- unsigned StartIdx = 0;
-
- /// The number of instructions in this IRSimilarityCandidate.
- unsigned Len = 0;
-
- /// The first instruction in this IRSimilarityCandidate.
- IRInstructionData *FirstInst = nullptr;
-
- /// The last instruction in this IRSimilarityCandidate.
- IRInstructionData *LastInst = nullptr;
-
- /// Global Value Numbering structures
- /// @{
- /// Stores the mapping of the value to the number assigned to it in the
- /// IRSimilarityCandidate.
- DenseMap<Value *, unsigned> ValueToNumber;
- /// Stores the mapping of the number to the value assigned this number.
- DenseMap<unsigned, Value *> NumberToValue;
- /// @}
-
-public:
- /// \param StartIdx - The starting location of the region.
- /// \param Len - The length of the region.
- /// \param FirstInstIt - The starting IRInstructionData of the region.
- /// \param LastInstIt - The ending IRInstructionData of the region.
- IRSimilarityCandidate(unsigned StartIdx, unsigned Len,
- IRInstructionData *FirstInstIt,
- IRInstructionData *LastInstIt);
-
- /// \param A - The first IRInstructionCandidate to compare.
- /// \param B - The second IRInstructionCandidate to compare.
- /// \returns True when every IRInstructionData in \p A is similar to every
- /// IRInstructionData in \p B.
- static bool isSimilar(const IRSimilarityCandidate &A,
- const IRSimilarityCandidate &B);
-
- /// \param A - The first IRInstructionCandidate to compare.
- /// \param B - The second IRInstructionCandidate to compare.
- /// \returns True when every IRInstructionData in \p A is structurally similar
- /// to \p B.
- static bool compareStructure(const IRSimilarityCandidate &A,
- const IRSimilarityCandidate &B);
-
- struct OperandMapping {
- /// The IRSimilarityCandidate that holds the instruction the OperVals were
- /// pulled from.
- const IRSimilarityCandidate &IRSC;
-
- /// The operand values to be analyzed.
- ArrayRef<Value *> &OperVals;
-
- /// The current mapping of global value numbers from one IRSimilarityCandidate
- /// to another IRSimilarityCandidate.
- DenseMap<unsigned, DenseSet<unsigned>> &ValueNumberMapping;
- };
-
- /// Compare the operands in \p A and \p B and check that the current mapping
- /// of global value numbers from \p A to \p B and \p B to \A is consistent.
- ///
- /// \param A - The first IRInstructionCandidate, operand values, and current
- /// operand mappings to compare.
- /// \param B - The second IRInstructionCandidate, operand values, and current
- /// operand mappings to compare.
- /// \returns true if the IRSimilarityCandidates operands are compatible.
- static bool compareNonCommutativeOperandMapping(OperandMapping A,
- OperandMapping B);
-
- /// Compare the operands in \p A and \p B and check that the current mapping
- /// of global value numbers from \p A to \p B and \p B to \A is consistent
- /// given that the operands are commutative.
- ///
- /// \param A - The first IRInstructionCandidate, operand values, and current
- /// operand mappings to compare.
- /// \param B - The second IRInstructionCandidate, operand values, and current
- /// operand mappings to compare.
- /// \returns true if the IRSimilarityCandidates operands are compatible.
- static bool compareCommutativeOperandMapping(OperandMapping A,
- OperandMapping B);
-
- /// Compare the start and end indices of the two IRSimilarityCandidates for
- /// whether they overlap. If the start instruction of one
- /// IRSimilarityCandidate is less than the end instruction of the other, and
- /// the start instruction of one is greater than the start instruction of the
- /// other, they overlap.
- ///
- /// \returns true if the IRSimilarityCandidates do not have overlapping
- /// instructions.
- static bool overlap(const IRSimilarityCandidate &A,
- const IRSimilarityCandidate &B);
-
- /// \returns the number of instructions in this Candidate.
- unsigned getLength() const { return Len; }
-
- /// \returns the start index of this IRSimilarityCandidate.
- unsigned getStartIdx() const { return StartIdx; }
-
- /// \returns the end index of this IRSimilarityCandidate.
- unsigned getEndIdx() const { return StartIdx + Len - 1; }
-
- /// \returns The first IRInstructionData.
- IRInstructionData *front() const { return FirstInst; }
- /// \returns The last IRInstructionData.
- IRInstructionData *back() const { return LastInst; }
-
- /// \returns The first Instruction.
- Instruction *frontInstruction() { return FirstInst->Inst; }
- /// \returns The last Instruction
- Instruction *backInstruction() { return LastInst->Inst; }
-
- /// \returns The BasicBlock the IRSimilarityCandidate starts in.
- BasicBlock *getStartBB() { return FirstInst->Inst->getParent(); }
- /// \returns The BasicBlock the IRSimilarityCandidate ends in.
- BasicBlock *getEndBB() { return LastInst->Inst->getParent(); }
-
- /// \returns The Function that the IRSimilarityCandidate is located in.
- Function *getFunction() { return getStartBB()->getParent(); }
-
- /// Finds the positive number associated with \p V if it has been mapped.
- /// \param [in] V - the Value to find.
- /// \returns The positive number corresponding to the value.
- /// \returns None if not present.
- Optional<unsigned> getGVN(Value *V) {
- assert(V != nullptr && "Value is a nullptr?");
- DenseMap<Value *, unsigned>::iterator VNIt = ValueToNumber.find(V);
- if (VNIt == ValueToNumber.end())
- return None;
- return VNIt->second;
- }
-
- /// Finds the Value associate with \p Num if it exists.
- /// \param [in] Num - the number to find.
- /// \returns The Value associated with the number.
- /// \returns None if not present.
- Optional<Value *> fromGVN(unsigned Num) {
- DenseMap<unsigned, Value *>::iterator VNIt = NumberToValue.find(Num);
- if (VNIt == NumberToValue.end())
- return None;
- assert(VNIt->second != nullptr && "Found value is a nullptr!");
- return VNIt->second;
- }
-
- /// \param RHS -The IRSimilarityCandidate to compare against
- /// \returns true if the IRSimilarityCandidate is occurs after the
- /// IRSimilarityCandidate in the program.
- bool operator<(const IRSimilarityCandidate &RHS) const {
- return getStartIdx() > RHS.getStartIdx();
- }
-
- using iterator = IRInstructionDataList::iterator;
- iterator begin() const { return iterator(front()); }
- iterator end() const { return std::next(iterator(back())); }
-};
-
-typedef std::vector<IRSimilarityCandidate> SimilarityGroup;
-typedef std::vector<SimilarityGroup> SimilarityGroupList;
-
-/// This class puts all the pieces of the IRInstructionData,
-/// IRInstructionMapper, IRSimilarityCandidate together.
-///
-/// It first feeds the Module or vector of Modules into the IRInstructionMapper,
-/// and puts all the mapped instructions into a single long list of
-/// IRInstructionData.
-///
-/// The list of unsigned integers is given to the Suffix Tree or similar data
-/// structure to find repeated subsequences. We construct an
-/// IRSimilarityCandidate for each instance of the subsequence. We compare them
-/// against one another since These repeated subsequences can have different
-/// structure. For each different kind of structure found, we create a
-/// similarity group.
-///
-/// If we had four IRSimilarityCandidates A, B, C, and D where A, B and D are
-/// structurally similar to one another, while C is different we would have two
-/// SimilarityGroups:
-///
-/// SimilarityGroup 1: SimilarityGroup 2
-/// A, B, D C
-///
-/// A list of the different similarity groups is then returned after
-/// analyzing the module.
-class IRSimilarityIdentifier {
-public:
- IRSimilarityIdentifier()
- : Mapper(&InstDataAllocator, &InstDataListAllocator) {}
-
- /// \param M the module to find similarity in.
- explicit IRSimilarityIdentifier(Module &M)
- : Mapper(&InstDataAllocator, &InstDataListAllocator) {
- findSimilarity(M);
- }
-
-private:
- /// Map the instructions in the module to unsigned integers, using mapping
- /// already present in the Mapper if possible.
- ///
- /// \param [in] M Module - To map to integers.
- /// \param [in,out] InstrList - The vector to append IRInstructionData to.
- /// \param [in,out] IntegerMapping - The vector to append integers to.
- void populateMapper(Module &M, std::vector<IRInstructionData *> &InstrList,
- std::vector<unsigned> &IntegerMapping);
-
- /// Map the instructions in the modules vector to unsigned integers, using
- /// mapping already present in the mapper if possible.
- ///
- /// \param [in] Modules - The list of modules to use to populate the mapper
- /// \param [in,out] InstrList - The vector to append IRInstructionData to.
- /// \param [in,out] IntegerMapping - The vector to append integers to.
- void populateMapper(ArrayRef<std::unique_ptr<Module>> &Modules,
- std::vector<IRInstructionData *> &InstrList,
- std::vector<unsigned> &IntegerMapping);
-
- /// Find the similarity candidates in \p InstrList and corresponding
- /// \p UnsignedVec
- ///
- /// \param [in,out] InstrList - The vector to append IRInstructionData to.
- /// \param [in,out] IntegerMapping - The vector to append integers to.
- /// candidates found in the program.
- void findCandidates(std::vector<IRInstructionData *> &InstrList,
- std::vector<unsigned> &IntegerMapping);
-
-public:
- // Find the IRSimilarityCandidates in the \p Modules and group by structural
- // similarity in a SimilarityGroup, each group is returned in a
- // SimilarityGroupList.
- //
- // \param [in] Modules - the modules to analyze.
- // \returns The groups of similarity ranges found in the modules.
- SimilarityGroupList &
- findSimilarity(ArrayRef<std::unique_ptr<Module>> Modules);
-
- // Find the IRSimilarityCandidates in the given Module grouped by structural
- // similarity in a SimilarityGroup, contained inside a SimilarityGroupList.
- //
- // \param [in] M - the module to analyze.
- // \returns The groups of similarity ranges found in the module.
- SimilarityGroupList &findSimilarity(Module &M);
-
- // Clears \ref SimilarityCandidates if it is already filled by a previous run.
- void resetSimilarityCandidates() {
- // If we've already analyzed a Module or set of Modules, so we must clear
- // the SimilarityCandidates to make sure we do not have only old values
- // hanging around.
- if (SimilarityCandidates.hasValue())
- SimilarityCandidates->clear();
- else
- SimilarityCandidates = SimilarityGroupList();
- }
-
- // \returns The groups of similarity ranges found in the most recently passed
- // set of modules.
- Optional<SimilarityGroupList> &getSimilarity() {
- return SimilarityCandidates;
- }
-
-private:
- /// The allocator for IRInstructionData.
- SpecificBumpPtrAllocator<IRInstructionData> InstDataAllocator;
-
- /// The allocator for IRInstructionDataLists.
- SpecificBumpPtrAllocator<IRInstructionDataList> InstDataListAllocator;
-
- /// Map Instructions to unsigned integers and wraps the Instruction in an
- /// instance of IRInstructionData.
- IRInstructionMapper Mapper;
-
- /// The SimilarityGroups found with the most recent run of \ref
- /// findSimilarity. None if there is no recent run.
- Optional<SimilarityGroupList> SimilarityCandidates;
-};
-
-} // end namespace IRSimilarity
-
-/// An analysis pass based on legacy pass manager that runs and returns
-/// IRSimilarityIdentifier run on the Module.
-class IRSimilarityIdentifierWrapperPass : public ModulePass {
- std::unique_ptr<IRSimilarity::IRSimilarityIdentifier> IRSI;
-
-public:
- static char ID;
- IRSimilarityIdentifierWrapperPass();
-
- IRSimilarity::IRSimilarityIdentifier &getIRSI() { return *IRSI; }
- const IRSimilarity::IRSimilarityIdentifier &getIRSI() const { return *IRSI; }
-
- bool doInitialization(Module &M) override;
- bool doFinalization(Module &M) override;
- bool runOnModule(Module &M) override;
- void getAnalysisUsage(AnalysisUsage &AU) const override {
- AU.setPreservesAll();
- }
-};
-
-/// An analysis pass that runs and returns the IRSimilarityIdentifier run on the
-/// Module.
-class IRSimilarityAnalysis : public AnalysisInfoMixin<IRSimilarityAnalysis> {
-public:
- typedef IRSimilarity::IRSimilarityIdentifier Result;
-
- Result run(Module &M, ModuleAnalysisManager &);
-
-private:
- friend AnalysisInfoMixin<IRSimilarityAnalysis>;
- static AnalysisKey Key;
-};
-
-/// Printer pass that uses \c IRSimilarityAnalysis.
-class IRSimilarityAnalysisPrinterPass
- : public PassInfoMixin<IRSimilarityAnalysisPrinterPass> {
- raw_ostream &OS;
-
-public:
- explicit IRSimilarityAnalysisPrinterPass(raw_ostream &OS) : OS(OS) {}
- PreservedAnalyses run(Module &M, ModuleAnalysisManager &AM);
-};
-
-} // end namespace llvm
-
-#endif // LLVM_ANALYSIS_IRSIMILARITYIDENTIFIER_H
-
-#ifdef __GNUC__
-#pragma GCC diagnostic pop
-#endif
+#pragma once
+
+#ifdef __GNUC__
+#pragma GCC diagnostic push
+#pragma GCC diagnostic ignored "-Wunused-parameter"
+#endif
+
+//===- IRSimilarityIdentifier.h - Find similarity in a module --------------==//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// \file
+// Interface file for the IRSimilarityIdentifier for identifying similarities in
+// IR including the IRInstructionMapper, which maps an Instruction to unsigned
+// integers.
+//
+// Two sequences of instructions are called "similar" if they perform the same
+// series of operations for all inputs.
+//
+// \code
+// %1 = add i32 %a, 10
+// %2 = add i32 %a, %1
+// %3 = icmp slt icmp %1, %2
+// \endcode
+//
+// and
+//
+// \code
+// %1 = add i32 11, %a
+// %2 = sub i32 %a, %1
+// %3 = icmp sgt icmp %2, %1
+// \endcode
+//
+// ultimately have the same result, even if the inputs, and structure are
+// slightly different.
+//
+// For instructions, we do not worry about operands that do not have fixed
+// semantic meaning to the program. We consider the opcode that the instruction
+// has, the types, parameters, and extra information such as the function name,
+// or comparison predicate. These are used to create a hash to map instructions
+// to integers to be used in similarity matching in sequences of instructions
+//
+// Terminology:
+// An IRSimilarityCandidate is a region of IRInstructionData (wrapped
+// Instructions), usually used to denote a region of similarity has been found.
+//
+// A SimilarityGroup is a set of IRSimilarityCandidates that are structurally
+// similar to one another.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_ANALYSIS_IRSIMILARITYIDENTIFIER_H
+#define LLVM_ANALYSIS_IRSIMILARITYIDENTIFIER_H
+
+#include "llvm/IR/InstVisitor.h"
+#include "llvm/IR/Instructions.h"
+#include "llvm/IR/Module.h"
+#include "llvm/IR/PassManager.h"
+#include "llvm/Pass.h"
+#include "llvm/Support/Allocator.h"
+
+namespace llvm {
+namespace IRSimilarity {
+
+struct IRInstructionDataList;
+
+/// This represents what is and is not supported when finding similarity in
+/// Instructions.
+///
+/// Legal Instructions are considered when looking at similarity between
+/// Instructions.
+///
+/// Illegal Instructions cannot be considered when looking for similarity
+/// between Instructions. They act as boundaries between similarity regions.
+///
+/// Invisible Instructions are skipped over during analysis.
+// TODO: Shared with MachineOutliner
+enum InstrType { Legal, Illegal, Invisible };
+
+/// This provides the utilities for hashing an Instruction to an unsigned
+/// integer. Two IRInstructionDatas produce the same hash value when their
+/// underlying Instructions perform the same operation (even if they don't have
+/// the same input operands.)
+/// As a more concrete example, consider the following:
+///
+/// \code
+/// %add1 = add i32 %a, %b
+/// %add2 = add i32 %c, %d
+/// %add3 = add i64 %e, %f
+/// \endcode
+///
+// Then the IRInstructionData wrappers for these Instructions may be hashed like
+/// so:
+///
+/// \code
+/// ; These two adds have the same types and operand types, so they hash to the
+/// ; same number.
+/// %add1 = add i32 %a, %b ; Hash: 1
+/// %add2 = add i32 %c, %d ; Hash: 1
+/// ; This add produces an i64. This differentiates it from %add1 and %add2. So,
+/// ; it hashes to a different number.
+/// %add3 = add i64 %e, %f; Hash: 2
+/// \endcode
+///
+///
+/// This hashing scheme will be used to represent the program as a very long
+/// string. This string can then be placed in a data structure which can be used
+/// for similarity queries.
+///
+/// TODO: Handle types of Instructions which can be equal even with different
+/// operands. (E.g. comparisons with swapped predicates.)
+/// TODO: Handle CallInsts, which are only checked for function type
+/// by \ref isSameOperationAs.
+/// TODO: Handle GetElementPtrInsts, as some of the operands have to be the
+/// exact same, and some do not.
+struct IRInstructionData : ilist_node<IRInstructionData> {
+
+ /// The source Instruction that is being wrapped.
+ Instruction *Inst = nullptr;
+ /// The values of the operands in the Instruction.
+ SmallVector<Value *, 4> OperVals;
+ /// The legality of the wrapped instruction. This is informed by InstrType,
+ /// and is used when checking when two instructions are considered similar.
+ /// If either instruction is not legal, the instructions are automatically not
+ /// considered similar.
+ bool Legal;
+
+ /// This is only relevant if we are wrapping a CmpInst where we needed to
+ /// change the predicate of a compare instruction from a greater than form
+ /// to a less than form. It is None otherwise.
+ Optional<CmpInst::Predicate> RevisedPredicate;
+
+ /// Gather the information that is difficult to gather for an Instruction, or
+ /// is changed. i.e. the operands of an Instruction and the Types of those
+ /// operands. This extra information allows for similarity matching to make
+ /// assertions that allow for more flexibility when checking for whether an
+ /// Instruction performs the same operation.
+ IRInstructionData(Instruction &I, bool Legality, IRInstructionDataList &IDL);
+
+ /// Get the predicate that the compare instruction is using for hashing the
+ /// instruction. the IRInstructionData must be wrapping a CmpInst.
+ CmpInst::Predicate getPredicate() const;
+
+ /// A function that swaps the predicates to their less than form if they are
+ /// in a greater than form. Otherwise, the predicate is unchanged.
+ ///
+ /// \param CI - The comparison operation to find a consistent preidcate for.
+ /// \return the consistent comparison predicate.
+ static CmpInst::Predicate predicateForConsistency(CmpInst *CI);
+
+ /// Hashes \p Value based on its opcode, types, and operand types.
+ /// Two IRInstructionData instances produce the same hash when they perform
+ /// the same operation.
+ ///
+ /// As a simple example, consider the following instructions.
+ ///
+ /// \code
+ /// %add1 = add i32 %x1, %y1
+ /// %add2 = add i32 %x2, %y2
+ ///
+ /// %sub = sub i32 %x1, %y1
+ ///
+ /// %add_i64 = add i64 %x2, %y2
+ /// \endcode
+ ///
+ /// Because the first two adds operate the same types, and are performing the
+ /// same action, they will be hashed to the same value.
+ ///
+ /// However, the subtraction instruction is not the same as an addition, and
+ /// will be hashed to a different value.
+ ///
+ /// Finally, the last add has a different type compared to the first two add
+ /// instructions, so it will also be hashed to a different value that any of
+ /// the previous instructions.
+ ///
+ /// \param [in] ID - The IRInstructionData instance to be hashed.
+ /// \returns A hash_value of the IRInstructionData.
+ friend hash_code hash_value(const IRInstructionData &ID) {
+ SmallVector<Type *, 4> OperTypes;
+ for (Value *V : ID.OperVals)
+ OperTypes.push_back(V->getType());
+
+ if (isa<CmpInst>(ID.Inst))
+ return llvm::hash_combine(
+ llvm::hash_value(ID.Inst->getOpcode()),
+ llvm::hash_value(ID.Inst->getType()),
+ llvm::hash_value(ID.getPredicate()),
+ llvm::hash_combine_range(OperTypes.begin(), OperTypes.end()));
+ else if (CallInst *CI = dyn_cast<CallInst>(ID.Inst))
+ return llvm::hash_combine(
+ llvm::hash_value(ID.Inst->getOpcode()),
+ llvm::hash_value(ID.Inst->getType()),
+ llvm::hash_value(CI->getCalledFunction()->getName().str()),
+ llvm::hash_combine_range(OperTypes.begin(), OperTypes.end()));
+ return llvm::hash_combine(
+ llvm::hash_value(ID.Inst->getOpcode()),
+ llvm::hash_value(ID.Inst->getType()),
+ llvm::hash_combine_range(OperTypes.begin(), OperTypes.end()));
+ }
+
+ IRInstructionDataList *IDL = nullptr;
+};
+
+struct IRInstructionDataList : simple_ilist<IRInstructionData> {};
+
+/// Compare one IRInstructionData class to another IRInstructionData class for
+/// whether they are performing a the same operation, and can mapped to the
+/// same value. For regular instructions if the hash value is the same, then
+/// they will also be close.
+///
+/// \param A - The first IRInstructionData class to compare
+/// \param B - The second IRInstructionData class to compare
+/// \returns true if \p A and \p B are similar enough to be mapped to the same
+/// value.
+bool isClose(const IRInstructionData &A, const IRInstructionData &B);
+
+struct IRInstructionDataTraits : DenseMapInfo<IRInstructionData *> {
+ static inline IRInstructionData *getEmptyKey() { return nullptr; }
+ static inline IRInstructionData *getTombstoneKey() {
+ return reinterpret_cast<IRInstructionData *>(-1);
+ }
+
+ static unsigned getHashValue(const IRInstructionData *E) {
+ using llvm::hash_value;
+ assert(E && "IRInstructionData is a nullptr?");
+ return hash_value(*E);
+ }
+
+ static bool isEqual(const IRInstructionData *LHS,
+ const IRInstructionData *RHS) {
+ if (RHS == getEmptyKey() || RHS == getTombstoneKey() ||
+ LHS == getEmptyKey() || LHS == getTombstoneKey())
+ return LHS == RHS;
+
+ assert(LHS && RHS && "nullptr should have been caught by getEmptyKey?");
+ return isClose(*LHS, *RHS);
+ }
+};
+
+/// Helper struct for converting the Instructions in a Module into a vector of
+/// unsigned integers. This vector of unsigned integers can be thought of as a
+/// "numeric string". This numeric string can then be queried by, for example,
+/// data structures that find repeated substrings.
+///
+/// This hashing is done per BasicBlock in the module. To hash Instructions
+/// based off of their operations, each Instruction is wrapped in an
+/// IRInstructionData struct. The unsigned integer for an IRInstructionData
+/// depends on:
+/// - The hash provided by the IRInstructionData.
+/// - Which member of InstrType the IRInstructionData is classified as.
+// See InstrType for more details on the possible classifications, and how they
+// manifest in the numeric string.
+///
+/// The numeric string for an individual BasicBlock is terminated by an unique
+/// unsigned integer. This prevents data structures which rely on repetition
+/// from matching across BasicBlocks. (For example, the SuffixTree.)
+/// As a concrete example, if we have the following two BasicBlocks:
+/// \code
+/// bb0:
+/// %add1 = add i32 %a, %b
+/// %add2 = add i32 %c, %d
+/// %add3 = add i64 %e, %f
+/// bb1:
+/// %sub = sub i32 %c, %d
+/// \endcode
+/// We may hash the Instructions like this (via IRInstructionData):
+/// \code
+/// bb0:
+/// %add1 = add i32 %a, %b ; Hash: 1
+/// %add2 = add i32 %c, %d; Hash: 1
+/// %add3 = add i64 %e, %f; Hash: 2
+/// bb1:
+/// %sub = sub i32 %c, %d; Hash: 3
+/// %add4 = add i32 %c, %d ; Hash: 1
+/// \endcode
+/// And produce a "numeric string representation" like so:
+/// 1, 1, 2, unique_integer_1, 3, 1, unique_integer_2
+///
+/// TODO: This is very similar to the MachineOutliner, and should be
+/// consolidated into the same interface.
+struct IRInstructionMapper {
+ /// The starting illegal instruction number to map to.
+ ///
+ /// Set to -3 for compatibility with DenseMapInfo<unsigned>.
+ unsigned IllegalInstrNumber = static_cast<unsigned>(-3);
+
+ /// The next available integer to assign to a legal Instruction to.
+ unsigned LegalInstrNumber = 0;
+
+ /// Correspondence from IRInstructionData to unsigned integers.
+ DenseMap<IRInstructionData *, unsigned, IRInstructionDataTraits>
+ InstructionIntegerMap;
+
+ /// Set if we added an illegal number in the previous step.
+ /// Since each illegal number is unique, we only need one of them between
+ /// each range of legal numbers. This lets us make sure we don't add more
+ /// than one illegal number per range.
+ bool AddedIllegalLastTime = false;
+
+ /// Marks whether we found a illegal instruction in the previous step.
+ bool CanCombineWithPrevInstr = false;
+
+ /// Marks whether we have found a set of instructions that is long enough
+ /// to be considered for similarity.
+ bool HaveLegalRange = false;
+
+ /// This allocator pointer is in charge of holding on to the IRInstructionData
+ /// so it is not deallocated until whatever external tool is using it is done
+ /// with the information.
+ SpecificBumpPtrAllocator<IRInstructionData> *InstDataAllocator = nullptr;
+
+ /// This allocator pointer is in charge of creating the IRInstructionDataList
+ /// so it is not deallocated until whatever external tool is using it is done
+ /// with the information.
+ SpecificBumpPtrAllocator<IRInstructionDataList> *IDLAllocator = nullptr;
+
+ /// Get an allocated IRInstructionData struct using the InstDataAllocator.
+ ///
+ /// \param I - The Instruction to wrap with IRInstructionData.
+ /// \param Legality - A boolean value that is true if the instruction is to
+ /// be considered for similarity, and false if not.
+ /// \param IDL - The InstructionDataList that the IRInstructionData is
+ /// inserted into.
+ /// \returns An allocated IRInstructionData struct.
+ IRInstructionData *allocateIRInstructionData(Instruction &I, bool Legality,
+ IRInstructionDataList &IDL);
+
+ /// Get an allocated IRInstructionDataList object using the IDLAllocator.
+ ///
+ /// \returns An allocated IRInstructionDataList object.
+ IRInstructionDataList *allocateIRInstructionDataList();
+
+ IRInstructionDataList *IDL = nullptr;
+
+ /// Maps the Instructions in a BasicBlock \p BB to legal or illegal integers
+ /// determined by \p InstrType. Two Instructions are mapped to the same value
+ /// if they are close as defined by the InstructionData class above.
+ ///
+ /// \param [in] BB - The BasicBlock to be mapped to integers.
+ /// \param [in,out] InstrList - Vector of IRInstructionData to append to.
+ /// \param [in,out] IntegerMapping - Vector of unsigned integers to append to.
+ void convertToUnsignedVec(BasicBlock &BB,
+ std::vector<IRInstructionData *> &InstrList,
+ std::vector<unsigned> &IntegerMapping);
+
+ /// Maps an Instruction to a legal integer.
+ ///
+ /// \param [in] It - The Instruction to be mapped to an integer.
+ /// \param [in,out] IntegerMappingForBB - Vector of unsigned integers to
+ /// append to.
+ /// \param [in,out] InstrListForBB - Vector of InstructionData to append to.
+ /// \returns The integer \p It was mapped to.
+ unsigned mapToLegalUnsigned(BasicBlock::iterator &It,
+ std::vector<unsigned> &IntegerMappingForBB,
+ std::vector<IRInstructionData *> &InstrListForBB);
+
+ /// Maps an Instruction to an illegal integer.
+ ///
+ /// \param [in] It - The \p Instruction to be mapped to an integer.
+ /// \param [in,out] IntegerMappingForBB - Vector of unsigned integers to
+ /// append to.
+ /// \param [in,out] InstrListForBB - Vector of IRInstructionData to append to.
+ /// \param End - true if creating a dummy IRInstructionData at the end of a
+ /// basic block.
+ /// \returns The integer \p It was mapped to.
+ unsigned mapToIllegalUnsigned(
+ BasicBlock::iterator &It, std::vector<unsigned> &IntegerMappingForBB,
+ std::vector<IRInstructionData *> &InstrListForBB, bool End = false);
+
+ IRInstructionMapper(SpecificBumpPtrAllocator<IRInstructionData> *IDA,
+ SpecificBumpPtrAllocator<IRInstructionDataList> *IDLA)
+ : InstDataAllocator(IDA), IDLAllocator(IDLA) {
+ // Make sure that the implementation of DenseMapInfo<unsigned> hasn't
+ // changed.
+ assert(DenseMapInfo<unsigned>::getEmptyKey() == static_cast<unsigned>(-1) &&
+ "DenseMapInfo<unsigned>'s empty key isn't -1!");
+ assert(DenseMapInfo<unsigned>::getTombstoneKey() ==
+ static_cast<unsigned>(-2) &&
+ "DenseMapInfo<unsigned>'s tombstone key isn't -2!");
+
+ IDL = new (IDLAllocator->Allocate())
+ IRInstructionDataList();
+ }
+
+ /// Custom InstVisitor to classify different instructions for whether it can
+ /// be analyzed for similarity.
+ struct InstructionClassification
+ : public InstVisitor<InstructionClassification, InstrType> {
+ InstructionClassification() {}
+
+ // TODO: Determine a scheme to resolve when the label is similar enough.
+ InstrType visitBranchInst(BranchInst &BI) { return Illegal; }
+ // TODO: Determine a scheme to resolve when the labels are similar enough.
+ InstrType visitPHINode(PHINode &PN) { return Illegal; }
+ // TODO: Handle allocas.
+ InstrType visitAllocaInst(AllocaInst &AI) { return Illegal; }
+ // We exclude variable argument instructions since variable arguments
+ // requires extra checking of the argument list.
+ InstrType visitVAArgInst(VAArgInst &VI) { return Illegal; }
+ // We exclude all exception handling cases since they are so context
+ // dependent.
+ InstrType visitLandingPadInst(LandingPadInst &LPI) { return Illegal; }
+ InstrType visitFuncletPadInst(FuncletPadInst &FPI) { return Illegal; }
+ // DebugInfo should be included in the regions, but should not be
+ // analyzed for similarity as it has no bearing on the outcome of the
+ // program.
+ InstrType visitDbgInfoIntrinsic(DbgInfoIntrinsic &DII) { return Invisible; }
+ // TODO: Handle specific intrinsics.
+ InstrType visitIntrinsicInst(IntrinsicInst &II) { return Illegal; }
+ // We only allow call instructions where the function has a name and
+ // is not an indirect call.
+ InstrType visitCallInst(CallInst &CI) {
+ Function *F = CI.getCalledFunction();
+ if (!F || CI.isIndirectCall() || !F->hasName())
+ return Illegal;
+ return Legal;
+ }
+ // TODO: We do not current handle similarity that changes the control flow.
+ InstrType visitInvokeInst(InvokeInst &II) { return Illegal; }
+ // TODO: We do not current handle similarity that changes the control flow.
+ InstrType visitCallBrInst(CallBrInst &CBI) { return Illegal; }
+ // TODO: Handle interblock similarity.
+ InstrType visitTerminator(Instruction &I) { return Illegal; }
+ InstrType visitInstruction(Instruction &I) { return Legal; }
+ };
+
+ /// Maps an Instruction to a member of InstrType.
+ InstructionClassification InstClassifier;
+};
+
+/// This is a class that wraps a range of IRInstructionData from one point to
+/// another in the vector of IRInstructionData, which is a region of the
+/// program. It is also responsible for defining the structure within this
+/// region of instructions.
+///
+/// The structure of a region is defined through a value numbering system
+/// assigned to each unique value in a region at the creation of the
+/// IRSimilarityCandidate.
+///
+/// For example, for each Instruction we add a mapping for each new
+/// value seen in that Instruction.
+/// IR: Mapping Added:
+/// %add1 = add i32 %a, c1 %add1 -> 3, %a -> 1, c1 -> 2
+/// %add2 = add i32 %a, %1 %add2 -> 4
+/// %add3 = add i32 c2, c1 %add3 -> 6, c2 -> 5
+///
+/// We can compare IRSimilarityCandidates against one another.
+/// The \ref isSimilar function compares each IRInstructionData against one
+/// another and if we have the same sequences of IRInstructionData that would
+/// create the same hash, we have similar IRSimilarityCandidates.
+///
+/// We can also compare the structure of IRSimilarityCandidates. If we can
+/// create a mapping of registers in the region contained by one
+/// IRSimilarityCandidate to the region contained by different
+/// IRSimilarityCandidate, they can be considered structurally similar.
+///
+/// IRSimilarityCandidate1: IRSimilarityCandidate2:
+/// %add1 = add i32 %a, %b %add1 = add i32 %d, %e
+/// %add2 = add i32 %a, %c %add2 = add i32 %d, %f
+/// %add3 = add i32 c1, c2 %add3 = add i32 c3, c4
+///
+/// Can have the following mapping from candidate to candidate of:
+/// %a -> %d, %b -> %e, %c -> %f, c1 -> c3, c2 -> c4
+/// and can be considered similar.
+///
+/// IRSimilarityCandidate1: IRSimilarityCandidate2:
+/// %add1 = add i32 %a, %b %add1 = add i32 %d, c4
+/// %add2 = add i32 %a, %c %add2 = add i32 %d, %f
+/// %add3 = add i32 c1, c2 %add3 = add i32 c3, c4
+///
+/// We cannot create the same mapping since the use of c4 is not used in the
+/// same way as %b or c2.
+class IRSimilarityCandidate {
+private:
+ /// The start index of this IRSimilarityCandidate in the instruction list.
+ unsigned StartIdx = 0;
+
+ /// The number of instructions in this IRSimilarityCandidate.
+ unsigned Len = 0;
+
+ /// The first instruction in this IRSimilarityCandidate.
+ IRInstructionData *FirstInst = nullptr;
+
+ /// The last instruction in this IRSimilarityCandidate.
+ IRInstructionData *LastInst = nullptr;
+
+ /// Global Value Numbering structures
+ /// @{
+ /// Stores the mapping of the value to the number assigned to it in the
+ /// IRSimilarityCandidate.
+ DenseMap<Value *, unsigned> ValueToNumber;
+ /// Stores the mapping of the number to the value assigned this number.
+ DenseMap<unsigned, Value *> NumberToValue;
+ /// @}
+
+public:
+ /// \param StartIdx - The starting location of the region.
+ /// \param Len - The length of the region.
+ /// \param FirstInstIt - The starting IRInstructionData of the region.
+ /// \param LastInstIt - The ending IRInstructionData of the region.
+ IRSimilarityCandidate(unsigned StartIdx, unsigned Len,
+ IRInstructionData *FirstInstIt,
+ IRInstructionData *LastInstIt);
+
+ /// \param A - The first IRInstructionCandidate to compare.
+ /// \param B - The second IRInstructionCandidate to compare.
+ /// \returns True when every IRInstructionData in \p A is similar to every
+ /// IRInstructionData in \p B.
+ static bool isSimilar(const IRSimilarityCandidate &A,
+ const IRSimilarityCandidate &B);
+
+ /// \param A - The first IRInstructionCandidate to compare.
+ /// \param B - The second IRInstructionCandidate to compare.
+ /// \returns True when every IRInstructionData in \p A is structurally similar
+ /// to \p B.
+ static bool compareStructure(const IRSimilarityCandidate &A,
+ const IRSimilarityCandidate &B);
+
+ struct OperandMapping {
+ /// The IRSimilarityCandidate that holds the instruction the OperVals were
+ /// pulled from.
+ const IRSimilarityCandidate &IRSC;
+
+ /// The operand values to be analyzed.
+ ArrayRef<Value *> &OperVals;
+
+ /// The current mapping of global value numbers from one IRSimilarityCandidate
+ /// to another IRSimilarityCandidate.
+ DenseMap<unsigned, DenseSet<unsigned>> &ValueNumberMapping;
+ };
+
+ /// Compare the operands in \p A and \p B and check that the current mapping
+ /// of global value numbers from \p A to \p B and \p B to \A is consistent.
+ ///
+ /// \param A - The first IRInstructionCandidate, operand values, and current
+ /// operand mappings to compare.
+ /// \param B - The second IRInstructionCandidate, operand values, and current
+ /// operand mappings to compare.
+ /// \returns true if the IRSimilarityCandidates operands are compatible.
+ static bool compareNonCommutativeOperandMapping(OperandMapping A,
+ OperandMapping B);
+
+ /// Compare the operands in \p A and \p B and check that the current mapping
+ /// of global value numbers from \p A to \p B and \p B to \A is consistent
+ /// given that the operands are commutative.
+ ///
+ /// \param A - The first IRInstructionCandidate, operand values, and current
+ /// operand mappings to compare.
+ /// \param B - The second IRInstructionCandidate, operand values, and current
+ /// operand mappings to compare.
+ /// \returns true if the IRSimilarityCandidates operands are compatible.
+ static bool compareCommutativeOperandMapping(OperandMapping A,
+ OperandMapping B);
+
+ /// Compare the start and end indices of the two IRSimilarityCandidates for
+ /// whether they overlap. If the start instruction of one
+ /// IRSimilarityCandidate is less than the end instruction of the other, and
+ /// the start instruction of one is greater than the start instruction of the
+ /// other, they overlap.
+ ///
+ /// \returns true if the IRSimilarityCandidates do not have overlapping
+ /// instructions.
+ static bool overlap(const IRSimilarityCandidate &A,
+ const IRSimilarityCandidate &B);
+
+ /// \returns the number of instructions in this Candidate.
+ unsigned getLength() const { return Len; }
+
+ /// \returns the start index of this IRSimilarityCandidate.
+ unsigned getStartIdx() const { return StartIdx; }
+
+ /// \returns the end index of this IRSimilarityCandidate.
+ unsigned getEndIdx() const { return StartIdx + Len - 1; }
+
+ /// \returns The first IRInstructionData.
+ IRInstructionData *front() const { return FirstInst; }
+ /// \returns The last IRInstructionData.
+ IRInstructionData *back() const { return LastInst; }
+
+ /// \returns The first Instruction.
+ Instruction *frontInstruction() { return FirstInst->Inst; }
+ /// \returns The last Instruction
+ Instruction *backInstruction() { return LastInst->Inst; }
+
+ /// \returns The BasicBlock the IRSimilarityCandidate starts in.
+ BasicBlock *getStartBB() { return FirstInst->Inst->getParent(); }
+ /// \returns The BasicBlock the IRSimilarityCandidate ends in.
+ BasicBlock *getEndBB() { return LastInst->Inst->getParent(); }
+
+ /// \returns The Function that the IRSimilarityCandidate is located in.
+ Function *getFunction() { return getStartBB()->getParent(); }
+
+ /// Finds the positive number associated with \p V if it has been mapped.
+ /// \param [in] V - the Value to find.
+ /// \returns The positive number corresponding to the value.
+ /// \returns None if not present.
+ Optional<unsigned> getGVN(Value *V) {
+ assert(V != nullptr && "Value is a nullptr?");
+ DenseMap<Value *, unsigned>::iterator VNIt = ValueToNumber.find(V);
+ if (VNIt == ValueToNumber.end())
+ return None;
+ return VNIt->second;
+ }
+
+ /// Finds the Value associate with \p Num if it exists.
+ /// \param [in] Num - the number to find.
+ /// \returns The Value associated with the number.
+ /// \returns None if not present.
+ Optional<Value *> fromGVN(unsigned Num) {
+ DenseMap<unsigned, Value *>::iterator VNIt = NumberToValue.find(Num);
+ if (VNIt == NumberToValue.end())
+ return None;
+ assert(VNIt->second != nullptr && "Found value is a nullptr!");
+ return VNIt->second;
+ }
+
+ /// \param RHS -The IRSimilarityCandidate to compare against
+ /// \returns true if the IRSimilarityCandidate is occurs after the
+ /// IRSimilarityCandidate in the program.
+ bool operator<(const IRSimilarityCandidate &RHS) const {
+ return getStartIdx() > RHS.getStartIdx();
+ }
+
+ using iterator = IRInstructionDataList::iterator;
+ iterator begin() const { return iterator(front()); }
+ iterator end() const { return std::next(iterator(back())); }
+};
+
+typedef std::vector<IRSimilarityCandidate> SimilarityGroup;
+typedef std::vector<SimilarityGroup> SimilarityGroupList;
+
+/// This class puts all the pieces of the IRInstructionData,
+/// IRInstructionMapper, IRSimilarityCandidate together.
+///
+/// It first feeds the Module or vector of Modules into the IRInstructionMapper,
+/// and puts all the mapped instructions into a single long list of
+/// IRInstructionData.
+///
+/// The list of unsigned integers is given to the Suffix Tree or similar data
+/// structure to find repeated subsequences. We construct an
+/// IRSimilarityCandidate for each instance of the subsequence. We compare them
+/// against one another since These repeated subsequences can have different
+/// structure. For each different kind of structure found, we create a
+/// similarity group.
+///
+/// If we had four IRSimilarityCandidates A, B, C, and D where A, B and D are
+/// structurally similar to one another, while C is different we would have two
+/// SimilarityGroups:
+///
+/// SimilarityGroup 1: SimilarityGroup 2
+/// A, B, D C
+///
+/// A list of the different similarity groups is then returned after
+/// analyzing the module.
+class IRSimilarityIdentifier {
+public:
+ IRSimilarityIdentifier()
+ : Mapper(&InstDataAllocator, &InstDataListAllocator) {}
+
+ /// \param M the module to find similarity in.
+ explicit IRSimilarityIdentifier(Module &M)
+ : Mapper(&InstDataAllocator, &InstDataListAllocator) {
+ findSimilarity(M);
+ }
+
+private:
+ /// Map the instructions in the module to unsigned integers, using mapping
+ /// already present in the Mapper if possible.
+ ///
+ /// \param [in] M Module - To map to integers.
+ /// \param [in,out] InstrList - The vector to append IRInstructionData to.
+ /// \param [in,out] IntegerMapping - The vector to append integers to.
+ void populateMapper(Module &M, std::vector<IRInstructionData *> &InstrList,
+ std::vector<unsigned> &IntegerMapping);
+
+ /// Map the instructions in the modules vector to unsigned integers, using
+ /// mapping already present in the mapper if possible.
+ ///
+ /// \param [in] Modules - The list of modules to use to populate the mapper
+ /// \param [in,out] InstrList - The vector to append IRInstructionData to.
+ /// \param [in,out] IntegerMapping - The vector to append integers to.
+ void populateMapper(ArrayRef<std::unique_ptr<Module>> &Modules,
+ std::vector<IRInstructionData *> &InstrList,
+ std::vector<unsigned> &IntegerMapping);
+
+ /// Find the similarity candidates in \p InstrList and corresponding
+ /// \p UnsignedVec
+ ///
+ /// \param [in,out] InstrList - The vector to append IRInstructionData to.
+ /// \param [in,out] IntegerMapping - The vector to append integers to.
+ /// candidates found in the program.
+ void findCandidates(std::vector<IRInstructionData *> &InstrList,
+ std::vector<unsigned> &IntegerMapping);
+
+public:
+ // Find the IRSimilarityCandidates in the \p Modules and group by structural
+ // similarity in a SimilarityGroup, each group is returned in a
+ // SimilarityGroupList.
+ //
+ // \param [in] Modules - the modules to analyze.
+ // \returns The groups of similarity ranges found in the modules.
+ SimilarityGroupList &
+ findSimilarity(ArrayRef<std::unique_ptr<Module>> Modules);
+
+ // Find the IRSimilarityCandidates in the given Module grouped by structural
+ // similarity in a SimilarityGroup, contained inside a SimilarityGroupList.
+ //
+ // \param [in] M - the module to analyze.
+ // \returns The groups of similarity ranges found in the module.
+ SimilarityGroupList &findSimilarity(Module &M);
+
+ // Clears \ref SimilarityCandidates if it is already filled by a previous run.
+ void resetSimilarityCandidates() {
+ // If we've already analyzed a Module or set of Modules, so we must clear
+ // the SimilarityCandidates to make sure we do not have only old values
+ // hanging around.
+ if (SimilarityCandidates.hasValue())
+ SimilarityCandidates->clear();
+ else
+ SimilarityCandidates = SimilarityGroupList();
+ }
+
+ // \returns The groups of similarity ranges found in the most recently passed
+ // set of modules.
+ Optional<SimilarityGroupList> &getSimilarity() {
+ return SimilarityCandidates;
+ }
+
+private:
+ /// The allocator for IRInstructionData.
+ SpecificBumpPtrAllocator<IRInstructionData> InstDataAllocator;
+
+ /// The allocator for IRInstructionDataLists.
+ SpecificBumpPtrAllocator<IRInstructionDataList> InstDataListAllocator;
+
+ /// Map Instructions to unsigned integers and wraps the Instruction in an
+ /// instance of IRInstructionData.
+ IRInstructionMapper Mapper;
+
+ /// The SimilarityGroups found with the most recent run of \ref
+ /// findSimilarity. None if there is no recent run.
+ Optional<SimilarityGroupList> SimilarityCandidates;
+};
+
+} // end namespace IRSimilarity
+
+/// An analysis pass based on legacy pass manager that runs and returns
+/// IRSimilarityIdentifier run on the Module.
+class IRSimilarityIdentifierWrapperPass : public ModulePass {
+ std::unique_ptr<IRSimilarity::IRSimilarityIdentifier> IRSI;
+
+public:
+ static char ID;
+ IRSimilarityIdentifierWrapperPass();
+
+ IRSimilarity::IRSimilarityIdentifier &getIRSI() { return *IRSI; }
+ const IRSimilarity::IRSimilarityIdentifier &getIRSI() const { return *IRSI; }
+
+ bool doInitialization(Module &M) override;
+ bool doFinalization(Module &M) override;
+ bool runOnModule(Module &M) override;
+ void getAnalysisUsage(AnalysisUsage &AU) const override {
+ AU.setPreservesAll();
+ }
+};
+
+/// An analysis pass that runs and returns the IRSimilarityIdentifier run on the
+/// Module.
+class IRSimilarityAnalysis : public AnalysisInfoMixin<IRSimilarityAnalysis> {
+public:
+ typedef IRSimilarity::IRSimilarityIdentifier Result;
+
+ Result run(Module &M, ModuleAnalysisManager &);
+
+private:
+ friend AnalysisInfoMixin<IRSimilarityAnalysis>;
+ static AnalysisKey Key;
+};
+
+/// Printer pass that uses \c IRSimilarityAnalysis.
+class IRSimilarityAnalysisPrinterPass
+ : public PassInfoMixin<IRSimilarityAnalysisPrinterPass> {
+ raw_ostream &OS;
+
+public:
+ explicit IRSimilarityAnalysisPrinterPass(raw_ostream &OS) : OS(OS) {}
+ PreservedAnalyses run(Module &M, ModuleAnalysisManager &AM);
+};
+
+} // end namespace llvm
+
+#endif // LLVM_ANALYSIS_IRSIMILARITYIDENTIFIER_H
+
+#ifdef __GNUC__
+#pragma GCC diagnostic pop
+#endif
diff --git a/contrib/libs/llvm12/include/llvm/Analysis/IVDescriptors.h b/contrib/libs/llvm12/include/llvm/Analysis/IVDescriptors.h
index 524b215744..3077c10848 100644
--- a/contrib/libs/llvm12/include/llvm/Analysis/IVDescriptors.h
+++ b/contrib/libs/llvm12/include/llvm/Analysis/IVDescriptors.h
@@ -40,24 +40,24 @@ class ScalarEvolution;
class SCEV;
class DominatorTree;
-/// These are the kinds of recurrences that we support.
-enum class RecurKind {
- None, ///< Not a recurrence.
- Add, ///< Sum of integers.
- Mul, ///< Product of integers.
- Or, ///< Bitwise or logical OR of integers.
- And, ///< Bitwise or logical AND of integers.
- Xor, ///< Bitwise or logical XOR of integers.
- SMin, ///< Signed integer min implemented in terms of select(cmp()).
- SMax, ///< Signed integer max implemented in terms of select(cmp()).
- UMin, ///< Unisgned integer min implemented in terms of select(cmp()).
- UMax, ///< Unsigned integer max implemented in terms of select(cmp()).
- FAdd, ///< Sum of floats.
- FMul, ///< Product of floats.
- FMin, ///< FP min implemented in terms of select(cmp()).
- FMax ///< FP max implemented in terms of select(cmp()).
-};
-
+/// These are the kinds of recurrences that we support.
+enum class RecurKind {
+ None, ///< Not a recurrence.
+ Add, ///< Sum of integers.
+ Mul, ///< Product of integers.
+ Or, ///< Bitwise or logical OR of integers.
+ And, ///< Bitwise or logical AND of integers.
+ Xor, ///< Bitwise or logical XOR of integers.
+ SMin, ///< Signed integer min implemented in terms of select(cmp()).
+ SMax, ///< Signed integer max implemented in terms of select(cmp()).
+ UMin, ///< Unsigned integer min implemented in terms of select(cmp()).
+ UMax, ///< Unsigned integer max implemented in terms of select(cmp()).
+ FAdd, ///< Sum of floats.
+ FMul, ///< Product of floats.
+ FMin, ///< FP min implemented in terms of select(cmp()).
+ FMax ///< FP max implemented in terms of select(cmp()).
+};
+
/// The RecurrenceDescriptor is used to identify recurrences variables in a
/// loop. Reduction is a special case of recurrence that has uses of the
/// recurrence variable outside the loop. The method isReductionPHI identifies
@@ -74,11 +74,11 @@ class RecurrenceDescriptor {
public:
RecurrenceDescriptor() = default;
- RecurrenceDescriptor(Value *Start, Instruction *Exit, RecurKind K,
- FastMathFlags FMF, Instruction *UAI, Type *RT,
- bool Signed, SmallPtrSetImpl<Instruction *> &CI)
+ RecurrenceDescriptor(Value *Start, Instruction *Exit, RecurKind K,
+ FastMathFlags FMF, Instruction *UAI, Type *RT,
+ bool Signed, SmallPtrSetImpl<Instruction *> &CI)
: StartValue(Start), LoopExitInstr(Exit), Kind(K), FMF(FMF),
- UnsafeAlgebraInst(UAI), RecurrenceType(RT), IsSigned(Signed) {
+ UnsafeAlgebraInst(UAI), RecurrenceType(RT), IsSigned(Signed) {
CastInsts.insert(CI.begin(), CI.end());
}
@@ -86,22 +86,22 @@ public:
class InstDesc {
public:
InstDesc(bool IsRecur, Instruction *I, Instruction *UAI = nullptr)
- : IsRecurrence(IsRecur), PatternLastInst(I),
- RecKind(RecurKind::None), UnsafeAlgebraInst(UAI) {}
+ : IsRecurrence(IsRecur), PatternLastInst(I),
+ RecKind(RecurKind::None), UnsafeAlgebraInst(UAI) {}
- InstDesc(Instruction *I, RecurKind K, Instruction *UAI = nullptr)
- : IsRecurrence(true), PatternLastInst(I), RecKind(K),
+ InstDesc(Instruction *I, RecurKind K, Instruction *UAI = nullptr)
+ : IsRecurrence(true), PatternLastInst(I), RecKind(K),
UnsafeAlgebraInst(UAI) {}
- bool isRecurrence() const { return IsRecurrence; }
+ bool isRecurrence() const { return IsRecurrence; }
- bool hasUnsafeAlgebra() const { return UnsafeAlgebraInst != nullptr; }
+ bool hasUnsafeAlgebra() const { return UnsafeAlgebraInst != nullptr; }
- Instruction *getUnsafeAlgebraInst() const { return UnsafeAlgebraInst; }
+ Instruction *getUnsafeAlgebraInst() const { return UnsafeAlgebraInst; }
- RecurKind getRecKind() const { return RecKind; }
+ RecurKind getRecKind() const { return RecKind; }
- Instruction *getPatternInst() const { return PatternLastInst; }
+ Instruction *getPatternInst() const { return PatternLastInst; }
private:
// Is this instruction a recurrence candidate.
@@ -109,8 +109,8 @@ public:
// The last instruction in a min/max pattern (select of the select(icmp())
// pattern), or the current recurrence instruction otherwise.
Instruction *PatternLastInst;
- // If this is a min/max pattern.
- RecurKind RecKind;
+ // If this is a min/max pattern.
+ RecurKind RecKind;
// Recurrence has unsafe algebra.
Instruction *UnsafeAlgebraInst;
};
@@ -120,7 +120,7 @@ public:
/// select(icmp()) this function advances the instruction pointer 'I' from the
/// compare instruction to the select instruction and stores this pointer in
/// 'PatternLastInst' member of the returned struct.
- static InstDesc isRecurrenceInstr(Instruction *I, RecurKind Kind,
+ static InstDesc isRecurrenceInstr(Instruction *I, RecurKind Kind,
InstDesc &Prev, bool HasFunNoNaNAttr);
/// Returns true if instruction I has multiple uses in Insts
@@ -131,28 +131,28 @@ public:
/// Returns true if all uses of the instruction I is within the Set.
static bool areAllUsesIn(Instruction *I, SmallPtrSetImpl<Instruction *> &Set);
- /// Returns a struct describing if the instruction is a
+ /// Returns a struct describing if the instruction is a
/// Select(ICmp(X, Y), X, Y) instruction pattern corresponding to a min(X, Y)
- /// or max(X, Y). \p Prev specifies the description of an already processed
- /// select instruction, so its corresponding cmp can be matched to it.
- static InstDesc isMinMaxSelectCmpPattern(Instruction *I,
- const InstDesc &Prev);
+ /// or max(X, Y). \p Prev specifies the description of an already processed
+ /// select instruction, so its corresponding cmp can be matched to it.
+ static InstDesc isMinMaxSelectCmpPattern(Instruction *I,
+ const InstDesc &Prev);
/// Returns a struct describing if the instruction is a
/// Select(FCmp(X, Y), (Z = X op PHINode), PHINode) instruction pattern.
- static InstDesc isConditionalRdxPattern(RecurKind Kind, Instruction *I);
+ static InstDesc isConditionalRdxPattern(RecurKind Kind, Instruction *I);
/// Returns identity corresponding to the RecurrenceKind.
- static Constant *getRecurrenceIdentity(RecurKind K, Type *Tp);
+ static Constant *getRecurrenceIdentity(RecurKind K, Type *Tp);
- /// Returns the opcode corresponding to the RecurrenceKind.
- static unsigned getOpcode(RecurKind Kind);
+ /// Returns the opcode corresponding to the RecurrenceKind.
+ static unsigned getOpcode(RecurKind Kind);
/// Returns true if Phi is a reduction of type Kind and adds it to the
/// RecurrenceDescriptor. If either \p DB is non-null or \p AC and \p DT are
/// non-null, the minimal bit width needed to compute the reduction will be
/// computed.
- static bool AddReductionVar(PHINode *Phi, RecurKind Kind, Loop *TheLoop,
+ static bool AddReductionVar(PHINode *Phi, RecurKind Kind, Loop *TheLoop,
bool HasFunNoNaNAttr,
RecurrenceDescriptor &RedDes,
DemandedBits *DB = nullptr,
@@ -181,64 +181,64 @@ public:
DenseMap<Instruction *, Instruction *> &SinkAfter,
DominatorTree *DT);
- RecurKind getRecurrenceKind() const { return Kind; }
+ RecurKind getRecurrenceKind() const { return Kind; }
- unsigned getOpcode() const { return getOpcode(getRecurrenceKind()); }
+ unsigned getOpcode() const { return getOpcode(getRecurrenceKind()); }
- FastMathFlags getFastMathFlags() const { return FMF; }
+ FastMathFlags getFastMathFlags() const { return FMF; }
- TrackingVH<Value> getRecurrenceStartValue() const { return StartValue; }
+ TrackingVH<Value> getRecurrenceStartValue() const { return StartValue; }
- Instruction *getLoopExitInstr() const { return LoopExitInstr; }
+ Instruction *getLoopExitInstr() const { return LoopExitInstr; }
/// Returns true if the recurrence has unsafe algebra which requires a relaxed
/// floating-point model.
- bool hasUnsafeAlgebra() const { return UnsafeAlgebraInst != nullptr; }
+ bool hasUnsafeAlgebra() const { return UnsafeAlgebraInst != nullptr; }
/// Returns first unsafe algebra instruction in the PHI node's use-chain.
- Instruction *getUnsafeAlgebraInst() const { return UnsafeAlgebraInst; }
+ Instruction *getUnsafeAlgebraInst() const { return UnsafeAlgebraInst; }
/// Returns true if the recurrence kind is an integer kind.
- static bool isIntegerRecurrenceKind(RecurKind Kind);
+ static bool isIntegerRecurrenceKind(RecurKind Kind);
/// Returns true if the recurrence kind is a floating point kind.
- static bool isFloatingPointRecurrenceKind(RecurKind Kind);
+ static bool isFloatingPointRecurrenceKind(RecurKind Kind);
/// Returns true if the recurrence kind is an arithmetic kind.
- static bool isArithmeticRecurrenceKind(RecurKind Kind);
-
- /// Returns true if the recurrence kind is an integer min/max kind.
- static bool isIntMinMaxRecurrenceKind(RecurKind Kind) {
- return Kind == RecurKind::UMin || Kind == RecurKind::UMax ||
- Kind == RecurKind::SMin || Kind == RecurKind::SMax;
- }
-
- /// Returns true if the recurrence kind is a floating-point min/max kind.
- static bool isFPMinMaxRecurrenceKind(RecurKind Kind) {
- return Kind == RecurKind::FMin || Kind == RecurKind::FMax;
- }
-
- /// Returns true if the recurrence kind is any min/max kind.
- static bool isMinMaxRecurrenceKind(RecurKind Kind) {
- return isIntMinMaxRecurrenceKind(Kind) || isFPMinMaxRecurrenceKind(Kind);
- }
-
+ static bool isArithmeticRecurrenceKind(RecurKind Kind);
+
+ /// Returns true if the recurrence kind is an integer min/max kind.
+ static bool isIntMinMaxRecurrenceKind(RecurKind Kind) {
+ return Kind == RecurKind::UMin || Kind == RecurKind::UMax ||
+ Kind == RecurKind::SMin || Kind == RecurKind::SMax;
+ }
+
+ /// Returns true if the recurrence kind is a floating-point min/max kind.
+ static bool isFPMinMaxRecurrenceKind(RecurKind Kind) {
+ return Kind == RecurKind::FMin || Kind == RecurKind::FMax;
+ }
+
+ /// Returns true if the recurrence kind is any min/max kind.
+ static bool isMinMaxRecurrenceKind(RecurKind Kind) {
+ return isIntMinMaxRecurrenceKind(Kind) || isFPMinMaxRecurrenceKind(Kind);
+ }
+
/// Returns the type of the recurrence. This type can be narrower than the
/// actual type of the Phi if the recurrence has been type-promoted.
- Type *getRecurrenceType() const { return RecurrenceType; }
+ Type *getRecurrenceType() const { return RecurrenceType; }
/// Returns a reference to the instructions used for type-promoting the
/// recurrence.
- const SmallPtrSet<Instruction *, 8> &getCastInsts() const { return CastInsts; }
+ const SmallPtrSet<Instruction *, 8> &getCastInsts() const { return CastInsts; }
/// Returns true if all source operands of the recurrence are SExtInsts.
- bool isSigned() const { return IsSigned; }
-
- /// Attempts to find a chain of operations from Phi to LoopExitInst that can
- /// be treated as a set of reductions instructions for in-loop reductions.
- SmallVector<Instruction *, 4> getReductionOpChain(PHINode *Phi,
- Loop *L) const;
+ bool isSigned() const { return IsSigned; }
+ /// Attempts to find a chain of operations from Phi to LoopExitInst that can
+ /// be treated as a set of reduction instructions for in-loop reductions.
+ SmallVector<Instruction *, 4> getReductionOpChain(PHINode *Phi,
+ Loop *L) const;
+
private:
// The starting value of the recurrence.
// It does not have to be zero!
@@ -246,7 +246,7 @@ private:
// The instruction who's value is used outside the loop.
Instruction *LoopExitInstr = nullptr;
// The kind of the recurrence.
- RecurKind Kind = RecurKind::None;
+ RecurKind Kind = RecurKind::None;
// The fast-math flags on the recurrent instructions. We propagate these
// fast-math flags into the vectorized FP instructions we generate.
FastMathFlags FMF;
diff --git a/contrib/libs/llvm12/include/llvm/Analysis/InlineAdvisor.h b/contrib/libs/llvm12/include/llvm/Analysis/InlineAdvisor.h
index e4ed1f1a81..37f8cc3442 100644
--- a/contrib/libs/llvm12/include/llvm/Analysis/InlineAdvisor.h
+++ b/contrib/libs/llvm12/include/llvm/Analysis/InlineAdvisor.h
@@ -16,10 +16,10 @@
#ifndef LLVM_INLINEADVISOR_H_
#define LLVM_INLINEADVISOR_H_
-#include "llvm/Analysis/InlineCost.h"
-#include "llvm/Config/llvm-config.h"
-#include "llvm/IR/PassManager.h"
-#include "llvm/Analysis/Utils/ImportedFunctionsInliningStatistics.h"
+#include "llvm/Analysis/InlineCost.h"
+#include "llvm/Config/llvm-config.h"
+#include "llvm/IR/PassManager.h"
+#include "llvm/Analysis/Utils/ImportedFunctionsInliningStatistics.h"
#include <memory>
#include <unordered_set>
@@ -43,11 +43,11 @@ class OptimizationRemarkEmitter;
/// requires the full C Tensorflow API library, and evaluates models
/// dynamically. This mode also permits generating training logs, for offline
/// training.
-enum class InliningAdvisorMode : int {
- Default,
- Release,
- Development
-};
+enum class InliningAdvisorMode : int {
+ Default,
+ Release,
+ Development
+};
class InlineAdvisor;
/// Capture state between an inlining decision having had been made, and
@@ -73,7 +73,7 @@ public:
/// behavior by implementing the corresponding record*Impl.
///
/// Call after inlining succeeded, and did not result in deleting the callee.
- void recordInlining();
+ void recordInlining();
/// Call after inlining succeeded, and resulted in deleting the callee.
void recordInliningWithCalleeDeleted();
@@ -119,44 +119,44 @@ private:
assert(!Recorded && "Recording should happen exactly once");
Recorded = true;
}
- void recordInlineStatsIfNeeded();
+ void recordInlineStatsIfNeeded();
bool Recorded = false;
};
-class DefaultInlineAdvice : public InlineAdvice {
-public:
- DefaultInlineAdvice(InlineAdvisor *Advisor, CallBase &CB,
- Optional<InlineCost> OIC, OptimizationRemarkEmitter &ORE,
- bool EmitRemarks = true)
- : InlineAdvice(Advisor, CB, ORE, OIC.hasValue()), OriginalCB(&CB),
- OIC(OIC), EmitRemarks(EmitRemarks) {}
-
-private:
- void recordUnsuccessfulInliningImpl(const InlineResult &Result) override;
- void recordInliningWithCalleeDeletedImpl() override;
- void recordInliningImpl() override;
-
-private:
- CallBase *const OriginalCB;
- Optional<InlineCost> OIC;
- bool EmitRemarks;
-};
-
+class DefaultInlineAdvice : public InlineAdvice {
+public:
+ DefaultInlineAdvice(InlineAdvisor *Advisor, CallBase &CB,
+ Optional<InlineCost> OIC, OptimizationRemarkEmitter &ORE,
+ bool EmitRemarks = true)
+ : InlineAdvice(Advisor, CB, ORE, OIC.hasValue()), OriginalCB(&CB),
+ OIC(OIC), EmitRemarks(EmitRemarks) {}
+
+private:
+ void recordUnsuccessfulInliningImpl(const InlineResult &Result) override;
+ void recordInliningWithCalleeDeletedImpl() override;
+ void recordInliningImpl() override;
+
+private:
+ CallBase *const OriginalCB;
+ Optional<InlineCost> OIC;
+ bool EmitRemarks;
+};
+
/// Interface for deciding whether to inline a call site or not.
class InlineAdvisor {
public:
InlineAdvisor(InlineAdvisor &&) = delete;
- virtual ~InlineAdvisor();
+ virtual ~InlineAdvisor();
/// Get an InlineAdvice containing a recommendation on whether to
/// inline or not. \p CB is assumed to be a direct call. \p FAM is assumed to
- /// be up-to-date wrt previous inlining decisions. \p MandatoryOnly indicates
- /// only mandatory (always-inline) call sites should be recommended - this
- /// allows the InlineAdvisor track such inlininings.
+ /// be up-to-date wrt previous inlining decisions. \p MandatoryOnly indicates
+ /// only mandatory (always-inline) call sites should be recommended - this
+ /// allows the InlineAdvisor to track such inlinings.
/// Returns an InlineAdvice with the inlining recommendation.
- std::unique_ptr<InlineAdvice> getAdvice(CallBase &CB,
- bool MandatoryOnly = false);
+ std::unique_ptr<InlineAdvice> getAdvice(CallBase &CB,
+ bool MandatoryOnly = false);
/// This must be called when the Inliner pass is entered, to allow the
/// InlineAdvisor update internal state, as result of function passes run
@@ -169,14 +169,14 @@ public:
virtual void onPassExit() {}
protected:
- InlineAdvisor(Module &M, FunctionAnalysisManager &FAM);
- virtual std::unique_ptr<InlineAdvice> getAdviceImpl(CallBase &CB) = 0;
- virtual std::unique_ptr<InlineAdvice> getMandatoryAdvice(CallBase &CB,
- bool Advice);
+ InlineAdvisor(Module &M, FunctionAnalysisManager &FAM);
+ virtual std::unique_ptr<InlineAdvice> getAdviceImpl(CallBase &CB) = 0;
+ virtual std::unique_ptr<InlineAdvice> getMandatoryAdvice(CallBase &CB,
+ bool Advice);
- Module &M;
+ Module &M;
FunctionAnalysisManager &FAM;
- std::unique_ptr<ImportedFunctionsInliningStatistics> ImportedFunctionsStats;
+ std::unique_ptr<ImportedFunctionsInliningStatistics> ImportedFunctionsStats;
/// We may want to defer deleting functions to after the inlining for a whole
/// module has finished. This allows us to reliably use function pointers as
@@ -191,14 +191,14 @@ protected:
return DeletedFunctions.count(F);
}
- enum class MandatoryInliningKind { NotMandatory, Always, Never };
-
- static MandatoryInliningKind getMandatoryKind(CallBase &CB,
- FunctionAnalysisManager &FAM,
- OptimizationRemarkEmitter &ORE);
-
- OptimizationRemarkEmitter &getCallerORE(CallBase &CB);
-
+ enum class MandatoryInliningKind { NotMandatory, Always, Never };
+
+ static MandatoryInliningKind getMandatoryKind(CallBase &CB,
+ FunctionAnalysisManager &FAM,
+ OptimizationRemarkEmitter &ORE);
+
+ OptimizationRemarkEmitter &getCallerORE(CallBase &CB);
+
private:
friend class InlineAdvice;
void markFunctionAsDeleted(Function *F);
@@ -210,12 +210,12 @@ private:
/// reusable as-is for inliner pass test scenarios, as well as for regular use.
class DefaultInlineAdvisor : public InlineAdvisor {
public:
- DefaultInlineAdvisor(Module &M, FunctionAnalysisManager &FAM,
- InlineParams Params)
- : InlineAdvisor(M, FAM), Params(Params) {}
+ DefaultInlineAdvisor(Module &M, FunctionAnalysisManager &FAM,
+ InlineParams Params)
+ : InlineAdvisor(M, FAM), Params(Params) {}
private:
- std::unique_ptr<InlineAdvice> getAdviceImpl(CallBase &CB) override;
+ std::unique_ptr<InlineAdvice> getAdviceImpl(CallBase &CB) override;
void onPassExit() override { freeDeletedFunctions(); }
@@ -235,8 +235,8 @@ public:
// InlineAdvisor must be preserved across analysis invalidations.
return false;
}
- bool tryCreate(InlineParams Params, InliningAdvisorMode Mode,
- StringRef ReplayFile);
+ bool tryCreate(InlineParams Params, InliningAdvisorMode Mode,
+ StringRef ReplayFile);
InlineAdvisor *getAdvisor() const { return Advisor.get(); }
void clear() { Advisor.reset(); }
@@ -254,12 +254,12 @@ std::unique_ptr<InlineAdvisor>
getReleaseModeAdvisor(Module &M, ModuleAnalysisManager &MAM);
#endif
-#ifdef LLVM_HAVE_TF_API
-std::unique_ptr<InlineAdvisor>
-getDevelopmentModeAdvisor(Module &M, ModuleAnalysisManager &MAM,
- std::function<bool(CallBase &)> GetDefaultAdvice);
-#endif
-
+#ifdef LLVM_HAVE_TF_API
+std::unique_ptr<InlineAdvisor>
+getDevelopmentModeAdvisor(Module &M, ModuleAnalysisManager &MAM,
+ std::function<bool(CallBase &)> GetDefaultAdvice);
+#endif
+
// Default (manual policy) decision making helper APIs. Shared with the legacy
// pass manager inliner.
@@ -278,9 +278,9 @@ void emitInlinedInto(OptimizationRemarkEmitter &ORE, DebugLoc DLoc,
bool ForProfileContext = false,
const char *PassName = nullptr);
-/// get call site location as string
-std::string getCallSiteLocation(DebugLoc DLoc);
-
+/// get call site location as string
+std::string getCallSiteLocation(DebugLoc DLoc);
+
/// Add location info to ORE message.
void addLocationToRemarks(OptimizationRemark &Remark, DebugLoc DLoc);
diff --git a/contrib/libs/llvm12/include/llvm/Analysis/InlineSizeEstimatorAnalysis.h b/contrib/libs/llvm12/include/llvm/Analysis/InlineSizeEstimatorAnalysis.h
index f1536aa2c4..8537820b23 100644
--- a/contrib/libs/llvm12/include/llvm/Analysis/InlineSizeEstimatorAnalysis.h
+++ b/contrib/libs/llvm12/include/llvm/Analysis/InlineSizeEstimatorAnalysis.h
@@ -38,19 +38,19 @@ public:
private:
std::unique_ptr<TFModelEvaluator> Evaluator;
};
-
-class InlineSizeEstimatorAnalysisPrinterPass
- : public PassInfoMixin<InlineSizeEstimatorAnalysisPrinterPass> {
- raw_ostream &OS;
-
-public:
- explicit InlineSizeEstimatorAnalysisPrinterPass(raw_ostream &OS) : OS(OS) {}
-
- PreservedAnalyses run(Function &F, FunctionAnalysisManager &AM);
-};
+
+class InlineSizeEstimatorAnalysisPrinterPass
+ : public PassInfoMixin<InlineSizeEstimatorAnalysisPrinterPass> {
+ raw_ostream &OS;
+
+public:
+ explicit InlineSizeEstimatorAnalysisPrinterPass(raw_ostream &OS) : OS(OS) {}
+
+ PreservedAnalyses run(Function &F, FunctionAnalysisManager &AM);
+};
} // namespace llvm
#endif // LLVM_ANALYSIS_INLINESIZEESTIMATORANALYSIS_H
-
+
#ifdef __GNUC__
#pragma GCC diagnostic pop
#endif
diff --git a/contrib/libs/llvm12/include/llvm/Analysis/InstCount.h b/contrib/libs/llvm12/include/llvm/Analysis/InstCount.h
index ef18c1e5a1..ad65952c6f 100644
--- a/contrib/libs/llvm12/include/llvm/Analysis/InstCount.h
+++ b/contrib/libs/llvm12/include/llvm/Analysis/InstCount.h
@@ -1,39 +1,39 @@
-#pragma once
-
-#ifdef __GNUC__
-#pragma GCC diagnostic push
-#pragma GCC diagnostic ignored "-Wunused-parameter"
-#endif
-
-//===- InstCount.h - Collects the count of all instructions -----*- C++ -*-===//
-//
-// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
-// See https://llvm.org/LICENSE.txt for license information.
-// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
-//
-//===----------------------------------------------------------------------===//
-//
-// This pass collects the count of all instructions and reports them
-//
-//===----------------------------------------------------------------------===//
-
-#ifndef LLVM_ANALYSIS_INSTCOUNT_H
-#define LLVM_ANALYSIS_INSTCOUNT_H
-
-#include "llvm/IR/PassManager.h"
-
-namespace llvm {
-
-class Function;
-
-struct InstCountPass : PassInfoMixin<InstCountPass> {
- PreservedAnalyses run(Function &F, FunctionAnalysisManager &);
-};
-
-} // end namespace llvm
-
-#endif // LLVM_ANALYSIS_INSTCOUNT_H
-
-#ifdef __GNUC__
-#pragma GCC diagnostic pop
-#endif
+#pragma once
+
+#ifdef __GNUC__
+#pragma GCC diagnostic push
+#pragma GCC diagnostic ignored "-Wunused-parameter"
+#endif
+
+//===- InstCount.h - Collects the count of all instructions -----*- C++ -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// This pass collects the count of all instructions and reports them
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_ANALYSIS_INSTCOUNT_H
+#define LLVM_ANALYSIS_INSTCOUNT_H
+
+#include "llvm/IR/PassManager.h"
+
+namespace llvm {
+
+class Function;
+
+struct InstCountPass : PassInfoMixin<InstCountPass> {
+ PreservedAnalyses run(Function &F, FunctionAnalysisManager &);
+};
+
+} // end namespace llvm
+
+#endif // LLVM_ANALYSIS_INSTCOUNT_H
+
+#ifdef __GNUC__
+#pragma GCC diagnostic pop
+#endif
diff --git a/contrib/libs/llvm12/include/llvm/Analysis/InstructionSimplify.h b/contrib/libs/llvm12/include/llvm/Analysis/InstructionSimplify.h
index 4add1631ba..cbff6d3736 100644
--- a/contrib/libs/llvm12/include/llvm/Analysis/InstructionSimplify.h
+++ b/contrib/libs/llvm12/include/llvm/Analysis/InstructionSimplify.h
@@ -33,10 +33,10 @@
// same call context of that function (and not split between caller and callee
// contexts of a directly recursive call, for example).
//
-// Additionally, these routines can't simplify to the instructions that are not
-// def-reachable, meaning we can't just scan the basic block for instructions
-// to simplify to.
-//
+// Additionally, these routines can't simplify to the instructions that are not
+// def-reachable, meaning we can't just scan the basic block for instructions
+// to simplify to.
+//
//===----------------------------------------------------------------------===//
#ifndef LLVM_ANALYSIS_INSTRUCTIONSIMPLIFY_H
@@ -109,39 +109,39 @@ struct SimplifyQuery {
// be safely used.
const InstrInfoQuery IIQ;
- /// Controls whether simplifications are allowed to constrain the range of
- /// possible values for uses of undef. If it is false, simplifications are not
- /// allowed to assume a particular value for a use of undef for example.
- bool CanUseUndef = true;
-
+ /// Controls whether simplifications are allowed to constrain the range of
+ /// possible values for uses of undef. If it is false, simplifications are not
+ /// allowed to assume a particular value for a use of undef for example.
+ bool CanUseUndef = true;
+
SimplifyQuery(const DataLayout &DL, const Instruction *CXTI = nullptr)
: DL(DL), CxtI(CXTI) {}
SimplifyQuery(const DataLayout &DL, const TargetLibraryInfo *TLI,
const DominatorTree *DT = nullptr,
AssumptionCache *AC = nullptr,
- const Instruction *CXTI = nullptr, bool UseInstrInfo = true,
- bool CanUseUndef = true)
- : DL(DL), TLI(TLI), DT(DT), AC(AC), CxtI(CXTI), IIQ(UseInstrInfo),
- CanUseUndef(CanUseUndef) {}
+ const Instruction *CXTI = nullptr, bool UseInstrInfo = true,
+ bool CanUseUndef = true)
+ : DL(DL), TLI(TLI), DT(DT), AC(AC), CxtI(CXTI), IIQ(UseInstrInfo),
+ CanUseUndef(CanUseUndef) {}
SimplifyQuery getWithInstruction(Instruction *I) const {
SimplifyQuery Copy(*this);
Copy.CxtI = I;
return Copy;
}
- SimplifyQuery getWithoutUndef() const {
- SimplifyQuery Copy(*this);
- Copy.CanUseUndef = false;
- return Copy;
- }
-
- /// If CanUseUndef is true, returns whether \p V is undef.
- /// Otherwise always return false.
- bool isUndefValue(Value *V) const {
- if (!CanUseUndef)
- return false;
- return isa<UndefValue>(V);
- }
+ SimplifyQuery getWithoutUndef() const {
+ SimplifyQuery Copy(*this);
+ Copy.CanUseUndef = false;
+ return Copy;
+ }
+
+ /// If CanUseUndef is true, returns whether \p V is undef.
+ /// Otherwise always return false.
+ bool isUndefValue(Value *V) const {
+ if (!CanUseUndef)
+ return false;
+ return isa<UndefValue>(V);
+ }
};
// NOTE: the explicit multiple argument versions of these functions are
@@ -299,8 +299,8 @@ Value *SimplifyFreezeInst(Value *Op, const SimplifyQuery &Q);
Value *SimplifyInstruction(Instruction *I, const SimplifyQuery &Q,
OptimizationRemarkEmitter *ORE = nullptr);
-/// See if V simplifies when its operand Op is replaced with RepOp. If not,
-/// return null.
+/// See if V simplifies when its operand Op is replaced with RepOp. If not,
+/// return null.
/// AllowRefinement specifies whether the simplification can be a refinement,
/// or whether it needs to be strictly identical.
Value *SimplifyWithOpReplaced(Value *V, Value *Op, Value *RepOp,
diff --git a/contrib/libs/llvm12/include/llvm/Analysis/IntervalIterator.h b/contrib/libs/llvm12/include/llvm/Analysis/IntervalIterator.h
index 105fcf3430..9476a63df6 100644
--- a/contrib/libs/llvm12/include/llvm/Analysis/IntervalIterator.h
+++ b/contrib/libs/llvm12/include/llvm/Analysis/IntervalIterator.h
@@ -88,7 +88,7 @@ inline void addNodeToInterval(Interval *Int, BasicBlock *BB) {
// BasicBlocks are added to the interval.
inline void addNodeToInterval(Interval *Int, Interval *I) {
// Add all of the nodes in I as new nodes in Int.
- llvm::append_range(Int->Nodes, I->Nodes);
+ llvm::append_range(Int->Nodes, I->Nodes);
}
template<class NodeTy, class OrigContainer_t, class GT = GraphTraits<NodeTy *>,
@@ -234,7 +234,7 @@ private:
if (Int->isSuccessor(NodeHeader)) {
// If we were in the successor list from before... remove from succ list
- llvm::erase_value(Int->Successors, NodeHeader);
+ llvm::erase_value(Int->Successors, NodeHeader);
}
// Now that we have discovered that Node is in the interval, perhaps some
diff --git a/contrib/libs/llvm12/include/llvm/Analysis/IteratedDominanceFrontier.h b/contrib/libs/llvm12/include/llvm/Analysis/IteratedDominanceFrontier.h
index d3d6488d74..6036777c70 100644
--- a/contrib/libs/llvm12/include/llvm/Analysis/IteratedDominanceFrontier.h
+++ b/contrib/libs/llvm12/include/llvm/Analysis/IteratedDominanceFrontier.h
@@ -80,7 +80,7 @@ ChildrenGetterTy<BasicBlock, IsPostDom>::get(const NodeRef &N) {
return {Children.begin(), Children.end()};
}
- return GD->template getChildren<IsPostDom>(N);
+ return GD->template getChildren<IsPostDom>(N);
}
} // end of namespace IDFCalculatorDetail
diff --git a/contrib/libs/llvm12/include/llvm/Analysis/LazyBranchProbabilityInfo.h b/contrib/libs/llvm12/include/llvm/Analysis/LazyBranchProbabilityInfo.h
index fd7252c103..f0dfb97b52 100644
--- a/contrib/libs/llvm12/include/llvm/Analysis/LazyBranchProbabilityInfo.h
+++ b/contrib/libs/llvm12/include/llvm/Analysis/LazyBranchProbabilityInfo.h
@@ -70,7 +70,7 @@ class LazyBranchProbabilityInfoPass : public FunctionPass {
BranchProbabilityInfo &getCalculated() {
if (!Calculated) {
assert(F && LI && "call setAnalysis");
- BPI.calculate(*F, *LI, TLI, nullptr, nullptr);
+ BPI.calculate(*F, *LI, TLI, nullptr, nullptr);
Calculated = true;
}
return BPI;
diff --git a/contrib/libs/llvm12/include/llvm/Analysis/LazyCallGraph.h b/contrib/libs/llvm12/include/llvm/Analysis/LazyCallGraph.h
index 2b0fa56f9d..a67d8079cd 100644
--- a/contrib/libs/llvm12/include/llvm/Analysis/LazyCallGraph.h
+++ b/contrib/libs/llvm12/include/llvm/Analysis/LazyCallGraph.h
@@ -1053,29 +1053,29 @@ public:
/// fully visited by the DFS prior to calling this routine.
void removeDeadFunction(Function &F);
- /// Add a new function split/outlined from an existing function.
- ///
- /// The new function may only reference other functions that the original
- /// function did.
- ///
- /// The original function must reference (either directly or indirectly) the
- /// new function.
- ///
- /// The new function may also reference the original function.
- /// It may end up in a parent SCC in the case that the original function's
- /// edge to the new function is a ref edge, and the edge back is a call edge.
- void addSplitFunction(Function &OriginalFunction, Function &NewFunction);
-
- /// Add new ref-recursive functions split/outlined from an existing function.
- ///
- /// The new functions may only reference other functions that the original
- /// function did. The new functions may reference (not call) the original
- /// function.
- ///
- /// The original function must reference (not call) all new functions.
- /// All new functions must reference (not call) each other.
- void addSplitRefRecursiveFunctions(Function &OriginalFunction,
- ArrayRef<Function *> NewFunctions);
+ /// Add a new function split/outlined from an existing function.
+ ///
+ /// The new function may only reference other functions that the original
+ /// function did.
+ ///
+ /// The original function must reference (either directly or indirectly) the
+ /// new function.
+ ///
+ /// The new function may also reference the original function.
+ /// It may end up in a parent SCC in the case that the original function's
+ /// edge to the new function is a ref edge, and the edge back is a call edge.
+ void addSplitFunction(Function &OriginalFunction, Function &NewFunction);
+
+ /// Add new ref-recursive functions split/outlined from an existing function.
+ ///
+ /// The new functions may only reference other functions that the original
+ /// function did. The new functions may reference (not call) the original
+ /// function.
+ ///
+ /// The original function must reference (not call) all new functions.
+ /// All new functions must reference (not call) each other.
+ void addSplitRefRecursiveFunctions(Function &OriginalFunction,
+ ArrayRef<Function *> NewFunctions);
///@}
@@ -1180,11 +1180,11 @@ private:
/// the NodeMap.
Node &insertInto(Function &F, Node *&MappedN);
- /// Helper to initialize a new node created outside of creating SCCs and add
- /// it to the NodeMap if necessary. For example, useful when a function is
- /// split.
- Node &initNode(Function &F);
-
+ /// Helper to initialize a new node created outside of creating SCCs and add
+ /// it to the NodeMap if necessary. For example, useful when a function is
+ /// split.
+ Node &initNode(Function &F);
+
/// Helper to update pointers back to the graph object during moves.
void updateGraphPtrs();
diff --git a/contrib/libs/llvm12/include/llvm/Analysis/LazyValueInfo.h b/contrib/libs/llvm12/include/llvm/Analysis/LazyValueInfo.h
index e729bfb6ae..ade4e43345 100644
--- a/contrib/libs/llvm12/include/llvm/Analysis/LazyValueInfo.h
+++ b/contrib/libs/llvm12/include/llvm/Analysis/LazyValueInfo.h
@@ -78,20 +78,20 @@ public:
Instruction *CxtI = nullptr);
/// Determine whether the specified value comparison with a constant is known
- /// to be true or false at the specified instruction.
- /// \p Pred is a CmpInst predicate. If \p UseBlockValue is true, the block
- /// value is also taken into account.
+ /// to be true or false at the specified instruction.
+ /// \p Pred is a CmpInst predicate. If \p UseBlockValue is true, the block
+ /// value is also taken into account.
Tristate getPredicateAt(unsigned Pred, Value *V, Constant *C,
- Instruction *CxtI, bool UseBlockValue = false);
+ Instruction *CxtI, bool UseBlockValue = false);
- /// Determine whether the specified value is known to be a constant at the
- /// specified instruction. Return null if not.
- Constant *getConstant(Value *V, Instruction *CxtI);
+ /// Determine whether the specified value is known to be a constant at the
+ /// specified instruction. Return null if not.
+ Constant *getConstant(Value *V, Instruction *CxtI);
/// Return the ConstantRange constraint that is known to hold for the
- /// specified value at the specified instruction. This may only be called
+ /// specified value at the specified instruction. This may only be called
/// on integer-typed Values.
- ConstantRange getConstantRange(Value *V, Instruction *CxtI,
+ ConstantRange getConstantRange(Value *V, Instruction *CxtI,
bool UndefAllowed = true);
/// Determine whether the specified value is known to be a
diff --git a/contrib/libs/llvm12/include/llvm/Analysis/Lint.h b/contrib/libs/llvm12/include/llvm/Analysis/Lint.h
index 69edc46162..76e5c760e1 100644
--- a/contrib/libs/llvm12/include/llvm/Analysis/Lint.h
+++ b/contrib/libs/llvm12/include/llvm/Analysis/Lint.h
@@ -26,34 +26,34 @@
#ifndef LLVM_ANALYSIS_LINT_H
#define LLVM_ANALYSIS_LINT_H
-#include "llvm/IR/PassManager.h"
-
+#include "llvm/IR/PassManager.h"
+
namespace llvm {
class FunctionPass;
class Module;
class Function;
-FunctionPass *createLintLegacyPassPass();
+FunctionPass *createLintLegacyPassPass();
-/// Lint a module.
+/// Lint a module.
///
/// This should only be used for debugging, because it plays games with
/// PassManagers and stuff.
-void lintModule(const Module &M);
-
-// Lint a function.
-void lintFunction(const Function &F);
+void lintModule(const Module &M);
-class LintPass : public PassInfoMixin<LintPass> {
-public:
- PreservedAnalyses run(Function &F, FunctionAnalysisManager &AM);
-};
+// Lint a function.
+void lintFunction(const Function &F);
-} // namespace llvm
+class LintPass : public PassInfoMixin<LintPass> {
+public:
+ PreservedAnalyses run(Function &F, FunctionAnalysisManager &AM);
+};
-#endif // LLVM_ANALYSIS_LINT_H
+} // namespace llvm
+#endif // LLVM_ANALYSIS_LINT_H
+
#ifdef __GNUC__
#pragma GCC diagnostic pop
#endif
diff --git a/contrib/libs/llvm12/include/llvm/Analysis/Loads.h b/contrib/libs/llvm12/include/llvm/Analysis/Loads.h
index e9ee328a15..686b6cddc9 100644
--- a/contrib/libs/llvm12/include/llvm/Analysis/Loads.h
+++ b/contrib/libs/llvm12/include/llvm/Analysis/Loads.h
@@ -162,15 +162,15 @@ Value *FindAvailablePtrLoadStore(Value *Ptr, Type *AccessTy, bool AtLeastAtomic,
BasicBlock::iterator &ScanFrom,
unsigned MaxInstsToScan, AAResults *AA,
bool *IsLoadCSE, unsigned *NumScanedInst);
-
-/// Returns true if a pointer value \p A can be replace with another pointer
-/// value \B if they are deemed equal through some means (e.g. information from
-/// conditions).
-/// NOTE: the current implementations is incomplete and unsound. It does not
-/// reject all invalid cases yet, but will be made stricter in the future. In
-/// particular this means returning true means unknown if replacement is safe.
-bool canReplacePointersIfEqual(Value *A, Value *B, const DataLayout &DL,
- Instruction *CtxI);
+
+/// Returns true if a pointer value \p A can be replace with another pointer
+/// value \B if they are deemed equal through some means (e.g. information from
+/// conditions).
+/// NOTE: the current implementations is incomplete and unsound. It does not
+/// reject all invalid cases yet, but will be made stricter in the future. In
+/// particular this means returning true means unknown if replacement is safe.
+bool canReplacePointersIfEqual(Value *A, Value *B, const DataLayout &DL,
+ Instruction *CtxI);
}
#endif
diff --git a/contrib/libs/llvm12/include/llvm/Analysis/LoopAccessAnalysis.h b/contrib/libs/llvm12/include/llvm/Analysis/LoopAccessAnalysis.h
index e1460d75cf..2dca69742a 100644
--- a/contrib/libs/llvm12/include/llvm/Analysis/LoopAccessAnalysis.h
+++ b/contrib/libs/llvm12/include/llvm/Analysis/LoopAccessAnalysis.h
@@ -178,8 +178,8 @@ public:
MemoryDepChecker(PredicatedScalarEvolution &PSE, const Loop *L)
: PSE(PSE), InnermostLoop(L), AccessIdx(0), MaxSafeDepDistBytes(0),
- MaxSafeVectorWidthInBits(-1U),
- FoundNonConstantDistanceDependence(false),
+ MaxSafeVectorWidthInBits(-1U),
+ FoundNonConstantDistanceDependence(false),
Status(VectorizationSafetyStatus::Safe), RecordDependences(true) {}
/// Register the location (instructions are given increasing numbers)
@@ -212,21 +212,21 @@ public:
return Status == VectorizationSafetyStatus::Safe;
}
- /// Return true if the number of elements that are safe to operate on
- /// simultaneously is not bounded.
- bool isSafeForAnyVectorWidth() const {
- return MaxSafeVectorWidthInBits == UINT_MAX;
- }
-
+ /// Return true if the number of elements that are safe to operate on
+ /// simultaneously is not bounded.
+ bool isSafeForAnyVectorWidth() const {
+ return MaxSafeVectorWidthInBits == UINT_MAX;
+ }
+
/// The maximum number of bytes of a vector register we can vectorize
/// the accesses safely with.
uint64_t getMaxSafeDepDistBytes() { return MaxSafeDepDistBytes; }
/// Return the number of elements that are safe to operate on
/// simultaneously, multiplied by the size of the element in bits.
- uint64_t getMaxSafeVectorWidthInBits() const {
- return MaxSafeVectorWidthInBits;
- }
+ uint64_t getMaxSafeVectorWidthInBits() const {
+ return MaxSafeVectorWidthInBits;
+ }
/// In same cases when the dependency check fails we can still
/// vectorize the loop with a dynamic array access check.
@@ -291,7 +291,7 @@ private:
/// operate on simultaneously, multiplied by the size of the element in bits.
/// The size of the element is taken from the memory access that is most
/// restrictive.
- uint64_t MaxSafeVectorWidthInBits;
+ uint64_t MaxSafeVectorWidthInBits;
/// If we see a non-constant dependence distance we can still try to
/// vectorize this loop with runtime checks.
@@ -434,7 +434,7 @@ public:
bool UseDependencies);
/// Returns the checks that generateChecks created.
- const SmallVectorImpl<RuntimePointerCheck> &getChecks() const {
+ const SmallVectorImpl<RuntimePointerCheck> &getChecks() const {
return Checks;
}
diff --git a/contrib/libs/llvm12/include/llvm/Analysis/LoopAnalysisManager.h b/contrib/libs/llvm12/include/llvm/Analysis/LoopAnalysisManager.h
index 3b1043055d..b341eba1f9 100644
--- a/contrib/libs/llvm12/include/llvm/Analysis/LoopAnalysisManager.h
+++ b/contrib/libs/llvm12/include/llvm/Analysis/LoopAnalysisManager.h
@@ -64,7 +64,7 @@ struct LoopStandardAnalysisResults {
ScalarEvolution &SE;
TargetLibraryInfo &TLI;
TargetTransformInfo &TTI;
- BlockFrequencyInfo *BFI;
+ BlockFrequencyInfo *BFI;
MemorySSA *MSSA;
};
diff --git a/contrib/libs/llvm12/include/llvm/Analysis/LoopCacheAnalysis.h b/contrib/libs/llvm12/include/llvm/Analysis/LoopCacheAnalysis.h
index 7b61a992aa..db618c6151 100644
--- a/contrib/libs/llvm12/include/llvm/Analysis/LoopCacheAnalysis.h
+++ b/contrib/libs/llvm12/include/llvm/Analysis/LoopCacheAnalysis.h
@@ -23,18 +23,18 @@
#include "llvm/Analysis/LoopAnalysisManager.h"
#include "llvm/IR/Instructions.h"
-#include "llvm/IR/PassManager.h"
+#include "llvm/IR/PassManager.h"
#include "llvm/Support/raw_ostream.h"
namespace llvm {
-class AAResults;
-class DependenceInfo;
+class AAResults;
+class DependenceInfo;
class LPMUpdater;
-class ScalarEvolution;
-class SCEV;
-class TargetTransformInfo;
-
+class ScalarEvolution;
+class SCEV;
+class TargetTransformInfo;
+
using CacheCostTy = int64_t;
using LoopVectorTy = SmallVector<Loop *, 8>;
@@ -78,7 +78,7 @@ public:
/// the same chace line iff the distance between them in the innermost
/// dimension is less than the cache line size. Return None if unsure.
Optional<bool> hasSpacialReuse(const IndexedReference &Other, unsigned CLS,
- AAResults &AA) const;
+ AAResults &AA) const;
/// Return true if the current object and the indexed reference \p Other
/// have distance smaller than \p MaxDistance in the dimension associated with
@@ -86,7 +86,7 @@ public:
/// MaxDistance and None if unsure.
Optional<bool> hasTemporalReuse(const IndexedReference &Other,
unsigned MaxDistance, const Loop &L,
- DependenceInfo &DI, AAResults &AA) const;
+ DependenceInfo &DI, AAResults &AA) const;
/// Compute the cost of the reference w.r.t. the given loop \p L when it is
/// considered in the innermost position in the loop nest.
@@ -126,7 +126,7 @@ private:
/// Return true if the given reference \p Other is definetely aliased with
/// the indexed reference represented by this class.
- bool isAliased(const IndexedReference &Other, AAResults &AA) const;
+ bool isAliased(const IndexedReference &Other, AAResults &AA) const;
private:
/// True if the reference can be delinearized, false otherwise.
@@ -191,7 +191,7 @@ public:
/// between array elements accessed in a loop so that the elements are
/// classified to have temporal reuse.
CacheCost(const LoopVectorTy &Loops, const LoopInfo &LI, ScalarEvolution &SE,
- TargetTransformInfo &TTI, AAResults &AA, DependenceInfo &DI,
+ TargetTransformInfo &TTI, AAResults &AA, DependenceInfo &DI,
Optional<unsigned> TRT = None);
/// Create a CacheCost for the loop nest rooted by \p Root.
@@ -205,9 +205,9 @@ public:
/// Return the estimated cost of loop \p L if the given loop is part of the
/// loop nest associated with this object. Return -1 otherwise.
CacheCostTy getLoopCost(const Loop &L) const {
- auto IT = llvm::find_if(LoopCosts, [&L](const LoopCacheCostTy &LCC) {
- return LCC.first == &L;
- });
+ auto IT = llvm::find_if(LoopCosts, [&L](const LoopCacheCostTy &LCC) {
+ return LCC.first == &L;
+ });
return (IT != LoopCosts.end()) ? (*IT).second : -1;
}
@@ -266,7 +266,7 @@ private:
const LoopInfo &LI;
ScalarEvolution &SE;
TargetTransformInfo &TTI;
- AAResults &AA;
+ AAResults &AA;
DependenceInfo &DI;
};
diff --git a/contrib/libs/llvm12/include/llvm/Analysis/LoopInfo.h b/contrib/libs/llvm12/include/llvm/Analysis/LoopInfo.h
index 0674e1d61f..52ee839a13 100644
--- a/contrib/libs/llvm12/include/llvm/Analysis/LoopInfo.h
+++ b/contrib/libs/llvm12/include/llvm/Analysis/LoopInfo.h
@@ -163,17 +163,17 @@ public:
reverse_iterator rbegin() const { return getSubLoops().rbegin(); }
reverse_iterator rend() const { return getSubLoops().rend(); }
- // LoopInfo does not detect irreducible control flow, just natural
- // loops. That is, it is possible that there is cyclic control
- // flow within the "innermost loop" or around the "outermost
- // loop".
-
- /// Return true if the loop does not contain any (natural) loops.
- bool isInnermost() const { return getSubLoops().empty(); }
- /// Return true if the loop does not have a parent (natural) loop
- // (i.e. it is outermost, which is the same as top-level).
- bool isOutermost() const { return getParentLoop() == nullptr; }
-
+ // LoopInfo does not detect irreducible control flow, just natural
+ // loops. That is, it is possible that there is cyclic control
+ // flow within the "innermost loop" or around the "outermost
+ // loop".
+
+ /// Return true if the loop does not contain any (natural) loops.
+ bool isInnermost() const { return getSubLoops().empty(); }
+ /// Return true if the loop does not have a parent (natural) loop
+ // (i.e. it is outermost, which is the same as top-level).
+ bool isOutermost() const { return getParentLoop() == nullptr; }
+
/// Get a list of the basic blocks which make up this loop.
ArrayRef<BlockT *> getBlocks() const {
assert(!isInvalid() && "Loop not in a valid state!");
@@ -309,9 +309,9 @@ public:
/// Otherwise return null.
BlockT *getUniqueExitBlock() const;
- /// Return true if this loop does not have any exit blocks.
- bool hasNoExitBlocks() const;
-
+ /// Return true if this loop does not have any exit blocks.
+ bool hasNoExitBlocks() const;
+
/// Edge type.
typedef std::pair<BlockT *, BlockT *> Edge;
@@ -850,9 +850,9 @@ public:
/// unrolling pass is run more than once (which it generally is).
void setLoopAlreadyUnrolled();
- /// Add llvm.loop.mustprogress to this loop's loop id metadata.
- void setLoopMustProgress();
-
+ /// Add llvm.loop.mustprogress to this loop's loop id metadata.
+ void setLoopMustProgress();
+
void dump() const;
void dumpVerbose() const;
@@ -997,7 +997,7 @@ public:
LoopT *removeLoop(iterator I) {
assert(I != end() && "Cannot remove end iterator!");
LoopT *L = *I;
- assert(L->isOutermost() && "Not a top-level loop!");
+ assert(L->isOutermost() && "Not a top-level loop!");
TopLevelLoops.erase(TopLevelLoops.begin() + (I - begin()));
return L;
}
@@ -1025,7 +1025,7 @@ public:
/// This adds the specified loop to the collection of top-level loops.
void addTopLevelLoop(LoopT *New) {
- assert(New->isOutermost() && "Loop already in subloop!");
+ assert(New->isOutermost() && "Loop already in subloop!");
TopLevelLoops.push_back(New);
}
diff --git a/contrib/libs/llvm12/include/llvm/Analysis/LoopInfoImpl.h b/contrib/libs/llvm12/include/llvm/Analysis/LoopInfoImpl.h
index 6032e9babb..72b1f3d0d5 100644
--- a/contrib/libs/llvm12/include/llvm/Analysis/LoopInfoImpl.h
+++ b/contrib/libs/llvm12/include/llvm/Analysis/LoopInfoImpl.h
@@ -75,13 +75,13 @@ void LoopBase<BlockT, LoopT>::getExitBlocks(
ExitBlocks.push_back(Succ);
}
-template <class BlockT, class LoopT>
-bool LoopBase<BlockT, LoopT>::hasNoExitBlocks() const {
- SmallVector<BlockT *, 8> ExitBlocks;
- getExitBlocks(ExitBlocks);
- return ExitBlocks.empty();
-}
-
+template <class BlockT, class LoopT>
+bool LoopBase<BlockT, LoopT>::hasNoExitBlocks() const {
+ SmallVector<BlockT *, 8> ExitBlocks;
+ getExitBlocks(ExitBlocks);
+ return ExitBlocks.empty();
+}
+
/// getExitBlock - If getExitBlocks would return exactly one block,
/// return that block. Otherwise return null.
template <class BlockT, class LoopT>
@@ -516,7 +516,7 @@ void PopulateLoopsDFS<BlockT, LoopT>::insertIntoLoop(BlockT *Block) {
if (Subloop && Block == Subloop->getHeader()) {
// We reach this point once per subloop after processing all the blocks in
// the subloop.
- if (!Subloop->isOutermost())
+ if (!Subloop->isOutermost())
Subloop->getParentLoop()->getSubLoopsVector().push_back(Subloop);
else
LI->addTopLevelLoop(Subloop);
@@ -680,13 +680,13 @@ static void compareLoops(const LoopT *L, const LoopT *OtherL,
"Mismatched basic blocks in the loops!");
const SmallPtrSetImpl<const BlockT *> &BlocksSet = L->getBlocksSet();
- const SmallPtrSetImpl<const BlockT *> &OtherBlocksSet =
- OtherL->getBlocksSet();
+ const SmallPtrSetImpl<const BlockT *> &OtherBlocksSet =
+ OtherL->getBlocksSet();
assert(BlocksSet.size() == OtherBlocksSet.size() &&
- llvm::all_of(BlocksSet,
- [&OtherBlocksSet](const BlockT *BB) {
- return OtherBlocksSet.count(BB);
- }) &&
+ llvm::all_of(BlocksSet,
+ [&OtherBlocksSet](const BlockT *BB) {
+ return OtherBlocksSet.count(BB);
+ }) &&
"Mismatched basic blocks in BlocksSets!");
}
#endif
@@ -696,7 +696,7 @@ void LoopInfoBase<BlockT, LoopT>::verify(
const DomTreeBase<BlockT> &DomTree) const {
DenseSet<const LoopT *> Loops;
for (iterator I = begin(), E = end(); I != E; ++I) {
- assert((*I)->isOutermost() && "Top-level loop has a parent!");
+ assert((*I)->isOutermost() && "Top-level loop has a parent!");
(*I)->verifyLoopNest(&Loops);
}
diff --git a/contrib/libs/llvm12/include/llvm/Analysis/LoopNestAnalysis.h b/contrib/libs/llvm12/include/llvm/Analysis/LoopNestAnalysis.h
index 2c7c3a21e3..4c6de08553 100644
--- a/contrib/libs/llvm12/include/llvm/Analysis/LoopNestAnalysis.h
+++ b/contrib/libs/llvm12/include/llvm/Analysis/LoopNestAnalysis.h
@@ -21,7 +21,7 @@
#ifndef LLVM_ANALYSIS_LOOPNESTANALYSIS_H
#define LLVM_ANALYSIS_LOOPNESTANALYSIS_H
-#include "llvm/ADT/STLExtras.h"
+#include "llvm/ADT/STLExtras.h"
#include "llvm/Analysis/LoopAnalysisManager.h"
#include "llvm/Analysis/LoopInfo.h"
@@ -67,12 +67,12 @@ public:
/// getMaxPerfectDepth(Loop_i) would return 2.
static unsigned getMaxPerfectDepth(const Loop &Root, ScalarEvolution &SE);
- /// Recursivelly traverse all empty 'single successor' basic blocks of \p From
- /// (if there are any). Return the last basic block found or \p End if it was
- /// reached during the search.
- static const BasicBlock &skipEmptyBlockUntil(const BasicBlock *From,
- const BasicBlock *End);
-
+ /// Recursivelly traverse all empty 'single successor' basic blocks of \p From
+ /// (if there are any). Return the last basic block found or \p End if it was
+ /// reached during the search.
+ static const BasicBlock &skipEmptyBlockUntil(const BasicBlock *From,
+ const BasicBlock *End);
+
/// Return the outermost loop in the loop nest.
Loop &getOutermostLoop() const { return *Loops.front(); }
@@ -138,16 +138,16 @@ public:
/// Return true if all loops in the loop nest are in simplify form.
bool areAllLoopsSimplifyForm() const {
- return all_of(Loops, [](const Loop *L) { return L->isLoopSimplifyForm(); });
- }
-
- /// Return true if all loops in the loop nest are in rotated form.
- bool areAllLoopsRotatedForm() const {
- return all_of(Loops, [](const Loop *L) { return L->isRotatedForm(); });
+ return all_of(Loops, [](const Loop *L) { return L->isLoopSimplifyForm(); });
}
- StringRef getName() const { return Loops.front()->getName(); }
-
+ /// Return true if all loops in the loop nest are in rotated form.
+ bool areAllLoopsRotatedForm() const {
+ return all_of(Loops, [](const Loop *L) { return L->isRotatedForm(); });
+ }
+
+ StringRef getName() const { return Loops.front()->getName(); }
+
protected:
const unsigned MaxPerfectDepth; // maximum perfect nesting depth level.
LoopVectorTy Loops; // the loops in the nest (in breadth first order).
diff --git a/contrib/libs/llvm12/include/llvm/Analysis/MLInlineAdvisor.h b/contrib/libs/llvm12/include/llvm/Analysis/MLInlineAdvisor.h
index da34d4fe96..6f4a551bb6 100644
--- a/contrib/libs/llvm12/include/llvm/Analysis/MLInlineAdvisor.h
+++ b/contrib/libs/llvm12/include/llvm/Analysis/MLInlineAdvisor.h
@@ -47,13 +47,13 @@ public:
const MLModelRunner &getModelRunner() const { return *ModelRunner.get(); }
protected:
- std::unique_ptr<InlineAdvice> getAdviceImpl(CallBase &CB) override;
-
- std::unique_ptr<InlineAdvice> getMandatoryAdvice(CallBase &CB,
- bool Advice) override;
-
- virtual std::unique_ptr<MLInlineAdvice> getMandatoryAdviceImpl(CallBase &CB);
+ std::unique_ptr<InlineAdvice> getAdviceImpl(CallBase &CB) override;
+ std::unique_ptr<InlineAdvice> getMandatoryAdvice(CallBase &CB,
+ bool Advice) override;
+
+ virtual std::unique_ptr<MLInlineAdvice> getMandatoryAdviceImpl(CallBase &CB);
+
virtual std::unique_ptr<MLInlineAdvice>
getAdviceFromModel(CallBase &CB, OptimizationRemarkEmitter &ORE);
@@ -113,7 +113,7 @@ private:
} // namespace llvm
#endif // LLVM_ANALYSIS_MLINLINEADVISOR_H
-
+
#ifdef __GNUC__
#pragma GCC diagnostic pop
#endif
diff --git a/contrib/libs/llvm12/include/llvm/Analysis/MemDerefPrinter.h b/contrib/libs/llvm12/include/llvm/Analysis/MemDerefPrinter.h
index c327d44935..22d163f8d1 100644
--- a/contrib/libs/llvm12/include/llvm/Analysis/MemDerefPrinter.h
+++ b/contrib/libs/llvm12/include/llvm/Analysis/MemDerefPrinter.h
@@ -1,35 +1,35 @@
-#pragma once
-
-#ifdef __GNUC__
-#pragma GCC diagnostic push
-#pragma GCC diagnostic ignored "-Wunused-parameter"
-#endif
-
-//===- MemDerefPrinter.h - Printer for isDereferenceablePointer -----------===//
-//
-// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
-// See https://llvm.org/LICENSE.txt for license information.
-// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
-//
-//===----------------------------------------------------------------------===//
-
-#ifndef LLVM_ANALYSIS_MEMDEREFPRINTER_H
-#define LLVM_ANALYSIS_MEMDEREFPRINTER_H
-
-#include "llvm/IR/PassManager.h"
-
-namespace llvm {
-class MemDerefPrinterPass : public PassInfoMixin<MemDerefPrinterPass> {
- raw_ostream &OS;
-
-public:
- MemDerefPrinterPass(raw_ostream &OS) : OS(OS) {}
- PreservedAnalyses run(Function &F, FunctionAnalysisManager &AM);
-};
-} // namespace llvm
-
-#endif // LLVM_ANALYSIS_MEMDEREFPRINTER_H
-
-#ifdef __GNUC__
-#pragma GCC diagnostic pop
-#endif
+#pragma once
+
+#ifdef __GNUC__
+#pragma GCC diagnostic push
+#pragma GCC diagnostic ignored "-Wunused-parameter"
+#endif
+
+//===- MemDerefPrinter.h - Printer for isDereferenceablePointer -----------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_ANALYSIS_MEMDEREFPRINTER_H
+#define LLVM_ANALYSIS_MEMDEREFPRINTER_H
+
+#include "llvm/IR/PassManager.h"
+
+namespace llvm {
+class MemDerefPrinterPass : public PassInfoMixin<MemDerefPrinterPass> {
+ raw_ostream &OS;
+
+public:
+ MemDerefPrinterPass(raw_ostream &OS) : OS(OS) {}
+ PreservedAnalyses run(Function &F, FunctionAnalysisManager &AM);
+};
+} // namespace llvm
+
+#endif // LLVM_ANALYSIS_MEMDEREFPRINTER_H
+
+#ifdef __GNUC__
+#pragma GCC diagnostic pop
+#endif
diff --git a/contrib/libs/llvm12/include/llvm/Analysis/MemoryDependenceAnalysis.h b/contrib/libs/llvm12/include/llvm/Analysis/MemoryDependenceAnalysis.h
index 84ec93a5cf..137c7e59d0 100644
--- a/contrib/libs/llvm12/include/llvm/Analysis/MemoryDependenceAnalysis.h
+++ b/contrib/libs/llvm12/include/llvm/Analysis/MemoryDependenceAnalysis.h
@@ -309,7 +309,7 @@ private:
/// The maximum size of the dereferences of the pointer.
///
/// May be UnknownSize if the sizes are unknown.
- LocationSize Size = LocationSize::afterPointer();
+ LocationSize Size = LocationSize::afterPointer();
/// The AA tags associated with dereferences of the pointer.
///
/// The members may be null if there are no tags or conflicting tags.
diff --git a/contrib/libs/llvm12/include/llvm/Analysis/MemoryLocation.h b/contrib/libs/llvm12/include/llvm/Analysis/MemoryLocation.h
index 331bbb7515..b4cc30b572 100644
--- a/contrib/libs/llvm12/include/llvm/Analysis/MemoryLocation.h
+++ b/contrib/libs/llvm12/include/llvm/Analysis/MemoryLocation.h
@@ -71,10 +71,10 @@ class VAArgInst;
// None.
class LocationSize {
enum : uint64_t {
- BeforeOrAfterPointer = ~uint64_t(0),
- AfterPointer = BeforeOrAfterPointer - 1,
- MapEmpty = BeforeOrAfterPointer - 2,
- MapTombstone = BeforeOrAfterPointer - 3,
+ BeforeOrAfterPointer = ~uint64_t(0),
+ AfterPointer = BeforeOrAfterPointer - 1,
+ MapEmpty = BeforeOrAfterPointer - 2,
+ MapTombstone = BeforeOrAfterPointer - 3,
ImpreciseBit = uint64_t(1) << 63,
// The maximum value we can represent without falling back to 'unknown'.
@@ -89,11 +89,11 @@ class LocationSize {
constexpr LocationSize(uint64_t Raw, DirectConstruction): Value(Raw) {}
- static_assert(AfterPointer & ImpreciseBit,
- "AfterPointer is imprecise by definition.");
- static_assert(BeforeOrAfterPointer & ImpreciseBit,
- "BeforeOrAfterPointer is imprecise by definition.");
-
+ static_assert(AfterPointer & ImpreciseBit,
+ "AfterPointer is imprecise by definition.");
+ static_assert(BeforeOrAfterPointer & ImpreciseBit,
+ "BeforeOrAfterPointer is imprecise by definition.");
+
public:
// FIXME: Migrate all users to construct via either `precise` or `upperBound`,
// to make it more obvious at the callsite the kind of size that they're
@@ -102,12 +102,12 @@ public:
// Since the overwhelming majority of users of this provide precise values,
// this assumes the provided value is precise.
constexpr LocationSize(uint64_t Raw)
- : Value(Raw > MaxValue ? AfterPointer : Raw) {}
+ : Value(Raw > MaxValue ? AfterPointer : Raw) {}
static LocationSize precise(uint64_t Value) { return LocationSize(Value); }
static LocationSize precise(TypeSize Value) {
if (Value.isScalable())
- return afterPointer();
+ return afterPointer();
return precise(Value.getFixedSize());
}
@@ -116,27 +116,27 @@ public:
if (LLVM_UNLIKELY(Value == 0))
return precise(0);
if (LLVM_UNLIKELY(Value > MaxValue))
- return afterPointer();
+ return afterPointer();
return LocationSize(Value | ImpreciseBit, Direct);
}
static LocationSize upperBound(TypeSize Value) {
if (Value.isScalable())
- return afterPointer();
+ return afterPointer();
return upperBound(Value.getFixedSize());
}
- /// Any location after the base pointer (but still within the underlying
- /// object).
- constexpr static LocationSize afterPointer() {
- return LocationSize(AfterPointer, Direct);
- }
-
- /// Any location before or after the base pointer (but still within the
- /// underlying object).
- constexpr static LocationSize beforeOrAfterPointer() {
- return LocationSize(BeforeOrAfterPointer, Direct);
+ /// Any location after the base pointer (but still within the underlying
+ /// object).
+ constexpr static LocationSize afterPointer() {
+ return LocationSize(AfterPointer, Direct);
}
+ /// Any location before or after the base pointer (but still within the
+ /// underlying object).
+ constexpr static LocationSize beforeOrAfterPointer() {
+ return LocationSize(BeforeOrAfterPointer, Direct);
+ }
+
// Sentinel values, generally used for maps.
constexpr static LocationSize mapTombstone() {
return LocationSize(MapTombstone, Direct);
@@ -151,24 +151,24 @@ public:
if (Other == *this)
return *this;
- if (Value == BeforeOrAfterPointer || Other.Value == BeforeOrAfterPointer)
- return beforeOrAfterPointer();
- if (Value == AfterPointer || Other.Value == AfterPointer)
- return afterPointer();
+ if (Value == BeforeOrAfterPointer || Other.Value == BeforeOrAfterPointer)
+ return beforeOrAfterPointer();
+ if (Value == AfterPointer || Other.Value == AfterPointer)
+ return afterPointer();
return upperBound(std::max(getValue(), Other.getValue()));
}
- bool hasValue() const {
- return Value != AfterPointer && Value != BeforeOrAfterPointer;
- }
+ bool hasValue() const {
+ return Value != AfterPointer && Value != BeforeOrAfterPointer;
+ }
uint64_t getValue() const {
assert(hasValue() && "Getting value from an unknown LocationSize!");
return Value & ~ImpreciseBit;
}
// Returns whether or not this value is precise. Note that if a value is
- // precise, it's guaranteed to not be unknown.
+ // precise, it's guaranteed to not be unknown.
bool isPrecise() const {
return (Value & ImpreciseBit) == 0;
}
@@ -176,9 +176,9 @@ public:
// Convenience method to check if this LocationSize's value is 0.
bool isZero() const { return hasValue() && getValue() == 0; }
- /// Whether accesses before the base pointer are possible.
- bool mayBeBeforePointer() const { return Value == BeforeOrAfterPointer; }
-
+ /// Whether accesses before the base pointer are possible.
+ bool mayBeBeforePointer() const { return Value == BeforeOrAfterPointer; }
+
bool operator==(const LocationSize &Other) const {
return Value == Other.Value;
}
@@ -269,30 +269,30 @@ public:
return getForArgument(Call, ArgIdx, &TLI);
}
- /// Return a location that may access any location after Ptr, while remaining
- /// within the underlying object.
- static MemoryLocation getAfter(const Value *Ptr,
- const AAMDNodes &AATags = AAMDNodes()) {
- return MemoryLocation(Ptr, LocationSize::afterPointer(), AATags);
- }
-
- /// Return a location that may access any location before or after Ptr, while
- /// remaining within the underlying object.
- static MemoryLocation
- getBeforeOrAfter(const Value *Ptr, const AAMDNodes &AATags = AAMDNodes()) {
- return MemoryLocation(Ptr, LocationSize::beforeOrAfterPointer(), AATags);
- }
-
+ /// Return a location that may access any location after Ptr, while remaining
+ /// within the underlying object.
+ static MemoryLocation getAfter(const Value *Ptr,
+ const AAMDNodes &AATags = AAMDNodes()) {
+ return MemoryLocation(Ptr, LocationSize::afterPointer(), AATags);
+ }
+
+ /// Return a location that may access any location before or after Ptr, while
+ /// remaining within the underlying object.
+ static MemoryLocation
+ getBeforeOrAfter(const Value *Ptr, const AAMDNodes &AATags = AAMDNodes()) {
+ return MemoryLocation(Ptr, LocationSize::beforeOrAfterPointer(), AATags);
+ }
+
// Return the exact size if the exact size is known at compiletime,
// otherwise return MemoryLocation::UnknownSize.
static uint64_t getSizeOrUnknown(const TypeSize &T) {
return T.isScalable() ? UnknownSize : T.getFixedSize();
}
- MemoryLocation()
- : Ptr(nullptr), Size(LocationSize::beforeOrAfterPointer()), AATags() {}
-
- explicit MemoryLocation(const Value *Ptr, LocationSize Size,
+ MemoryLocation()
+ : Ptr(nullptr), Size(LocationSize::beforeOrAfterPointer()), AATags() {}
+
+ explicit MemoryLocation(const Value *Ptr, LocationSize Size,
const AAMDNodes &AATags = AAMDNodes())
: Ptr(Ptr), Size(Size), AATags(AATags) {}
diff --git a/contrib/libs/llvm12/include/llvm/Analysis/MemorySSA.h b/contrib/libs/llvm12/include/llvm/Analysis/MemorySSA.h
index a8b23dccd2..906e09dcc9 100644
--- a/contrib/libs/llvm12/include/llvm/Analysis/MemorySSA.h
+++ b/contrib/libs/llvm12/include/llvm/Analysis/MemorySSA.h
@@ -95,7 +95,7 @@
#include "llvm/IR/DerivedUser.h"
#include "llvm/IR/Dominators.h"
#include "llvm/IR/Module.h"
-#include "llvm/IR/Operator.h"
+#include "llvm/IR/Operator.h"
#include "llvm/IR/Type.h"
#include "llvm/IR/Use.h"
#include "llvm/IR/User.h"
@@ -116,7 +116,7 @@ namespace llvm {
/// Enables memory ssa as a dependency for loop passes.
extern cl::opt<bool> EnableMSSALoopDependency;
-class AllocaInst;
+class AllocaInst;
class Function;
class Instruction;
class MemoryAccess;
@@ -279,7 +279,7 @@ public:
// Retrieve AliasResult type of the optimized access. Ideally this would be
// returned by the caching walker and may go away in the future.
Optional<AliasResult> getOptimizedAccessType() const {
- return isOptimized() ? OptimizedAccessAlias : None;
+ return isOptimized() ? OptimizedAccessAlias : None;
}
/// Reset the ID of what this MemoryUse was optimized to, causing it to
@@ -1185,11 +1185,11 @@ class upward_defs_iterator
using BaseT = upward_defs_iterator::iterator_facade_base;
public:
- upward_defs_iterator(const MemoryAccessPair &Info, DominatorTree *DT,
- bool *PerformedPhiTranslation = nullptr)
+ upward_defs_iterator(const MemoryAccessPair &Info, DominatorTree *DT,
+ bool *PerformedPhiTranslation = nullptr)
: DefIterator(Info.first), Location(Info.second),
- OriginalAccess(Info.first), DT(DT),
- PerformedPhiTranslation(PerformedPhiTranslation) {
+ OriginalAccess(Info.first), DT(DT),
+ PerformedPhiTranslation(PerformedPhiTranslation) {
CurrentPair.first = nullptr;
WalkingPhi = Info.first && isa<MemoryPhi>(Info.first);
@@ -1221,42 +1221,42 @@ public:
BasicBlock *getPhiArgBlock() const { return DefIterator.getPhiArgBlock(); }
private:
- /// Returns true if \p Ptr is guaranteed to be loop invariant for any possible
- /// loop. In particular, this guarantees that it only references a single
- /// MemoryLocation during execution of the containing function.
- bool IsGuaranteedLoopInvariant(Value *Ptr) const;
-
+ /// Returns true if \p Ptr is guaranteed to be loop invariant for any possible
+ /// loop. In particular, this guarantees that it only references a single
+ /// MemoryLocation during execution of the containing function.
+ bool IsGuaranteedLoopInvariant(Value *Ptr) const;
+
void fillInCurrentPair() {
CurrentPair.first = *DefIterator;
- CurrentPair.second = Location;
+ CurrentPair.second = Location;
if (WalkingPhi && Location.Ptr) {
- // Mark size as unknown, if the location is not guaranteed to be
- // loop-invariant for any possible loop in the function. Setting the size
- // to unknown guarantees that any memory accesses that access locations
- // after the pointer are considered as clobbers, which is important to
- // catch loop carried dependences.
- if (Location.Ptr &&
- !IsGuaranteedLoopInvariant(const_cast<Value *>(Location.Ptr)))
- CurrentPair.second =
- Location.getWithNewSize(LocationSize::beforeOrAfterPointer());
+ // Mark size as unknown, if the location is not guaranteed to be
+ // loop-invariant for any possible loop in the function. Setting the size
+ // to unknown guarantees that any memory accesses that access locations
+ // after the pointer are considered as clobbers, which is important to
+ // catch loop carried dependences.
+ if (Location.Ptr &&
+ !IsGuaranteedLoopInvariant(const_cast<Value *>(Location.Ptr)))
+ CurrentPair.second =
+ Location.getWithNewSize(LocationSize::beforeOrAfterPointer());
PHITransAddr Translator(
const_cast<Value *>(Location.Ptr),
OriginalAccess->getBlock()->getModule()->getDataLayout(), nullptr);
-
+
if (!Translator.PHITranslateValue(OriginalAccess->getBlock(),
DefIterator.getPhiArgBlock(), DT,
- true)) {
- Value *TransAddr = Translator.getAddr();
- if (TransAddr != Location.Ptr) {
- CurrentPair.second = CurrentPair.second.getWithNewPtr(TransAddr);
-
- if (TransAddr &&
- !IsGuaranteedLoopInvariant(const_cast<Value *>(TransAddr)))
- CurrentPair.second = CurrentPair.second.getWithNewSize(
- LocationSize::beforeOrAfterPointer());
-
- if (PerformedPhiTranslation)
- *PerformedPhiTranslation = true;
+ true)) {
+ Value *TransAddr = Translator.getAddr();
+ if (TransAddr != Location.Ptr) {
+ CurrentPair.second = CurrentPair.second.getWithNewPtr(TransAddr);
+
+ if (TransAddr &&
+ !IsGuaranteedLoopInvariant(const_cast<Value *>(TransAddr)))
+ CurrentPair.second = CurrentPair.second.getWithNewSize(
+ LocationSize::beforeOrAfterPointer());
+
+ if (PerformedPhiTranslation)
+ *PerformedPhiTranslation = true;
}
}
}
@@ -1266,15 +1266,15 @@ private:
memoryaccess_def_iterator DefIterator;
MemoryLocation Location;
MemoryAccess *OriginalAccess = nullptr;
- DominatorTree *DT = nullptr;
+ DominatorTree *DT = nullptr;
bool WalkingPhi = false;
- bool *PerformedPhiTranslation = nullptr;
+ bool *PerformedPhiTranslation = nullptr;
};
-inline upward_defs_iterator
-upward_defs_begin(const MemoryAccessPair &Pair, DominatorTree &DT,
- bool *PerformedPhiTranslation = nullptr) {
- return upward_defs_iterator(Pair, &DT, PerformedPhiTranslation);
+inline upward_defs_iterator
+upward_defs_begin(const MemoryAccessPair &Pair, DominatorTree &DT,
+ bool *PerformedPhiTranslation = nullptr) {
+ return upward_defs_iterator(Pair, &DT, PerformedPhiTranslation);
}
inline upward_defs_iterator upward_defs_end() { return upward_defs_iterator(); }
diff --git a/contrib/libs/llvm12/include/llvm/Analysis/MemorySSAUpdater.h b/contrib/libs/llvm12/include/llvm/Analysis/MemorySSAUpdater.h
index 2f1cfde006..f469eaa962 100644
--- a/contrib/libs/llvm12/include/llvm/Analysis/MemorySSAUpdater.h
+++ b/contrib/libs/llvm12/include/llvm/Analysis/MemorySSAUpdater.h
@@ -126,11 +126,11 @@ public:
ArrayRef<BasicBlock *> ExitBlocks,
ArrayRef<std::unique_ptr<ValueToValueMapTy>> VMaps, DominatorTree &DT);
- /// Apply CFG updates, analogous with the DT edge updates. By default, the
- /// DT is assumed to be already up to date. If UpdateDTFirst is true, first
- /// update the DT with the same updates.
- void applyUpdates(ArrayRef<CFGUpdate> Updates, DominatorTree &DT,
- bool UpdateDTFirst = false);
+ /// Apply CFG updates, analogous with the DT edge updates. By default, the
+ /// DT is assumed to be already up to date. If UpdateDTFirst is true, first
+ /// update the DT with the same updates.
+ void applyUpdates(ArrayRef<CFGUpdate> Updates, DominatorTree &DT,
+ bool UpdateDTFirst = false);
/// Apply CFG insert updates, analogous with the DT edge updates.
void applyInsertUpdates(ArrayRef<CFGUpdate> Updates, DominatorTree &DT);
diff --git a/contrib/libs/llvm12/include/llvm/Analysis/ModuleDebugInfoPrinter.h b/contrib/libs/llvm12/include/llvm/Analysis/ModuleDebugInfoPrinter.h
index 7472cc410a..838aa9d908 100644
--- a/contrib/libs/llvm12/include/llvm/Analysis/ModuleDebugInfoPrinter.h
+++ b/contrib/libs/llvm12/include/llvm/Analysis/ModuleDebugInfoPrinter.h
@@ -1,40 +1,40 @@
-#pragma once
-
-#ifdef __GNUC__
-#pragma GCC diagnostic push
-#pragma GCC diagnostic ignored "-Wunused-parameter"
-#endif
-
-//===- ModuleDebugInfoPrinter.h - -----------------------------------------===//
-//
-// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
-// See https://llvm.org/LICENSE.txt for license information.
-// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
-//
-//===----------------------------------------------------------------------===//
-
-#ifndef LLVM_ANALYSIS_MODULEDEBUGINFOPRINTER_H
-#define LLVM_ANALYSIS_MODULEDEBUGINFOPRINTER_H
-
-#include "llvm/IR/DebugInfo.h"
-#include "llvm/IR/PassManager.h"
-#include "llvm/Support/raw_ostream.h"
-
-namespace llvm {
-
-class ModuleDebugInfoPrinterPass
- : public PassInfoMixin<ModuleDebugInfoPrinterPass> {
- DebugInfoFinder Finder;
- raw_ostream &OS;
-
-public:
- explicit ModuleDebugInfoPrinterPass(raw_ostream &OS);
- PreservedAnalyses run(Module &M, ModuleAnalysisManager &AM);
-};
-} // end namespace llvm
-
-#endif // LLVM_ANALYSIS_MODULEDEBUGINFOPRINTER_H
-
-#ifdef __GNUC__
-#pragma GCC diagnostic pop
-#endif
+#pragma once
+
+#ifdef __GNUC__
+#pragma GCC diagnostic push
+#pragma GCC diagnostic ignored "-Wunused-parameter"
+#endif
+
+//===- ModuleDebugInfoPrinter.h - -----------------------------------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_ANALYSIS_MODULEDEBUGINFOPRINTER_H
+#define LLVM_ANALYSIS_MODULEDEBUGINFOPRINTER_H
+
+#include "llvm/IR/DebugInfo.h"
+#include "llvm/IR/PassManager.h"
+#include "llvm/Support/raw_ostream.h"
+
+namespace llvm {
+
+class ModuleDebugInfoPrinterPass
+ : public PassInfoMixin<ModuleDebugInfoPrinterPass> {
+ DebugInfoFinder Finder;
+ raw_ostream &OS;
+
+public:
+ explicit ModuleDebugInfoPrinterPass(raw_ostream &OS);
+ PreservedAnalyses run(Module &M, ModuleAnalysisManager &AM);
+};
+} // end namespace llvm
+
+#endif // LLVM_ANALYSIS_MODULEDEBUGINFOPRINTER_H
+
+#ifdef __GNUC__
+#pragma GCC diagnostic pop
+#endif
diff --git a/contrib/libs/llvm12/include/llvm/Analysis/MustExecute.h b/contrib/libs/llvm12/include/llvm/Analysis/MustExecute.h
index d6998f6de4..4f21c153cc 100644
--- a/contrib/libs/llvm12/include/llvm/Analysis/MustExecute.h
+++ b/contrib/libs/llvm12/include/llvm/Analysis/MustExecute.h
@@ -34,8 +34,8 @@
#include "llvm/ADT/DenseSet.h"
#include "llvm/Analysis/EHPersonalities.h"
#include "llvm/Analysis/InstructionPrecedenceTracking.h"
-#include "llvm/IR/PassManager.h"
-#include "llvm/Support/raw_ostream.h"
+#include "llvm/IR/PassManager.h"
+#include "llvm/Support/raw_ostream.h"
namespace llvm {
@@ -550,23 +550,23 @@ private:
MustBeExecutedIterator EndIterator;
};
-class MustExecutePrinterPass : public PassInfoMixin<MustExecutePrinterPass> {
- raw_ostream &OS;
-
-public:
- MustExecutePrinterPass(raw_ostream &OS) : OS(OS) {}
- PreservedAnalyses run(Function &F, FunctionAnalysisManager &AM);
-};
-
-class MustBeExecutedContextPrinterPass
- : public PassInfoMixin<MustBeExecutedContextPrinterPass> {
- raw_ostream &OS;
-
-public:
- MustBeExecutedContextPrinterPass(raw_ostream &OS) : OS(OS) {}
- PreservedAnalyses run(Module &M, ModuleAnalysisManager &AM);
-};
-
+class MustExecutePrinterPass : public PassInfoMixin<MustExecutePrinterPass> {
+ raw_ostream &OS;
+
+public:
+ MustExecutePrinterPass(raw_ostream &OS) : OS(OS) {}
+ PreservedAnalyses run(Function &F, FunctionAnalysisManager &AM);
+};
+
+class MustBeExecutedContextPrinterPass
+ : public PassInfoMixin<MustBeExecutedContextPrinterPass> {
+ raw_ostream &OS;
+
+public:
+ MustBeExecutedContextPrinterPass(raw_ostream &OS) : OS(OS) {}
+ PreservedAnalyses run(Module &M, ModuleAnalysisManager &AM);
+};
+
} // namespace llvm
#endif
diff --git a/contrib/libs/llvm12/include/llvm/Analysis/ObjCARCAnalysisUtils.h b/contrib/libs/llvm12/include/llvm/Analysis/ObjCARCAnalysisUtils.h
index 7c9b1ff296..f0814f34db 100644
--- a/contrib/libs/llvm12/include/llvm/Analysis/ObjCARCAnalysisUtils.h
+++ b/contrib/libs/llvm12/include/llvm/Analysis/ObjCARCAnalysisUtils.h
@@ -37,9 +37,9 @@
#include "llvm/IR/ValueHandle.h"
namespace llvm {
-
-class AAResults;
-
+
+class AAResults;
+
namespace objcarc {
/// A handy option to enable/disable all ARC Optimizations.
@@ -73,9 +73,9 @@ inline bool ModuleHasARC(const Module &M) {
/// This is a wrapper around getUnderlyingObject which also knows how to
/// look through objc_retain and objc_autorelease calls, which we know to return
/// their argument verbatim.
-inline const Value *GetUnderlyingObjCPtr(const Value *V) {
+inline const Value *GetUnderlyingObjCPtr(const Value *V) {
for (;;) {
- V = getUnderlyingObject(V);
+ V = getUnderlyingObject(V);
if (!IsForwarding(GetBasicARCInstKind(V)))
break;
V = cast<CallInst>(V)->getArgOperand(0);
@@ -86,12 +86,12 @@ inline const Value *GetUnderlyingObjCPtr(const Value *V) {
/// A wrapper for GetUnderlyingObjCPtr used for results memoization.
inline const Value *
-GetUnderlyingObjCPtrCached(const Value *V,
+GetUnderlyingObjCPtrCached(const Value *V,
DenseMap<const Value *, WeakTrackingVH> &Cache) {
if (auto InCache = Cache.lookup(V))
return InCache;
- const Value *Computed = GetUnderlyingObjCPtr(V);
+ const Value *Computed = GetUnderlyingObjCPtr(V);
Cache[V] = const_cast<Value *>(Computed);
return Computed;
}
@@ -154,7 +154,7 @@ inline bool IsPotentialRetainableObjPtr(const Value *Op) {
return false;
// Special arguments can not be a valid retainable object pointer.
if (const Argument *Arg = dyn_cast<Argument>(Op))
- if (Arg->hasPassPointeeByValueCopyAttr() || Arg->hasNestAttr() ||
+ if (Arg->hasPassPointeeByValueCopyAttr() || Arg->hasNestAttr() ||
Arg->hasStructRetAttr())
return false;
// Only consider values with pointer types.
@@ -170,7 +170,7 @@ inline bool IsPotentialRetainableObjPtr(const Value *Op) {
return true;
}
-bool IsPotentialRetainableObjPtr(const Value *Op, AAResults &AA);
+bool IsPotentialRetainableObjPtr(const Value *Op, AAResults &AA);
/// Helper for GetARCInstKind. Determines what kind of construct CS
/// is.
diff --git a/contrib/libs/llvm12/include/llvm/Analysis/OptimizationRemarkEmitter.h b/contrib/libs/llvm12/include/llvm/Analysis/OptimizationRemarkEmitter.h
index 3bbbe85e8a..4301fd4818 100644
--- a/contrib/libs/llvm12/include/llvm/Analysis/OptimizationRemarkEmitter.h
+++ b/contrib/libs/llvm12/include/llvm/Analysis/OptimizationRemarkEmitter.h
@@ -95,15 +95,15 @@ public:
/// provide more context so that non-trivial false positives can be quickly
/// detected by the user.
bool allowExtraAnalysis(StringRef PassName) const {
- return OptimizationRemarkEmitter::allowExtraAnalysis(*F, PassName);
- }
- static bool allowExtraAnalysis(const Function &F, StringRef PassName) {
- return allowExtraAnalysis(F.getContext(), PassName);
- }
- static bool allowExtraAnalysis(LLVMContext &Ctx, StringRef PassName) {
- return Ctx.getLLVMRemarkStreamer() ||
- Ctx.getDiagHandlerPtr()->isAnyRemarkEnabled(PassName);
+ return OptimizationRemarkEmitter::allowExtraAnalysis(*F, PassName);
}
+ static bool allowExtraAnalysis(const Function &F, StringRef PassName) {
+ return allowExtraAnalysis(F.getContext(), PassName);
+ }
+ static bool allowExtraAnalysis(LLVMContext &Ctx, StringRef PassName) {
+ return Ctx.getLLVMRemarkStreamer() ||
+ Ctx.getDiagHandlerPtr()->isAnyRemarkEnabled(PassName);
+ }
private:
const Function *F;
diff --git a/contrib/libs/llvm12/include/llvm/Analysis/PhiValues.h b/contrib/libs/llvm12/include/llvm/Analysis/PhiValues.h
index 15a722d067..c01f89dab5 100644
--- a/contrib/libs/llvm12/include/llvm/Analysis/PhiValues.h
+++ b/contrib/libs/llvm12/include/llvm/Analysis/PhiValues.h
@@ -28,7 +28,7 @@
#include "llvm/ADT/DenseMap.h"
#include "llvm/ADT/DenseSet.h"
-#include "llvm/ADT/SetVector.h"
+#include "llvm/ADT/SetVector.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/IR/PassManager.h"
#include "llvm/IR/ValueHandle.h"
@@ -47,7 +47,7 @@ class Function;
/// it is queried.
class PhiValues {
public:
- using ValueSet = SmallSetVector<Value *, 4>;
+ using ValueSet = SmallSetVector<Value *, 4>;
/// Construct an empty PhiValues.
PhiValues(const Function &F) : F(F) {}
@@ -77,7 +77,7 @@ public:
FunctionAnalysisManager::Invalidator &);
private:
- using ConstValueSet = SmallSetVector<const Value *, 4>;
+ using ConstValueSet = SmallSetVector<const Value *, 4>;
/// The next depth number to be used by processPhi.
unsigned int NextDepthNumber = 1;
diff --git a/contrib/libs/llvm12/include/llvm/Analysis/ProfileSummaryInfo.h b/contrib/libs/llvm12/include/llvm/Analysis/ProfileSummaryInfo.h
index c862044992..9f31d7041a 100644
--- a/contrib/libs/llvm12/include/llvm/Analysis/ProfileSummaryInfo.h
+++ b/contrib/libs/llvm12/include/llvm/Analysis/ProfileSummaryInfo.h
@@ -45,7 +45,7 @@ class Function;
// units. This would require making this depend on BFI.
class ProfileSummaryInfo {
private:
- const Module &M;
+ const Module &M;
std::unique_ptr<ProfileSummary> Summary;
void computeThresholds();
// Count thresholds to answer isHotCount and isColdCount queries.
@@ -65,8 +65,8 @@ private:
mutable DenseMap<int, uint64_t> ThresholdCache;
public:
- ProfileSummaryInfo(const Module &M) : M(M) { refresh(); }
-
+ ProfileSummaryInfo(const Module &M) : M(M) { refresh(); }
+
ProfileSummaryInfo(ProfileSummaryInfo &&Arg) = default;
/// If no summary is present, attempt to refresh.
diff --git a/contrib/libs/llvm12/include/llvm/Analysis/RegionInfoImpl.h b/contrib/libs/llvm12/include/llvm/Analysis/RegionInfoImpl.h
index 11c12808ff..e1a50f12d7 100644
--- a/contrib/libs/llvm12/include/llvm/Analysis/RegionInfoImpl.h
+++ b/contrib/libs/llvm12/include/llvm/Analysis/RegionInfoImpl.h
@@ -592,8 +592,8 @@ bool RegionInfoBase<Tr>::isRegion(BlockT *entry, BlockT *exit) const {
// Exit is the header of a loop that contains the entry. In this case,
// the dominance frontier must only contain the exit.
if (!DT->dominates(entry, exit)) {
- for (BlockT *successor : *entrySuccs) {
- if (successor != exit && successor != entry)
+ for (BlockT *successor : *entrySuccs) {
+ if (successor != exit && successor != entry)
return false;
}
@@ -822,7 +822,7 @@ void RegionInfoBase<Tr>::verifyAnalysis() const {
// Region pass manager support.
template <class Tr>
typename Tr::RegionT *RegionInfoBase<Tr>::getRegionFor(BlockT *BB) const {
- return BBtoRegion.lookup(BB);
+ return BBtoRegion.lookup(BB);
}
template <class Tr>
@@ -897,7 +897,7 @@ typename Tr::RegionT *RegionInfoBase<Tr>::getCommonRegion(RegionT *A,
template <class Tr>
typename Tr::RegionT *
RegionInfoBase<Tr>::getCommonRegion(SmallVectorImpl<RegionT *> &Regions) const {
- RegionT *ret = Regions.pop_back_val();
+ RegionT *ret = Regions.pop_back_val();
for (RegionT *R : Regions)
ret = getCommonRegion(ret, R);
diff --git a/contrib/libs/llvm12/include/llvm/Analysis/ReplayInlineAdvisor.h b/contrib/libs/llvm12/include/llvm/Analysis/ReplayInlineAdvisor.h
index d2e2d7d785..40d6ff8d78 100644
--- a/contrib/libs/llvm12/include/llvm/Analysis/ReplayInlineAdvisor.h
+++ b/contrib/libs/llvm12/include/llvm/Analysis/ReplayInlineAdvisor.h
@@ -1,52 +1,52 @@
-#pragma once
-
-#ifdef __GNUC__
-#pragma GCC diagnostic push
-#pragma GCC diagnostic ignored "-Wunused-parameter"
-#endif
-
-//===- ReplayInlineAdvisor.h - Replay Inline Advisor interface -*- C++ --*-===//
-//
-// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
-// See https://llvm.org/LICENSE.txt for license information.
-// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
-//
-//===----------------------------------------------------------------------===//
-//
-#ifndef LLVM_ANALYSIS_REPLAYINLINEADVISOR_H
-#define LLVM_ANALYSIS_REPLAYINLINEADVISOR_H
-
-#include "llvm/ADT/StringSet.h"
-#include "llvm/Analysis/InlineAdvisor.h"
-#include "llvm/IR/LLVMContext.h"
-
-namespace llvm {
-class BasicBlock;
-class CallBase;
-class Function;
-class Module;
-class OptimizationRemarkEmitter;
-
-/// Replay inline advisor that uses optimization remarks from inlining of
-/// previous build to guide current inlining. This is useful for inliner tuning.
-class ReplayInlineAdvisor : public InlineAdvisor {
-public:
- ReplayInlineAdvisor(Module &M, FunctionAnalysisManager &FAM,
- LLVMContext &Context,
- std::unique_ptr<InlineAdvisor> OriginalAdvisor,
- StringRef RemarksFile, bool EmitRemarks);
- std::unique_ptr<InlineAdvice> getAdviceImpl(CallBase &CB) override;
- bool areReplayRemarksLoaded() const { return HasReplayRemarks; }
-
-private:
- StringSet<> InlineSitesFromRemarks;
- std::unique_ptr<InlineAdvisor> OriginalAdvisor;
- bool HasReplayRemarks = false;
- bool EmitRemarks = false;
-};
-} // namespace llvm
-#endif // LLVM_ANALYSIS_REPLAYINLINEADVISOR_H
-
-#ifdef __GNUC__
-#pragma GCC diagnostic pop
-#endif
+#pragma once
+
+#ifdef __GNUC__
+#pragma GCC diagnostic push
+#pragma GCC diagnostic ignored "-Wunused-parameter"
+#endif
+
+//===- ReplayInlineAdvisor.h - Replay Inline Advisor interface -*- C++ --*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+#ifndef LLVM_ANALYSIS_REPLAYINLINEADVISOR_H
+#define LLVM_ANALYSIS_REPLAYINLINEADVISOR_H
+
+#include "llvm/ADT/StringSet.h"
+#include "llvm/Analysis/InlineAdvisor.h"
+#include "llvm/IR/LLVMContext.h"
+
+namespace llvm {
+class BasicBlock;
+class CallBase;
+class Function;
+class Module;
+class OptimizationRemarkEmitter;
+
+/// Replay inline advisor that uses optimization remarks from inlining of
+/// previous build to guide current inlining. This is useful for inliner tuning.
+class ReplayInlineAdvisor : public InlineAdvisor {
+public:
+ ReplayInlineAdvisor(Module &M, FunctionAnalysisManager &FAM,
+ LLVMContext &Context,
+ std::unique_ptr<InlineAdvisor> OriginalAdvisor,
+ StringRef RemarksFile, bool EmitRemarks);
+ std::unique_ptr<InlineAdvice> getAdviceImpl(CallBase &CB) override;
+ bool areReplayRemarksLoaded() const { return HasReplayRemarks; }
+
+private:
+ StringSet<> InlineSitesFromRemarks;
+ std::unique_ptr<InlineAdvisor> OriginalAdvisor;
+ bool HasReplayRemarks = false;
+ bool EmitRemarks = false;
+};
+} // namespace llvm
+#endif // LLVM_ANALYSIS_REPLAYINLINEADVISOR_H
+
+#ifdef __GNUC__
+#pragma GCC diagnostic pop
+#endif
diff --git a/contrib/libs/llvm12/include/llvm/Analysis/ScalarEvolution.h b/contrib/libs/llvm12/include/llvm/Analysis/ScalarEvolution.h
index 12110dd0c6..9788c9a473 100644
--- a/contrib/libs/llvm12/include/llvm/Analysis/ScalarEvolution.h
+++ b/contrib/libs/llvm12/include/llvm/Analysis/ScalarEvolution.h
@@ -77,7 +77,7 @@ class StructType;
class TargetLibraryInfo;
class Type;
class Value;
-enum SCEVTypes : unsigned short;
+enum SCEVTypes : unsigned short;
/// This class represents an analyzed expression in the program. These are
/// opaque objects that the client is not allowed to do much with directly.
@@ -90,7 +90,7 @@ class SCEV : public FoldingSetNode {
FoldingSetNodeIDRef FastID;
// The SCEV baseclass this node corresponds to
- const SCEVTypes SCEVType;
+ const SCEVTypes SCEVType;
protected:
// Estimated complexity of this node's expression tree size.
@@ -127,13 +127,13 @@ public:
NoWrapMask = (1 << 3) - 1
};
- explicit SCEV(const FoldingSetNodeIDRef ID, SCEVTypes SCEVTy,
+ explicit SCEV(const FoldingSetNodeIDRef ID, SCEVTypes SCEVTy,
unsigned short ExpressionSize)
: FastID(ID), SCEVType(SCEVTy), ExpressionSize(ExpressionSize) {}
SCEV(const SCEV &) = delete;
SCEV &operator=(const SCEV &) = delete;
- SCEVTypes getSCEVType() const { return SCEVType; }
+ SCEVTypes getSCEVType() const { return SCEVType; }
/// Return the LLVM type of this SCEV expression.
Type *getType() const;
@@ -519,7 +519,7 @@ public:
const SCEV *getConstant(ConstantInt *V);
const SCEV *getConstant(const APInt &Val);
const SCEV *getConstant(Type *Ty, uint64_t V, bool isSigned = false);
- const SCEV *getPtrToIntExpr(const SCEV *Op, Type *Ty, unsigned Depth = 0);
+ const SCEV *getPtrToIntExpr(const SCEV *Op, Type *Ty, unsigned Depth = 0);
const SCEV *getTruncateExpr(const SCEV *Op, Type *Ty, unsigned Depth = 0);
const SCEV *getZeroExtendExpr(const SCEV *Op, Type *Ty, unsigned Depth = 0);
const SCEV *getSignExtendExpr(const SCEV *Op, Type *Ty, unsigned Depth = 0);
@@ -581,9 +581,9 @@ public:
/// \p IndexExprs The expressions for the indices.
const SCEV *getGEPExpr(GEPOperator *GEP,
const SmallVectorImpl<const SCEV *> &IndexExprs);
- const SCEV *getAbsExpr(const SCEV *Op, bool IsNSW);
- const SCEV *getSignumExpr(const SCEV *Op);
- const SCEV *getMinMaxExpr(SCEVTypes Kind,
+ const SCEV *getAbsExpr(const SCEV *Op, bool IsNSW);
+ const SCEV *getSignumExpr(const SCEV *Op);
+ const SCEV *getMinMaxExpr(SCEVTypes Kind,
SmallVectorImpl<const SCEV *> &Operands);
const SCEV *getSMaxExpr(const SCEV *LHS, const SCEV *RHS);
const SCEV *getSMaxExpr(SmallVectorImpl<const SCEV *> &Operands);
@@ -602,22 +602,22 @@ public:
/// Return a SCEV for the constant 1 of a specific type.
const SCEV *getOne(Type *Ty) { return getConstant(Ty, 1); }
- /// Return a SCEV for the constant -1 of a specific type.
- const SCEV *getMinusOne(Type *Ty) {
- return getConstant(Ty, -1, /*isSigned=*/true);
- }
-
- /// Return an expression for sizeof ScalableTy that is type IntTy, where
- /// ScalableTy is a scalable vector type.
- const SCEV *getSizeOfScalableVectorExpr(Type *IntTy,
- ScalableVectorType *ScalableTy);
-
- /// Return an expression for the alloc size of AllocTy that is type IntTy
+ /// Return a SCEV for the constant -1 of a specific type.
+ const SCEV *getMinusOne(Type *Ty) {
+ return getConstant(Ty, -1, /*isSigned=*/true);
+ }
+
+ /// Return an expression for sizeof ScalableTy that is type IntTy, where
+ /// ScalableTy is a scalable vector type.
+ const SCEV *getSizeOfScalableVectorExpr(Type *IntTy,
+ ScalableVectorType *ScalableTy);
+
+ /// Return an expression for the alloc size of AllocTy that is type IntTy
const SCEV *getSizeOfExpr(Type *IntTy, Type *AllocTy);
- /// Return an expression for the store size of StoreTy that is type IntTy
- const SCEV *getStoreSizeOfExpr(Type *IntTy, Type *StoreTy);
-
+ /// Return an expression for the store size of StoreTy that is type IntTy
+ const SCEV *getStoreSizeOfExpr(Type *IntTy, Type *StoreTy);
+
/// Return an expression for offsetof on the given field with type IntTy
const SCEV *getOffsetOfExpr(Type *IntTy, StructType *STy, unsigned FieldNo);
@@ -701,12 +701,12 @@ public:
bool isLoopEntryGuardedByCond(const Loop *L, ICmpInst::Predicate Pred,
const SCEV *LHS, const SCEV *RHS);
- /// Test whether entry to the basic block is protected by a conditional
- /// between LHS and RHS.
- bool isBasicBlockEntryGuardedByCond(const BasicBlock *BB,
- ICmpInst::Predicate Pred, const SCEV *LHS,
- const SCEV *RHS);
-
+ /// Test whether entry to the basic block is protected by a conditional
+ /// between LHS and RHS.
+ bool isBasicBlockEntryGuardedByCond(const BasicBlock *BB,
+ ICmpInst::Predicate Pred, const SCEV *LHS,
+ const SCEV *RHS);
+
/// Test whether the backedge of the loop is protected by a conditional
/// between LHS and RHS. This is used to eliminate casts.
bool isLoopBackedgeGuardedByCond(const Loop *L, ICmpInst::Predicate Pred,
@@ -726,8 +726,8 @@ public:
/// before taking the branch. For loops with multiple exits, it may not be
/// the number times that the loop header executes if the loop exits
/// prematurely via another branch.
- unsigned getSmallConstantTripCount(const Loop *L,
- const BasicBlock *ExitingBlock);
+ unsigned getSmallConstantTripCount(const Loop *L,
+ const BasicBlock *ExitingBlock);
/// Returns the upper bound of the loop trip count as a normal unsigned
/// value.
@@ -749,7 +749,7 @@ public:
/// for getSmallConstantTripCount, this assumes that control exits the loop
/// via ExitingBlock.
unsigned getSmallConstantTripMultiple(const Loop *L,
- const BasicBlock *ExitingBlock);
+ const BasicBlock *ExitingBlock);
/// The terms "backedge taken count" and "exit count" are used
/// interchangeably to refer to the number of times the backedge of a loop
@@ -760,8 +760,8 @@ public:
Exact,
/// A constant which provides an upper bound on the exact trip count.
ConstantMaximum,
- /// An expression which provides an upper bound on the exact trip count.
- SymbolicMaximum,
+ /// An expression which provides an upper bound on the exact trip count.
+ SymbolicMaximum,
};
/// Return the number of times the backedge executes before the given exit
@@ -769,8 +769,8 @@ public:
/// For a single exit loop, this value is equivelent to the result of
/// getBackedgeTakenCount. The loop is guaranteed to exit (via *some* exit)
/// before the backedge is executed (ExitCount + 1) times. Note that there
- /// is no guarantee about *which* exit is taken on the exiting iteration.
- const SCEV *getExitCount(const Loop *L, const BasicBlock *ExitingBlock,
+ /// is no guarantee about *which* exit is taken on the exiting iteration.
+ const SCEV *getExitCount(const Loop *L, const BasicBlock *ExitingBlock,
ExitCountKind Kind = Exact);
/// If the specified loop has a predictable backedge-taken count, return it,
@@ -798,16 +798,16 @@ public:
/// SCEVCouldNotCompute object.
const SCEV *getConstantMaxBackedgeTakenCount(const Loop *L) {
return getBackedgeTakenCount(L, ConstantMaximum);
- }
-
- /// When successful, this returns a SCEV that is greater than or equal
- /// to (i.e. a "conservative over-approximation") of the value returend by
- /// getBackedgeTakenCount. If such a value cannot be computed, it returns the
- /// SCEVCouldNotCompute object.
- const SCEV *getSymbolicMaxBackedgeTakenCount(const Loop *L) {
- return getBackedgeTakenCount(L, SymbolicMaximum);
- }
-
+ }
+
+ /// When successful, this returns a SCEV that is greater than or equal
+ /// to (i.e. a "conservative over-approximation") of the value returend by
+ /// getBackedgeTakenCount. If such a value cannot be computed, it returns the
+ /// SCEVCouldNotCompute object.
+ const SCEV *getSymbolicMaxBackedgeTakenCount(const Loop *L) {
+ return getBackedgeTakenCount(L, SymbolicMaximum);
+ }
+
/// Return true if the backedge taken count is either the value returned by
/// getConstantMaxBackedgeTakenCount or zero.
bool isBackedgeTakenCountMaxOrZero(const Loop *L);
@@ -945,11 +945,11 @@ public:
bool isKnownPredicate(ICmpInst::Predicate Pred, const SCEV *LHS,
const SCEV *RHS);
- /// Test if the given expression is known to satisfy the condition described
- /// by Pred, LHS, and RHS in the given Context.
- bool isKnownPredicateAt(ICmpInst::Predicate Pred, const SCEV *LHS,
- const SCEV *RHS, const Instruction *Context);
-
+ /// Test if the given expression is known to satisfy the condition described
+ /// by Pred, LHS, and RHS in the given Context.
+ bool isKnownPredicateAt(ICmpInst::Predicate Pred, const SCEV *LHS,
+ const SCEV *RHS, const Instruction *Context);
+
/// Test if the condition described by Pred, LHS, RHS is known to be true on
/// every iteration of the loop of the recurrency LHS.
bool isKnownOnEveryIteration(ICmpInst::Predicate Pred,
@@ -960,47 +960,47 @@ public:
/// around. A predicate is said to be monotonically decreasing if may go
/// from being true to being false as the loop iterates, but never the other
/// way around.
- enum MonotonicPredicateType {
- MonotonicallyIncreasing,
- MonotonicallyDecreasing
- };
-
- /// If, for all loop invariant X, the predicate "LHS `Pred` X" is
- /// monotonically increasing or decreasing, returns
- /// Some(MonotonicallyIncreasing) and Some(MonotonicallyDecreasing)
- /// respectively. If we could not prove either of these facts, returns None.
- Optional<MonotonicPredicateType>
- getMonotonicPredicateType(const SCEVAddRecExpr *LHS,
- ICmpInst::Predicate Pred);
-
- struct LoopInvariantPredicate {
- ICmpInst::Predicate Pred;
- const SCEV *LHS;
- const SCEV *RHS;
-
- LoopInvariantPredicate(ICmpInst::Predicate Pred, const SCEV *LHS,
- const SCEV *RHS)
- : Pred(Pred), LHS(LHS), RHS(RHS) {}
- };
- /// If the result of the predicate LHS `Pred` RHS is loop invariant with
- /// respect to L, return a LoopInvariantPredicate with LHS and RHS being
- /// invariants, available at L's entry. Otherwise, return None.
- Optional<LoopInvariantPredicate>
- getLoopInvariantPredicate(ICmpInst::Predicate Pred, const SCEV *LHS,
- const SCEV *RHS, const Loop *L);
-
- /// If the result of the predicate LHS `Pred` RHS is loop invariant with
- /// respect to L at given Context during at least first MaxIter iterations,
- /// return a LoopInvariantPredicate with LHS and RHS being invariants,
- /// available at L's entry. Otherwise, return None. The predicate should be
- /// the loop's exit condition.
- Optional<LoopInvariantPredicate>
- getLoopInvariantExitCondDuringFirstIterations(ICmpInst::Predicate Pred,
- const SCEV *LHS,
- const SCEV *RHS, const Loop *L,
- const Instruction *Context,
- const SCEV *MaxIter);
-
+ enum MonotonicPredicateType {
+ MonotonicallyIncreasing,
+ MonotonicallyDecreasing
+ };
+
+ /// If, for all loop invariant X, the predicate "LHS `Pred` X" is
+ /// monotonically increasing or decreasing, returns
+ /// Some(MonotonicallyIncreasing) and Some(MonotonicallyDecreasing)
+ /// respectively. If we could not prove either of these facts, returns None.
+ Optional<MonotonicPredicateType>
+ getMonotonicPredicateType(const SCEVAddRecExpr *LHS,
+ ICmpInst::Predicate Pred);
+
+ struct LoopInvariantPredicate {
+ ICmpInst::Predicate Pred;
+ const SCEV *LHS;
+ const SCEV *RHS;
+
+ LoopInvariantPredicate(ICmpInst::Predicate Pred, const SCEV *LHS,
+ const SCEV *RHS)
+ : Pred(Pred), LHS(LHS), RHS(RHS) {}
+ };
+ /// If the result of the predicate LHS `Pred` RHS is loop invariant with
+ /// respect to L, return a LoopInvariantPredicate with LHS and RHS being
+ /// invariants, available at L's entry. Otherwise, return None.
+ Optional<LoopInvariantPredicate>
+ getLoopInvariantPredicate(ICmpInst::Predicate Pred, const SCEV *LHS,
+ const SCEV *RHS, const Loop *L);
+
+ /// If the result of the predicate LHS `Pred` RHS is loop invariant with
+ /// respect to L at given Context during at least first MaxIter iterations,
+ /// return a LoopInvariantPredicate with LHS and RHS being invariants,
+ /// available at L's entry. Otherwise, return None. The predicate should be
+ /// the loop's exit condition.
+ Optional<LoopInvariantPredicate>
+ getLoopInvariantExitCondDuringFirstIterations(ICmpInst::Predicate Pred,
+ const SCEV *LHS,
+ const SCEV *RHS, const Loop *L,
+ const Instruction *Context,
+ const SCEV *MaxIter);
+
/// Simplify LHS and RHS in a comparison with predicate Pred. Return true
/// iff any changes were made. If the operands are provably equal or
/// unequal, LHS and RHS are set to the same value and Pred is set to either
@@ -1170,20 +1170,20 @@ public:
const SCEV *S, const Loop *L,
SmallPtrSetImpl<const SCEVPredicate *> &Preds);
- /// Compute \p LHS - \p RHS and returns the result as an APInt if it is a
- /// constant, and None if it isn't.
- ///
- /// This is intended to be a cheaper version of getMinusSCEV. We can be
- /// frugal here since we just bail out of actually constructing and
- /// canonicalizing an expression in the cases where the result isn't going
- /// to be a constant.
- Optional<APInt> computeConstantDifference(const SCEV *LHS, const SCEV *RHS);
-
- /// Update no-wrap flags of an AddRec. This may drop the cached info about
- /// this AddRec (such as range info) in case if new flags may potentially
- /// sharpen it.
- void setNoWrapFlags(SCEVAddRecExpr *AddRec, SCEV::NoWrapFlags Flags);
-
+ /// Compute \p LHS - \p RHS and returns the result as an APInt if it is a
+ /// constant, and None if it isn't.
+ ///
+ /// This is intended to be a cheaper version of getMinusSCEV. We can be
+ /// frugal here since we just bail out of actually constructing and
+ /// canonicalizing an expression in the cases where the result isn't going
+ /// to be a constant.
+ Optional<APInt> computeConstantDifference(const SCEV *LHS, const SCEV *RHS);
+
+ /// Update no-wrap flags of an AddRec. This may drop the cached info about
+ /// this AddRec (such as range info) in case if new flags may potentially
+ /// sharpen it.
+ void setNoWrapFlags(SCEVAddRecExpr *AddRec, SCEV::NoWrapFlags Flags);
+
private:
/// A CallbackVH to arrange for ScalarEvolution to be notified whenever a
/// Value is deleted.
@@ -1264,7 +1264,7 @@ private:
ValueExprMapType ValueExprMap;
/// Mark predicate values currently being processed by isImpliedCond.
- SmallPtrSet<const Value *, 6> PendingLoopPredicates;
+ SmallPtrSet<const Value *, 6> PendingLoopPredicates;
/// Mark SCEVUnknown Phis currently being processed by getRangeRef.
SmallPtrSet<const PHINode *, 6> PendingPhiRanges;
@@ -1367,41 +1367,41 @@ private:
/// never have more than one computable exit.
SmallVector<ExitNotTakenInfo, 1> ExitNotTaken;
- /// Expression indicating the least constant maximum backedge-taken count of
- /// the loop that is known, or a SCEVCouldNotCompute. This expression is
- /// only valid if the redicates associated with all loop exits are true.
- const SCEV *ConstantMax;
-
- /// Indicating if \c ExitNotTaken has an element for every exiting block in
- /// the loop.
- bool IsComplete;
-
- /// Expression indicating the least maximum backedge-taken count of the loop
- /// that is known, or a SCEVCouldNotCompute. Lazily computed on first query.
- const SCEV *SymbolicMax = nullptr;
-
+ /// Expression indicating the least constant maximum backedge-taken count of
+ /// the loop that is known, or a SCEVCouldNotCompute. This expression is
+ /// only valid if the redicates associated with all loop exits are true.
+ const SCEV *ConstantMax;
+
+ /// Indicating if \c ExitNotTaken has an element for every exiting block in
+ /// the loop.
+ bool IsComplete;
+
+ /// Expression indicating the least maximum backedge-taken count of the loop
+ /// that is known, or a SCEVCouldNotCompute. Lazily computed on first query.
+ const SCEV *SymbolicMax = nullptr;
+
/// True iff the backedge is taken either exactly Max or zero times.
bool MaxOrZero = false;
- bool isComplete() const { return IsComplete; }
- const SCEV *getConstantMax() const { return ConstantMax; }
+ bool isComplete() const { return IsComplete; }
+ const SCEV *getConstantMax() const { return ConstantMax; }
public:
- BackedgeTakenInfo() : ConstantMax(nullptr), IsComplete(false) {}
+ BackedgeTakenInfo() : ConstantMax(nullptr), IsComplete(false) {}
BackedgeTakenInfo(BackedgeTakenInfo &&) = default;
BackedgeTakenInfo &operator=(BackedgeTakenInfo &&) = default;
using EdgeExitInfo = std::pair<BasicBlock *, ExitLimit>;
/// Initialize BackedgeTakenInfo from a list of exact exit counts.
- BackedgeTakenInfo(ArrayRef<EdgeExitInfo> ExitCounts, bool IsComplete,
- const SCEV *ConstantMax, bool MaxOrZero);
+ BackedgeTakenInfo(ArrayRef<EdgeExitInfo> ExitCounts, bool IsComplete,
+ const SCEV *ConstantMax, bool MaxOrZero);
/// Test whether this BackedgeTakenInfo contains any computed information,
/// or whether it's all SCEVCouldNotCompute values.
bool hasAnyInfo() const {
- return !ExitNotTaken.empty() ||
- !isa<SCEVCouldNotCompute>(getConstantMax());
+ return !ExitNotTaken.empty() ||
+ !isa<SCEVCouldNotCompute>(getConstantMax());
}
/// Test whether this BackedgeTakenInfo contains complete information.
@@ -1432,22 +1432,22 @@ private:
/// edge, or SCEVCouldNotCompute. The loop is guaranteed not to exit via
/// this block before this number of iterations, but may exit via another
/// block.
- const SCEV *getExact(const BasicBlock *ExitingBlock,
- ScalarEvolution *SE) const;
+ const SCEV *getExact(const BasicBlock *ExitingBlock,
+ ScalarEvolution *SE) const;
- /// Get the constant max backedge taken count for the loop.
- const SCEV *getConstantMax(ScalarEvolution *SE) const;
+ /// Get the constant max backedge taken count for the loop.
+ const SCEV *getConstantMax(ScalarEvolution *SE) const;
- /// Get the constant max backedge taken count for the particular loop exit.
- const SCEV *getConstantMax(const BasicBlock *ExitingBlock,
- ScalarEvolution *SE) const;
-
- /// Get the symbolic max backedge taken count for the loop.
- const SCEV *getSymbolicMax(const Loop *L, ScalarEvolution *SE);
+ /// Get the constant max backedge taken count for the particular loop exit.
+ const SCEV *getConstantMax(const BasicBlock *ExitingBlock,
+ ScalarEvolution *SE) const;
+ /// Get the symbolic max backedge taken count for the loop.
+ const SCEV *getSymbolicMax(const Loop *L, ScalarEvolution *SE);
+
/// Return true if the number of times this backedge is taken is either the
- /// value returned by getConstantMax or zero.
- bool isConstantMaxOrZero(ScalarEvolution *SE) const;
+ /// value returned by getConstantMax or zero.
+ bool isConstantMaxOrZero(ScalarEvolution *SE) const;
/// Return true if any backedge taken count expressions refer to the given
/// subexpression.
@@ -1552,13 +1552,13 @@ private:
ConstantRange getRangeForAffineAR(const SCEV *Start, const SCEV *Stop,
const SCEV *MaxBECount, unsigned BitWidth);
- /// Determines the range for the affine non-self-wrapping SCEVAddRecExpr {\p
- /// Start,+,\p Stop}<nw>.
- ConstantRange getRangeForAffineNoSelfWrappingAR(const SCEVAddRecExpr *AddRec,
- const SCEV *MaxBECount,
- unsigned BitWidth,
- RangeSignHint SignHint);
-
+ /// Determines the range for the affine non-self-wrapping SCEVAddRecExpr {\p
+ /// Start,+,\p Stop}<nw>.
+ ConstantRange getRangeForAffineNoSelfWrappingAR(const SCEVAddRecExpr *AddRec,
+ const SCEV *MaxBECount,
+ unsigned BitWidth,
+ RangeSignHint SignHint);
+
/// Try to compute a range for the affine SCEVAddRecExpr {\p Start,+,\p
/// Stop} by "factoring out" a ternary expression from the add recurrence.
/// Helper called by \c getRange.
@@ -1604,7 +1604,7 @@ private:
/// Return the BackedgeTakenInfo for the given loop, lazily computing new
/// values if the loop hasn't been analyzed yet. The returned result is
/// guaranteed not to be predicated.
- BackedgeTakenInfo &getBackedgeTakenInfo(const Loop *L);
+ BackedgeTakenInfo &getBackedgeTakenInfo(const Loop *L);
/// Similar to getBackedgeTakenInfo, but will add predicates as required
/// with the purpose of returning complete information.
@@ -1637,11 +1637,11 @@ private:
bool ExitIfTrue, bool ControlsExit,
bool AllowPredicates = false);
- /// Return a symbolic upper bound for the backedge taken count of the loop.
- /// This is more general than getConstantMaxBackedgeTakenCount as it returns
- /// an arbitrary expression as opposed to only constants.
- const SCEV *computeSymbolicMaxBackedgeTakenCount(const Loop *L);
-
+ /// Return a symbolic upper bound for the backedge taken count of the loop.
+ /// This is more general than getConstantMaxBackedgeTakenCount as it returns
+ /// an arbitrary expression as opposed to only constants.
+ const SCEV *computeSymbolicMaxBackedgeTakenCount(const Loop *L);
+
// Helper functions for computeExitLimitFromCond to avoid exponential time
// complexity.
@@ -1679,10 +1679,10 @@ private:
Value *ExitCond, bool ExitIfTrue,
bool ControlsExit,
bool AllowPredicates);
- Optional<ScalarEvolution::ExitLimit>
- computeExitLimitFromCondFromBinOp(ExitLimitCacheTy &Cache, const Loop *L,
- Value *ExitCond, bool ExitIfTrue,
- bool ControlsExit, bool AllowPredicates);
+ Optional<ScalarEvolution::ExitLimit>
+ computeExitLimitFromCondFromBinOp(ExitLimitCacheTy &Cache, const Loop *L,
+ Value *ExitCond, bool ExitIfTrue,
+ bool ControlsExit, bool AllowPredicates);
/// Compute the number of times the backedge of the specified loop will
/// execute if its exit condition were a conditional branch of the ICmpInst
@@ -1761,44 +1761,44 @@ private:
/// Return a predecessor of BB (which may not be an immediate predecessor)
/// which has exactly one successor from which BB is reachable, or null if
/// no such block is found.
- std::pair<const BasicBlock *, const BasicBlock *>
- getPredecessorWithUniqueSuccessorForBB(const BasicBlock *BB) const;
+ std::pair<const BasicBlock *, const BasicBlock *>
+ getPredecessorWithUniqueSuccessorForBB(const BasicBlock *BB) const;
/// Test whether the condition described by Pred, LHS, and RHS is true
- /// whenever the given FoundCondValue value evaluates to true in given
- /// Context. If Context is nullptr, then the found predicate is true
- /// everywhere. LHS and FoundLHS may have different type width.
+ /// whenever the given FoundCondValue value evaluates to true in given
+ /// Context. If Context is nullptr, then the found predicate is true
+ /// everywhere. LHS and FoundLHS may have different type width.
bool isImpliedCond(ICmpInst::Predicate Pred, const SCEV *LHS, const SCEV *RHS,
- const Value *FoundCondValue, bool Inverse,
- const Instruction *Context = nullptr);
-
- /// Test whether the condition described by Pred, LHS, and RHS is true
- /// whenever the given FoundCondValue value evaluates to true in given
- /// Context. If Context is nullptr, then the found predicate is true
- /// everywhere. LHS and FoundLHS must have same type width.
- bool isImpliedCondBalancedTypes(ICmpInst::Predicate Pred, const SCEV *LHS,
- const SCEV *RHS,
- ICmpInst::Predicate FoundPred,
- const SCEV *FoundLHS, const SCEV *FoundRHS,
- const Instruction *Context);
+ const Value *FoundCondValue, bool Inverse,
+ const Instruction *Context = nullptr);
/// Test whether the condition described by Pred, LHS, and RHS is true
+ /// whenever the given FoundCondValue value evaluates to true in given
+ /// Context. If Context is nullptr, then the found predicate is true
+ /// everywhere. LHS and FoundLHS must have same type width.
+ bool isImpliedCondBalancedTypes(ICmpInst::Predicate Pred, const SCEV *LHS,
+ const SCEV *RHS,
+ ICmpInst::Predicate FoundPred,
+ const SCEV *FoundLHS, const SCEV *FoundRHS,
+ const Instruction *Context);
+
+ /// Test whether the condition described by Pred, LHS, and RHS is true
/// whenever the condition described by FoundPred, FoundLHS, FoundRHS is
- /// true in given Context. If Context is nullptr, then the found predicate is
- /// true everywhere.
+ /// true in given Context. If Context is nullptr, then the found predicate is
+ /// true everywhere.
bool isImpliedCond(ICmpInst::Predicate Pred, const SCEV *LHS, const SCEV *RHS,
ICmpInst::Predicate FoundPred, const SCEV *FoundLHS,
- const SCEV *FoundRHS,
- const Instruction *Context = nullptr);
+ const SCEV *FoundRHS,
+ const Instruction *Context = nullptr);
/// Test whether the condition described by Pred, LHS, and RHS is true
/// whenever the condition described by Pred, FoundLHS, and FoundRHS is
- /// true in given Context. If Context is nullptr, then the found predicate is
- /// true everywhere.
+ /// true in given Context. If Context is nullptr, then the found predicate is
+ /// true everywhere.
bool isImpliedCondOperands(ICmpInst::Predicate Pred, const SCEV *LHS,
const SCEV *RHS, const SCEV *FoundLHS,
- const SCEV *FoundRHS,
- const Instruction *Context = nullptr);
+ const SCEV *FoundRHS,
+ const Instruction *Context = nullptr);
/// Test whether the condition described by Pred, LHS, and RHS is true
/// whenever the condition described by Pred, FoundLHS, and FoundRHS is
@@ -1831,7 +1831,7 @@ private:
/// Return true if the condition denoted by \p LHS \p Pred \p RHS is implied
/// by a call to @llvm.experimental.guard in \p BB.
- bool isImpliedViaGuard(const BasicBlock *BB, ICmpInst::Predicate Pred,
+ bool isImpliedViaGuard(const BasicBlock *BB, ICmpInst::Predicate Pred,
const SCEV *LHS, const SCEV *RHS);
/// Test whether the condition described by Pred, LHS, and RHS is true
@@ -1849,18 +1849,18 @@ private:
/// whenever the condition described by Pred, FoundLHS, and FoundRHS is
/// true.
///
- /// This routine tries to weaken the known condition basing on fact that
- /// FoundLHS is an AddRec.
- bool isImpliedCondOperandsViaAddRecStart(ICmpInst::Predicate Pred,
- const SCEV *LHS, const SCEV *RHS,
- const SCEV *FoundLHS,
- const SCEV *FoundRHS,
- const Instruction *Context);
-
- /// Test whether the condition described by Pred, LHS, and RHS is true
- /// whenever the condition described by Pred, FoundLHS, and FoundRHS is
- /// true.
- ///
+ /// This routine tries to weaken the known condition basing on fact that
+ /// FoundLHS is an AddRec.
+ bool isImpliedCondOperandsViaAddRecStart(ICmpInst::Predicate Pred,
+ const SCEV *LHS, const SCEV *RHS,
+ const SCEV *FoundLHS,
+ const SCEV *FoundRHS,
+ const Instruction *Context);
+
+ /// Test whether the condition described by Pred, LHS, and RHS is true
+ /// whenever the condition described by Pred, FoundLHS, and FoundRHS is
+ /// true.
+ ///
/// This routine tries to figure out predicate for Phis which are SCEVUnknown
/// if it is true for every possible incoming value from their respective
/// basic blocks.
@@ -1919,18 +1919,18 @@ private:
/// Try to prove NSW or NUW on \p AR relying on ConstantRange manipulation.
SCEV::NoWrapFlags proveNoWrapViaConstantRanges(const SCEVAddRecExpr *AR);
- /// Try to prove NSW on \p AR by proving facts about conditions known on
- /// entry and backedge.
- SCEV::NoWrapFlags proveNoSignedWrapViaInduction(const SCEVAddRecExpr *AR);
-
- /// Try to prove NUW on \p AR by proving facts about conditions known on
- /// entry and backedge.
- SCEV::NoWrapFlags proveNoUnsignedWrapViaInduction(const SCEVAddRecExpr *AR);
-
- Optional<MonotonicPredicateType>
- getMonotonicPredicateTypeImpl(const SCEVAddRecExpr *LHS,
- ICmpInst::Predicate Pred);
-
+ /// Try to prove NSW on \p AR by proving facts about conditions known on
+ /// entry and backedge.
+ SCEV::NoWrapFlags proveNoSignedWrapViaInduction(const SCEVAddRecExpr *AR);
+
+ /// Try to prove NUW on \p AR by proving facts about conditions known on
+ /// entry and backedge.
+ SCEV::NoWrapFlags proveNoUnsignedWrapViaInduction(const SCEVAddRecExpr *AR);
+
+ Optional<MonotonicPredicateType>
+ getMonotonicPredicateTypeImpl(const SCEVAddRecExpr *LHS,
+ ICmpInst::Predicate Pred);
+
/// Return SCEV no-wrap flags that can be proven based on reasoning about
/// how poison produced from no-wrap flags on this value (e.g. a nuw add)
/// would trigger undefined behavior on overflow.
@@ -2028,9 +2028,9 @@ private:
/// Assign A and B to LHS and RHS, respectively.
bool matchURem(const SCEV *Expr, const SCEV *&LHS, const SCEV *&RHS);
- /// Try to apply information from loop guards for \p L to \p Expr.
- const SCEV *applyLoopGuards(const SCEV *Expr, const Loop *L);
-
+ /// Try to apply information from loop guards for \p L to \p Expr.
+ const SCEV *applyLoopGuards(const SCEV *Expr, const Loop *L);
+
/// Look for a SCEV expression with type `SCEVType` and operands `Ops` in
/// `UniqueSCEVs`.
///
@@ -2039,7 +2039,7 @@ private:
/// constructed to look up the SCEV and the third component is the insertion
/// point.
std::tuple<SCEV *, FoldingSetNodeID, void *>
- findExistingSCEVInCache(SCEVTypes SCEVType, ArrayRef<const SCEV *> Ops);
+ findExistingSCEVInCache(SCEVTypes SCEVType, ArrayRef<const SCEV *> Ops);
FoldingSet<SCEV> UniqueSCEVs;
FoldingSet<SCEVPredicate> UniquePreds;
diff --git a/contrib/libs/llvm12/include/llvm/Analysis/ScalarEvolutionDivision.h b/contrib/libs/llvm12/include/llvm/Analysis/ScalarEvolutionDivision.h
index ddcd264525..370172fd74 100644
--- a/contrib/libs/llvm12/include/llvm/Analysis/ScalarEvolutionDivision.h
+++ b/contrib/libs/llvm12/include/llvm/Analysis/ScalarEvolutionDivision.h
@@ -40,7 +40,7 @@ public:
// Except in the trivial case described above, we do not know how to divide
// Expr by Denominator for the following functions with empty implementation.
- void visitPtrToIntExpr(const SCEVPtrToIntExpr *Numerator) {}
+ void visitPtrToIntExpr(const SCEVPtrToIntExpr *Numerator) {}
void visitTruncateExpr(const SCEVTruncateExpr *Numerator) {}
void visitZeroExtendExpr(const SCEVZeroExtendExpr *Numerator) {}
void visitSignExtendExpr(const SCEVSignExtendExpr *Numerator) {}
diff --git a/contrib/libs/llvm12/include/llvm/Analysis/ScalarEvolutionExpressions.h b/contrib/libs/llvm12/include/llvm/Analysis/ScalarEvolutionExpressions.h
index 1841c92d43..d0d9f176df 100644
--- a/contrib/libs/llvm12/include/llvm/Analysis/ScalarEvolutionExpressions.h
+++ b/contrib/libs/llvm12/include/llvm/Analysis/ScalarEvolutionExpressions.h
@@ -42,12 +42,12 @@ class ConstantRange;
class Loop;
class Type;
- enum SCEVTypes : unsigned short {
+ enum SCEVTypes : unsigned short {
// These should be ordered in terms of increasing complexity to make the
// folders simpler.
scConstant, scTruncate, scZeroExtend, scSignExtend, scAddExpr, scMulExpr,
scUDivExpr, scAddRecExpr, scUMaxExpr, scSMaxExpr, scUMinExpr, scSMinExpr,
- scPtrToInt, scUnknown, scCouldNotCompute
+ scPtrToInt, scUnknown, scCouldNotCompute
};
/// This class represents a constant integer value.
@@ -81,58 +81,58 @@ class Type;
/// This is the base class for unary cast operator classes.
class SCEVCastExpr : public SCEV {
protected:
- std::array<const SCEV *, 1> Operands;
+ std::array<const SCEV *, 1> Operands;
Type *Ty;
- SCEVCastExpr(const FoldingSetNodeIDRef ID, SCEVTypes SCEVTy, const SCEV *op,
- Type *ty);
+ SCEVCastExpr(const FoldingSetNodeIDRef ID, SCEVTypes SCEVTy, const SCEV *op,
+ Type *ty);
public:
- const SCEV *getOperand() const { return Operands[0]; }
- const SCEV *getOperand(unsigned i) const {
- assert(i == 0 && "Operand index out of range!");
- return Operands[0];
- }
- using op_iterator = std::array<const SCEV *, 1>::const_iterator;
- using op_range = iterator_range<op_iterator>;
-
- op_range operands() const {
- return make_range(Operands.begin(), Operands.end());
- }
- size_t getNumOperands() const { return 1; }
+ const SCEV *getOperand() const { return Operands[0]; }
+ const SCEV *getOperand(unsigned i) const {
+ assert(i == 0 && "Operand index out of range!");
+ return Operands[0];
+ }
+ using op_iterator = std::array<const SCEV *, 1>::const_iterator;
+ using op_range = iterator_range<op_iterator>;
+
+ op_range operands() const {
+ return make_range(Operands.begin(), Operands.end());
+ }
+ size_t getNumOperands() const { return 1; }
Type *getType() const { return Ty; }
/// Methods for support type inquiry through isa, cast, and dyn_cast:
static bool classof(const SCEV *S) {
- return S->getSCEVType() == scPtrToInt || S->getSCEVType() == scTruncate ||
- S->getSCEVType() == scZeroExtend ||
- S->getSCEVType() == scSignExtend;
- }
- };
-
- /// This class represents a cast from a pointer to a pointer-sized integer
- /// value.
- class SCEVPtrToIntExpr : public SCEVCastExpr {
- friend class ScalarEvolution;
-
- SCEVPtrToIntExpr(const FoldingSetNodeIDRef ID, const SCEV *Op, Type *ITy);
-
- public:
- /// Methods for support type inquiry through isa, cast, and dyn_cast:
- static bool classof(const SCEV *S) {
- return S->getSCEVType() == scPtrToInt;
- }
- };
-
- /// This is the base class for unary integral cast operator classes.
- class SCEVIntegralCastExpr : public SCEVCastExpr {
- protected:
- SCEVIntegralCastExpr(const FoldingSetNodeIDRef ID, SCEVTypes SCEVTy,
- const SCEV *op, Type *ty);
-
- public:
- /// Methods for support type inquiry through isa, cast, and dyn_cast:
- static bool classof(const SCEV *S) {
+ return S->getSCEVType() == scPtrToInt || S->getSCEVType() == scTruncate ||
+ S->getSCEVType() == scZeroExtend ||
+ S->getSCEVType() == scSignExtend;
+ }
+ };
+
+ /// This class represents a cast from a pointer to a pointer-sized integer
+ /// value.
+ class SCEVPtrToIntExpr : public SCEVCastExpr {
+ friend class ScalarEvolution;
+
+ SCEVPtrToIntExpr(const FoldingSetNodeIDRef ID, const SCEV *Op, Type *ITy);
+
+ public:
+ /// Methods for support type inquiry through isa, cast, and dyn_cast:
+ static bool classof(const SCEV *S) {
+ return S->getSCEVType() == scPtrToInt;
+ }
+ };
+
+ /// This is the base class for unary integral cast operator classes.
+ class SCEVIntegralCastExpr : public SCEVCastExpr {
+ protected:
+ SCEVIntegralCastExpr(const FoldingSetNodeIDRef ID, SCEVTypes SCEVTy,
+ const SCEV *op, Type *ty);
+
+ public:
+ /// Methods for support type inquiry through isa, cast, and dyn_cast:
+ static bool classof(const SCEV *S) {
return S->getSCEVType() == scTruncate ||
S->getSCEVType() == scZeroExtend ||
S->getSCEVType() == scSignExtend;
@@ -141,7 +141,7 @@ class Type;
/// This class represents a truncation of an integer value to a
/// smaller integer value.
- class SCEVTruncateExpr : public SCEVIntegralCastExpr {
+ class SCEVTruncateExpr : public SCEVIntegralCastExpr {
friend class ScalarEvolution;
SCEVTruncateExpr(const FoldingSetNodeIDRef ID,
@@ -156,7 +156,7 @@ class Type;
/// This class represents a zero extension of a small integer value
/// to a larger integer value.
- class SCEVZeroExtendExpr : public SCEVIntegralCastExpr {
+ class SCEVZeroExtendExpr : public SCEVIntegralCastExpr {
friend class ScalarEvolution;
SCEVZeroExtendExpr(const FoldingSetNodeIDRef ID,
@@ -171,7 +171,7 @@ class Type;
/// This class represents a sign extension of a small integer value
/// to a larger integer value.
- class SCEVSignExtendExpr : public SCEVIntegralCastExpr {
+ class SCEVSignExtendExpr : public SCEVIntegralCastExpr {
friend class ScalarEvolution;
SCEVSignExtendExpr(const FoldingSetNodeIDRef ID,
@@ -310,29 +310,29 @@ class Type;
class SCEVUDivExpr : public SCEV {
friend class ScalarEvolution;
- std::array<const SCEV *, 2> Operands;
+ std::array<const SCEV *, 2> Operands;
SCEVUDivExpr(const FoldingSetNodeIDRef ID, const SCEV *lhs, const SCEV *rhs)
- : SCEV(ID, scUDivExpr, computeExpressionSize({lhs, rhs})) {
- Operands[0] = lhs;
- Operands[1] = rhs;
- }
+ : SCEV(ID, scUDivExpr, computeExpressionSize({lhs, rhs})) {
+ Operands[0] = lhs;
+ Operands[1] = rhs;
+ }
public:
- const SCEV *getLHS() const { return Operands[0]; }
- const SCEV *getRHS() const { return Operands[1]; }
- size_t getNumOperands() const { return 2; }
- const SCEV *getOperand(unsigned i) const {
- assert((i == 0 || i == 1) && "Operand index out of range!");
- return i == 0 ? getLHS() : getRHS();
- }
-
- using op_iterator = std::array<const SCEV *, 2>::const_iterator;
- using op_range = iterator_range<op_iterator>;
- op_range operands() const {
- return make_range(Operands.begin(), Operands.end());
- }
-
+ const SCEV *getLHS() const { return Operands[0]; }
+ const SCEV *getRHS() const { return Operands[1]; }
+ size_t getNumOperands() const { return 2; }
+ const SCEV *getOperand(unsigned i) const {
+ assert((i == 0 || i == 1) && "Operand index out of range!");
+ return i == 0 ? getLHS() : getRHS();
+ }
+
+ using op_iterator = std::array<const SCEV *, 2>::const_iterator;
+ using op_range = iterator_range<op_iterator>;
+ op_range operands() const {
+ return make_range(Operands.begin(), Operands.end());
+ }
+
Type *getType() const {
// In most cases the types of LHS and RHS will be the same, but in some
// crazy cases one or the other may be a pointer. ScalarEvolution doesn't
@@ -448,7 +448,7 @@ class Type;
public:
static bool classof(const SCEV *S) {
- return isMinMaxType(S->getSCEVType());
+ return isMinMaxType(S->getSCEVType());
}
static enum SCEVTypes negate(enum SCEVTypes T) {
@@ -577,8 +577,8 @@ class Type;
switch (S->getSCEVType()) {
case scConstant:
return ((SC*)this)->visitConstant((const SCEVConstant*)S);
- case scPtrToInt:
- return ((SC *)this)->visitPtrToIntExpr((const SCEVPtrToIntExpr *)S);
+ case scPtrToInt:
+ return ((SC *)this)->visitPtrToIntExpr((const SCEVPtrToIntExpr *)S);
case scTruncate:
return ((SC*)this)->visitTruncateExpr((const SCEVTruncateExpr*)S);
case scZeroExtend:
@@ -606,7 +606,7 @@ class Type;
case scCouldNotCompute:
return ((SC*)this)->visitCouldNotCompute((const SCEVCouldNotCompute*)S);
}
- llvm_unreachable("Unknown SCEV kind!");
+ llvm_unreachable("Unknown SCEV kind!");
}
RetVal visitCouldNotCompute(const SCEVCouldNotCompute *S) {
@@ -643,13 +643,13 @@ class Type;
switch (S->getSCEVType()) {
case scConstant:
case scUnknown:
- continue;
- case scPtrToInt:
+ continue;
+ case scPtrToInt:
case scTruncate:
case scZeroExtend:
case scSignExtend:
push(cast<SCEVCastExpr>(S)->getOperand());
- continue;
+ continue;
case scAddExpr:
case scMulExpr:
case scSMaxExpr:
@@ -659,17 +659,17 @@ class Type;
case scAddRecExpr:
for (const auto *Op : cast<SCEVNAryExpr>(S)->operands())
push(Op);
- continue;
+ continue;
case scUDivExpr: {
const SCEVUDivExpr *UDiv = cast<SCEVUDivExpr>(S);
push(UDiv->getLHS());
push(UDiv->getRHS());
- continue;
+ continue;
}
case scCouldNotCompute:
llvm_unreachable("Attempt to use a SCEVCouldNotCompute object!");
}
- llvm_unreachable("Unknown SCEV kind!");
+ llvm_unreachable("Unknown SCEV kind!");
}
}
};
@@ -737,13 +737,13 @@ class Type;
return Constant;
}
- const SCEV *visitPtrToIntExpr(const SCEVPtrToIntExpr *Expr) {
- const SCEV *Operand = ((SC *)this)->visit(Expr->getOperand());
- return Operand == Expr->getOperand()
- ? Expr
- : SE.getPtrToIntExpr(Operand, Expr->getType());
- }
-
+ const SCEV *visitPtrToIntExpr(const SCEVPtrToIntExpr *Expr) {
+ const SCEV *Operand = ((SC *)this)->visit(Expr->getOperand());
+ return Operand == Expr->getOperand()
+ ? Expr
+ : SE.getPtrToIntExpr(Operand, Expr->getType());
+ }
+
const SCEV *visitTruncateExpr(const SCEVTruncateExpr *Expr) {
const SCEV *Operand = ((SC*)this)->visit(Expr->getOperand());
return Operand == Expr->getOperand()
@@ -854,30 +854,30 @@ class Type;
};
using ValueToValueMap = DenseMap<const Value *, Value *>;
- using ValueToSCEVMapTy = DenseMap<const Value *, const SCEV *>;
+ using ValueToSCEVMapTy = DenseMap<const Value *, const SCEV *>;
/// The SCEVParameterRewriter takes a scalar evolution expression and updates
- /// the SCEVUnknown components following the Map (Value -> SCEV).
+ /// the SCEVUnknown components following the Map (Value -> SCEV).
class SCEVParameterRewriter : public SCEVRewriteVisitor<SCEVParameterRewriter> {
public:
static const SCEV *rewrite(const SCEV *Scev, ScalarEvolution &SE,
- ValueToSCEVMapTy &Map) {
- SCEVParameterRewriter Rewriter(SE, Map);
+ ValueToSCEVMapTy &Map) {
+ SCEVParameterRewriter Rewriter(SE, Map);
return Rewriter.visit(Scev);
}
- SCEVParameterRewriter(ScalarEvolution &SE, ValueToSCEVMapTy &M)
- : SCEVRewriteVisitor(SE), Map(M) {}
+ SCEVParameterRewriter(ScalarEvolution &SE, ValueToSCEVMapTy &M)
+ : SCEVRewriteVisitor(SE), Map(M) {}
const SCEV *visitUnknown(const SCEVUnknown *Expr) {
- auto I = Map.find(Expr->getValue());
- if (I == Map.end())
- return Expr;
- return I->second;
+ auto I = Map.find(Expr->getValue());
+ if (I == Map.end())
+ return Expr;
+ return I->second;
}
private:
- ValueToSCEVMapTy &Map;
+ ValueToSCEVMapTy &Map;
};
using LoopToScevMapT = DenseMap<const Loop *, const SCEV *>;
diff --git a/contrib/libs/llvm12/include/llvm/Analysis/SparsePropagation.h b/contrib/libs/llvm12/include/llvm/Analysis/SparsePropagation.h
index 6f35675503..5996920354 100644
--- a/contrib/libs/llvm12/include/llvm/Analysis/SparsePropagation.h
+++ b/contrib/libs/llvm12/include/llvm/Analysis/SparsePropagation.h
@@ -492,7 +492,7 @@ void SparseSolver<LatticeKey, LatticeVal, KeyInfo>::Solve() {
// Process the basic block work list.
while (!BBWorkList.empty()) {
- BasicBlock *BB = BBWorkList.pop_back_val();
+ BasicBlock *BB = BBWorkList.pop_back_val();
LLVM_DEBUG(dbgs() << "\nPopped off BBWL: " << *BB);
diff --git a/contrib/libs/llvm12/include/llvm/Analysis/StackLifetime.h b/contrib/libs/llvm12/include/llvm/Analysis/StackLifetime.h
index c17d154b19..4257507b9c 100644
--- a/contrib/libs/llvm12/include/llvm/Analysis/StackLifetime.h
+++ b/contrib/libs/llvm12/include/llvm/Analysis/StackLifetime.h
@@ -20,7 +20,7 @@
#include "llvm/ADT/BitVector.h"
#include "llvm/ADT/DenseMap.h"
#include "llvm/ADT/SmallVector.h"
-#include "llvm/ADT/StringExtras.h"
+#include "llvm/ADT/StringExtras.h"
#include "llvm/IR/IntrinsicInst.h"
#include "llvm/IR/PassManager.h"
#include "llvm/Support/raw_ostream.h"
@@ -129,8 +129,8 @@ private:
DenseMap<const BasicBlock *, SmallVector<std::pair<unsigned, Marker>, 4>>
BBMarkers;
- bool HasUnknownLifetimeStartOrEnd = false;
-
+ bool HasUnknownLifetimeStartOrEnd = false;
+
void dumpAllocas() const;
void dumpBlockLiveness() const;
void dumpLiveRanges() const;
@@ -176,9 +176,9 @@ public:
static inline raw_ostream &operator<<(raw_ostream &OS, const BitVector &V) {
OS << "{";
- ListSeparator LS;
- for (int Idx = V.find_first(); Idx >= 0; Idx = V.find_next(Idx))
- OS << LS << Idx;
+ ListSeparator LS;
+ for (int Idx = V.find_first(); Idx >= 0; Idx = V.find_next(Idx))
+ OS << LS << Idx;
OS << "}";
return OS;
}
diff --git a/contrib/libs/llvm12/include/llvm/Analysis/StackSafetyAnalysis.h b/contrib/libs/llvm12/include/llvm/Analysis/StackSafetyAnalysis.h
index 783af1e5e3..a4121213eb 100644
--- a/contrib/libs/llvm12/include/llvm/Analysis/StackSafetyAnalysis.h
+++ b/contrib/libs/llvm12/include/llvm/Analysis/StackSafetyAnalysis.h
@@ -58,8 +58,8 @@ public:
/// StackSafety assumes that missing parameter information means possibility
/// of access to the parameter with any offset, so we can correctly link
/// code without StackSafety information, e.g. non-ThinLTO.
- std::vector<FunctionSummary::ParamAccess>
- getParamAccesses(ModuleSummaryIndex &Index) const;
+ std::vector<FunctionSummary::ParamAccess>
+ getParamAccesses(ModuleSummaryIndex &Index) const;
};
class StackSafetyGlobalInfo {
diff --git a/contrib/libs/llvm12/include/llvm/Analysis/SyncDependenceAnalysis.h b/contrib/libs/llvm12/include/llvm/Analysis/SyncDependenceAnalysis.h
index f31348e744..ea0db64a28 100644
--- a/contrib/libs/llvm12/include/llvm/Analysis/SyncDependenceAnalysis.h
+++ b/contrib/libs/llvm12/include/llvm/Analysis/SyncDependenceAnalysis.h
@@ -28,7 +28,7 @@
#include "llvm/ADT/SmallPtrSet.h"
#include "llvm/Analysis/LoopInfo.h"
#include <memory>
-#include <unordered_map>
+#include <unordered_map>
namespace llvm {
@@ -38,27 +38,27 @@ class Loop;
class PostDominatorTree;
using ConstBlockSet = SmallPtrSet<const BasicBlock *, 4>;
-struct ControlDivergenceDesc {
- // Join points of divergent disjoint paths.
- ConstBlockSet JoinDivBlocks;
- // Divergent loop exits
- ConstBlockSet LoopDivBlocks;
-};
-
-struct ModifiedPO {
- std::vector<const BasicBlock *> LoopPO;
- std::unordered_map<const BasicBlock *, unsigned> POIndex;
- void appendBlock(const BasicBlock &BB) {
- POIndex[&BB] = LoopPO.size();
- LoopPO.push_back(&BB);
- }
- unsigned getIndexOf(const BasicBlock &BB) const {
- return POIndex.find(&BB)->second;
- }
- unsigned size() const { return LoopPO.size(); }
- const BasicBlock *getBlockAt(unsigned Idx) const { return LoopPO[Idx]; }
-};
-
+struct ControlDivergenceDesc {
+ // Join points of divergent disjoint paths.
+ ConstBlockSet JoinDivBlocks;
+ // Divergent loop exits
+ ConstBlockSet LoopDivBlocks;
+};
+
+struct ModifiedPO {
+ std::vector<const BasicBlock *> LoopPO;
+ std::unordered_map<const BasicBlock *, unsigned> POIndex;
+ void appendBlock(const BasicBlock &BB) {
+ POIndex[&BB] = LoopPO.size();
+ LoopPO.push_back(&BB);
+ }
+ unsigned getIndexOf(const BasicBlock &BB) const {
+ return POIndex.find(&BB)->second;
+ }
+ unsigned size() const { return LoopPO.size(); }
+ const BasicBlock *getBlockAt(unsigned Idx) const { return LoopPO[Idx]; }
+};
+
/// \brief Relates points of divergent control to join points in
/// reducible CFGs.
///
@@ -79,19 +79,19 @@ public:
/// header. Those exit blocks are added to the returned set.
/// If L is the parent loop of \p Term and an exit of L is in the returned
/// set then L is a divergent loop.
- const ControlDivergenceDesc &getJoinBlocks(const Instruction &Term);
+ const ControlDivergenceDesc &getJoinBlocks(const Instruction &Term);
private:
- static ControlDivergenceDesc EmptyDivergenceDesc;
-
- ModifiedPO LoopPO;
+ static ControlDivergenceDesc EmptyDivergenceDesc;
+ ModifiedPO LoopPO;
+
const DominatorTree &DT;
const PostDominatorTree &PDT;
const LoopInfo &LI;
- std::map<const Instruction *, std::unique_ptr<ControlDivergenceDesc>>
- CachedControlDivDescs;
+ std::map<const Instruction *, std::unique_ptr<ControlDivergenceDesc>>
+ CachedControlDivDescs;
};
} // namespace llvm
diff --git a/contrib/libs/llvm12/include/llvm/Analysis/TargetLibraryInfo.def b/contrib/libs/llvm12/include/llvm/Analysis/TargetLibraryInfo.def
index defc95d006..3b8edfe4cc 100644
--- a/contrib/libs/llvm12/include/llvm/Analysis/TargetLibraryInfo.def
+++ b/contrib/libs/llvm12/include/llvm/Analysis/TargetLibraryInfo.def
@@ -262,12 +262,12 @@ TLI_DEFINE_STRING_INTERNAL("__atanhf_finite")
/// long double __atanhl_finite(long double x);
TLI_DEFINE_ENUM_INTERNAL(atanhl_finite)
TLI_DEFINE_STRING_INTERNAL("__atanhl_finite")
-/// void __atomic_load(size_t size, void *mptr, void *vptr, int smodel);
-TLI_DEFINE_ENUM_INTERNAL(atomic_load)
-TLI_DEFINE_STRING_INTERNAL("__atomic_load")
-/// void __atomic_store(size_t size, void *mptr, void *vptr, int smodel);
-TLI_DEFINE_ENUM_INTERNAL(atomic_store)
-TLI_DEFINE_STRING_INTERNAL("__atomic_store")
+/// void __atomic_load(size_t size, void *mptr, void *vptr, int smodel);
+TLI_DEFINE_ENUM_INTERNAL(atomic_load)
+TLI_DEFINE_STRING_INTERNAL("__atomic_load")
+/// void __atomic_store(size_t size, void *mptr, void *vptr, int smodel);
+TLI_DEFINE_ENUM_INTERNAL(atomic_store)
+TLI_DEFINE_STRING_INTERNAL("__atomic_store")
/// double __cosh_finite(double x);
TLI_DEFINE_ENUM_INTERNAL(cosh_finite)
TLI_DEFINE_STRING_INTERNAL("__cosh_finite")
@@ -366,9 +366,9 @@ TLI_DEFINE_STRING_INTERNAL("__memcpy_chk")
/// void *__memmove_chk(void *s1, const void *s2, size_t n, size_t s1size);
TLI_DEFINE_ENUM_INTERNAL(memmove_chk)
TLI_DEFINE_STRING_INTERNAL("__memmove_chk")
-/// void *__mempcpy_chk(void *s1, const void *s2, size_t n, size_t s1size);
-TLI_DEFINE_ENUM_INTERNAL(mempcpy_chk)
-TLI_DEFINE_STRING_INTERNAL("__mempcpy_chk")
+/// void *__mempcpy_chk(void *s1, const void *s2, size_t n, size_t s1size);
+TLI_DEFINE_ENUM_INTERNAL(mempcpy_chk)
+TLI_DEFINE_STRING_INTERNAL("__mempcpy_chk")
/// void *__memset_chk(void *s, char v, size_t n, size_t s1size);
TLI_DEFINE_ENUM_INTERNAL(memset_chk)
TLI_DEFINE_STRING_INTERNAL("__memset_chk")
@@ -1420,18 +1420,18 @@ TLI_DEFINE_STRING_INTERNAL("utimes")
/// void *valloc(size_t size);
TLI_DEFINE_ENUM_INTERNAL(valloc)
TLI_DEFINE_STRING_INTERNAL("valloc")
-/// void *vec_calloc(size_t count, size_t size);
-TLI_DEFINE_ENUM_INTERNAL(vec_calloc)
-TLI_DEFINE_STRING_INTERNAL("vec_calloc")
-/// void vec_free(void *ptr);
-TLI_DEFINE_ENUM_INTERNAL(vec_free)
-TLI_DEFINE_STRING_INTERNAL("vec_free")
-/// void *vec_malloc(size_t size);
-TLI_DEFINE_ENUM_INTERNAL(vec_malloc)
-TLI_DEFINE_STRING_INTERNAL("vec_malloc")
-/// void *vec_realloc(void *ptr, size_t size);
-TLI_DEFINE_ENUM_INTERNAL(vec_realloc)
-TLI_DEFINE_STRING_INTERNAL("vec_realloc")
+/// void *vec_calloc(size_t count, size_t size);
+TLI_DEFINE_ENUM_INTERNAL(vec_calloc)
+TLI_DEFINE_STRING_INTERNAL("vec_calloc")
+/// void vec_free(void *ptr);
+TLI_DEFINE_ENUM_INTERNAL(vec_free)
+TLI_DEFINE_STRING_INTERNAL("vec_free")
+/// void *vec_malloc(size_t size);
+TLI_DEFINE_ENUM_INTERNAL(vec_malloc)
+TLI_DEFINE_STRING_INTERNAL("vec_malloc")
+/// void *vec_realloc(void *ptr, size_t size);
+TLI_DEFINE_ENUM_INTERNAL(vec_realloc)
+TLI_DEFINE_STRING_INTERNAL("vec_realloc")
/// int vfprintf(FILE *stream, const char *format, va_list ap);
TLI_DEFINE_ENUM_INTERNAL(vfprintf)
TLI_DEFINE_STRING_INTERNAL("vfprintf")
diff --git a/contrib/libs/llvm12/include/llvm/Analysis/TargetLibraryInfo.h b/contrib/libs/llvm12/include/llvm/Analysis/TargetLibraryInfo.h
index c313c1b850..eb758dc4e8 100644
--- a/contrib/libs/llvm12/include/llvm/Analysis/TargetLibraryInfo.h
+++ b/contrib/libs/llvm12/include/llvm/Analysis/TargetLibraryInfo.h
@@ -95,7 +95,7 @@ public:
enum VectorLibrary {
NoLibrary, // Don't use any vector library.
Accelerate, // Use Accelerate framework.
- LIBMVEC_X86,// GLIBC Vector Math library.
+ LIBMVEC_X86,// GLIBC Vector Math library.
MASSV, // IBM MASS vector library.
SVML // Intel short vector math library.
};
diff --git a/contrib/libs/llvm12/include/llvm/Analysis/TargetTransformInfo.h b/contrib/libs/llvm12/include/llvm/Analysis/TargetTransformInfo.h
index c4317b08d6..29ccd951e1 100644
--- a/contrib/libs/llvm12/include/llvm/Analysis/TargetTransformInfo.h
+++ b/contrib/libs/llvm12/include/llvm/Analysis/TargetTransformInfo.h
@@ -28,13 +28,13 @@
#ifndef LLVM_ANALYSIS_TARGETTRANSFORMINFO_H
#define LLVM_ANALYSIS_TARGETTRANSFORMINFO_H
-#include "llvm/IR/InstrTypes.h"
+#include "llvm/IR/InstrTypes.h"
#include "llvm/IR/Operator.h"
#include "llvm/IR/PassManager.h"
#include "llvm/Pass.h"
#include "llvm/Support/AtomicOrdering.h"
#include "llvm/Support/DataTypes.h"
-#include "llvm/Support/InstructionCost.h"
+#include "llvm/Support/InstructionCost.h"
#include <functional>
namespace llvm {
@@ -51,7 +51,7 @@ class CallBase;
class ExtractElementInst;
class Function;
class GlobalValue;
-class InstCombiner;
+class InstCombiner;
class IntrinsicInst;
class LoadInst;
class LoopAccessInfo;
@@ -66,7 +66,7 @@ class TargetLibraryInfo;
class Type;
class User;
class Value;
-struct KnownBits;
+struct KnownBits;
template <typename T> class Optional;
/// Information about a load/store intrinsic defined by the target.
@@ -101,7 +101,7 @@ struct HardwareLoopInfo {
Loop *L = nullptr;
BasicBlock *ExitBlock = nullptr;
BranchInst *ExitBranch = nullptr;
- const SCEV *TripCount = nullptr;
+ const SCEV *TripCount = nullptr;
IntegerType *CountType = nullptr;
Value *LoopDecrement = nullptr; // Decrement the loop counter by this
// value in every iteration.
@@ -125,7 +125,7 @@ class IntrinsicCostAttributes {
SmallVector<Type *, 4> ParamTys;
SmallVector<const Value *, 4> Arguments;
FastMathFlags FMF;
- ElementCount VF = ElementCount::getFixed(1);
+ ElementCount VF = ElementCount::getFixed(1);
// If ScalarizationCost is UINT_MAX, the cost of scalarizing the
// arguments and the return value will be computed based on types.
unsigned ScalarizationCost = std::numeric_limits<unsigned>::max();
@@ -136,10 +136,10 @@ public:
IntrinsicCostAttributes(Intrinsic::ID Id, const CallBase &CI);
IntrinsicCostAttributes(Intrinsic::ID Id, const CallBase &CI,
- ElementCount Factor);
+ ElementCount Factor);
IntrinsicCostAttributes(Intrinsic::ID Id, const CallBase &CI,
- ElementCount Factor, unsigned ScalarCost);
+ ElementCount Factor, unsigned ScalarCost);
IntrinsicCostAttributes(Intrinsic::ID Id, Type *RTy,
ArrayRef<Type *> Tys, FastMathFlags Flags);
@@ -162,7 +162,7 @@ public:
Intrinsic::ID getID() const { return IID; }
const IntrinsicInst *getInst() const { return II; }
Type *getReturnType() const { return RetTy; }
- ElementCount getVectorFactor() const { return VF; }
+ ElementCount getVectorFactor() const { return VF; }
FastMathFlags getFlags() const { return FMF; }
unsigned getScalarizationCost() const { return ScalarizationCost; }
const SmallVectorImpl<const Value *> &getArgs() const { return Arguments; }
@@ -239,24 +239,24 @@ public:
///
/// Note, this method does not cache the cost calculation and it
/// can be expensive in some cases.
- InstructionCost getInstructionCost(const Instruction *I,
- enum TargetCostKind kind) const {
- InstructionCost Cost;
+ InstructionCost getInstructionCost(const Instruction *I,
+ enum TargetCostKind kind) const {
+ InstructionCost Cost;
switch (kind) {
case TCK_RecipThroughput:
- Cost = getInstructionThroughput(I);
- break;
+ Cost = getInstructionThroughput(I);
+ break;
case TCK_Latency:
- Cost = getInstructionLatency(I);
- break;
+ Cost = getInstructionLatency(I);
+ break;
case TCK_CodeSize:
case TCK_SizeAndLatency:
- Cost = getUserCost(I, kind);
- break;
+ Cost = getUserCost(I, kind);
+ break;
}
- if (Cost == -1)
- Cost.setInvalid();
- return Cost;
+ if (Cost == -1)
+ Cost.setInvalid();
+ return Cost;
}
/// Underlying constants for 'cost' values in this interface.
@@ -296,9 +296,9 @@ public:
/// individual classes of instructions would be better.
unsigned getInliningThresholdMultiplier() const;
- /// \returns A value to be added to the inlining threshold.
- unsigned adjustInliningThreshold(const CallBase *CB) const;
-
+ /// \returns A value to be added to the inlining threshold.
+ unsigned adjustInliningThreshold(const CallBase *CB) const;
+
/// \returns Vector bonus in percent.
///
/// Vector bonuses: We want to more aggressively inline vector-dense kernels
@@ -342,7 +342,7 @@ public:
/// This is a helper function which calls the two-argument getUserCost
/// with \p Operands which are the current operands U has.
int getUserCost(const User *U, TargetCostKind CostKind) const {
- SmallVector<const Value *, 4> Operands(U->operand_values());
+ SmallVector<const Value *, 4> Operands(U->operand_values());
return getUserCost(U, Operands, CostKind);
}
@@ -397,8 +397,8 @@ public:
bool isNoopAddrSpaceCast(unsigned FromAS, unsigned ToAS) const;
- unsigned getAssumedAddrSpace(const Value *V) const;
-
+ unsigned getAssumedAddrSpace(const Value *V) const;
+
/// Rewrite intrinsic call \p II such that \p OldV will be replaced with \p
/// NewV, which has a different address space. This should happen for every
/// operand index that collectFlatAddressOperands returned for the intrinsic.
@@ -562,29 +562,29 @@ public:
/// target-independent defaults with information from \p L and \p SE.
void getPeelingPreferences(Loop *L, ScalarEvolution &SE,
PeelingPreferences &PP) const;
-
- /// Targets can implement their own combinations for target-specific
- /// intrinsics. This function will be called from the InstCombine pass every
- /// time a target-specific intrinsic is encountered.
- ///
- /// \returns None to not do anything target specific or a value that will be
- /// returned from the InstCombiner. It is possible to return null and stop
- /// further processing of the intrinsic by returning nullptr.
- Optional<Instruction *> instCombineIntrinsic(InstCombiner &IC,
- IntrinsicInst &II) const;
- /// Can be used to implement target-specific instruction combining.
- /// \see instCombineIntrinsic
- Optional<Value *>
- simplifyDemandedUseBitsIntrinsic(InstCombiner &IC, IntrinsicInst &II,
- APInt DemandedMask, KnownBits &Known,
- bool &KnownBitsComputed) const;
- /// Can be used to implement target-specific instruction combining.
- /// \see instCombineIntrinsic
- Optional<Value *> simplifyDemandedVectorEltsIntrinsic(
- InstCombiner &IC, IntrinsicInst &II, APInt DemandedElts, APInt &UndefElts,
- APInt &UndefElts2, APInt &UndefElts3,
- std::function<void(Instruction *, unsigned, APInt, APInt &)>
- SimplifyAndSetOp) const;
+
+ /// Targets can implement their own combinations for target-specific
+ /// intrinsics. This function will be called from the InstCombine pass every
+ /// time a target-specific intrinsic is encountered.
+ ///
+ /// \returns None to not do anything target specific or a value that will be
+ /// returned from the InstCombiner. It is possible to return null and stop
+ /// further processing of the intrinsic by returning nullptr.
+ Optional<Instruction *> instCombineIntrinsic(InstCombiner &IC,
+ IntrinsicInst &II) const;
+ /// Can be used to implement target-specific instruction combining.
+ /// \see instCombineIntrinsic
+ Optional<Value *>
+ simplifyDemandedUseBitsIntrinsic(InstCombiner &IC, IntrinsicInst &II,
+ APInt DemandedMask, KnownBits &Known,
+ bool &KnownBitsComputed) const;
+ /// Can be used to implement target-specific instruction combining.
+ /// \see instCombineIntrinsic
+ Optional<Value *> simplifyDemandedVectorEltsIntrinsic(
+ InstCombiner &IC, IntrinsicInst &II, APInt DemandedElts, APInt &UndefElts,
+ APInt &UndefElts2, APInt &UndefElts3,
+ std::function<void(Instruction *, unsigned, APInt, APInt &)>
+ SimplifyAndSetOp) const;
/// @}
/// \name Scalar Target Information
@@ -626,11 +626,11 @@ public:
bool isLSRCostLess(TargetTransformInfo::LSRCost &C1,
TargetTransformInfo::LSRCost &C2) const;
- /// Return true if LSR major cost is number of registers. Targets which
- /// implement their own isLSRCostLess and unset number of registers as major
- /// cost should return false, otherwise return true.
- bool isNumRegsMajorCostOfLSR() const;
-
+ /// Return true if LSR major cost is number of registers. Targets which
+ /// implement their own isLSRCostLess and unset number of registers as major
+ /// cost should return false, otherwise return true.
+ bool isNumRegsMajorCostOfLSR() const;
+
/// \returns true if LSR should not optimize a chain that includes \p I.
bool isProfitableLSRChainElement(Instruction *I) const;
@@ -720,9 +720,9 @@ public:
/// Return true if this type is legal.
bool isTypeLegal(Type *Ty) const;
- /// Returns the estimated number of registers required to represent \p Ty.
- unsigned getRegUsageForType(Type *Ty) const;
-
+ /// Returns the estimated number of registers required to represent \p Ty.
+ unsigned getRegUsageForType(Type *Ty) const;
+
/// Return true if switches should be turned into lookup tables for the
/// target.
bool shouldBuildLookupTables() const;
@@ -831,9 +831,9 @@ public:
/// Return the expected cost of materialization for the given integer
/// immediate of the specified type for a given instruction. The cost can be
/// zero if the immediate can be folded into the specified instruction.
- int getIntImmCostInst(unsigned Opc, unsigned Idx, const APInt &Imm, Type *Ty,
- TargetCostKind CostKind,
- Instruction *Inst = nullptr) const;
+ int getIntImmCostInst(unsigned Opc, unsigned Idx, const APInt &Imm, Type *Ty,
+ TargetCostKind CostKind,
+ Instruction *Inst = nullptr) const;
int getIntImmCostIntrin(Intrinsic::ID IID, unsigned Idx, const APInt &Imm,
Type *Ty, TargetCostKind CostKind) const;
@@ -897,10 +897,10 @@ public:
static ReductionKind matchVectorSplittingReduction(
const ExtractElementInst *ReduxRoot, unsigned &Opcode, VectorType *&Ty);
- static ReductionKind matchVectorReduction(const ExtractElementInst *ReduxRoot,
- unsigned &Opcode, VectorType *&Ty,
- bool &IsPairwise);
-
+ static ReductionKind matchVectorReduction(const ExtractElementInst *ReduxRoot,
+ unsigned &Opcode, VectorType *&Ty,
+ bool &IsPairwise);
+
/// Additional information about an operand's possible values.
enum OperandValueKind {
OK_AnyValue, // Operand can have any value.
@@ -937,10 +937,10 @@ public:
/// \return The width of the smallest vector register type.
unsigned getMinVectorRegisterBitWidth() const;
- /// \return The maximum value of vscale if the target specifies an
- /// architectural maximum vector length, and None otherwise.
- Optional<unsigned> getMaxVScale() const;
-
+ /// \return The maximum value of vscale if the target specifies an
+ /// architectural maximum vector length, and None otherwise.
+ Optional<unsigned> getMaxVScale() const;
+
/// \return True if the vectorization factor should be chosen to
/// make the vector of the smallest element type match the size of a
/// vector register. For wider element types, this could result in
@@ -954,11 +954,11 @@ public:
/// applies when shouldMaximizeVectorBandwidth returns true.
unsigned getMinimumVF(unsigned ElemWidth) const;
- /// \return The maximum vectorization factor for types of given element
- /// bit width and opcode, or 0 if there is no maximum VF.
- /// Currently only used by the SLP vectorizer.
- unsigned getMaximumVF(unsigned ElemWidth, unsigned Opcode) const;
-
+ /// \return The maximum vectorization factor for types of given element
+ /// bit width and opcode, or 0 if there is no maximum VF.
+ /// Currently only used by the SLP vectorizer.
+ unsigned getMaximumVF(unsigned ElemWidth, unsigned Opcode) const;
+
/// \return True if it should be considered for address type promotion.
/// \p AllowPromotionWithoutCommonHeader Set true if promoting \p I is
/// profitable without finding other extensions fed by the same input.
@@ -1061,47 +1061,47 @@ public:
int getShuffleCost(ShuffleKind Kind, VectorType *Tp, int Index = 0,
VectorType *SubTp = nullptr) const;
- /// Represents a hint about the context in which a cast is used.
- ///
- /// For zext/sext, the context of the cast is the operand, which must be a
- /// load of some kind. For trunc, the context is of the cast is the single
- /// user of the instruction, which must be a store of some kind.
- ///
- /// This enum allows the vectorizer to give getCastInstrCost an idea of the
- /// type of cast it's dealing with, as not every cast is equal. For instance,
- /// the zext of a load may be free, but the zext of an interleaving load can
- //// be (very) expensive!
- ///
- /// See \c getCastContextHint to compute a CastContextHint from a cast
- /// Instruction*. Callers can use it if they don't need to override the
- /// context and just want it to be calculated from the instruction.
- ///
- /// FIXME: This handles the types of load/store that the vectorizer can
- /// produce, which are the cases where the context instruction is most
- /// likely to be incorrect. There are other situations where that can happen
- /// too, which might be handled here but in the long run a more general
- /// solution of costing multiple instructions at the same times may be better.
- enum class CastContextHint : uint8_t {
- None, ///< The cast is not used with a load/store of any kind.
- Normal, ///< The cast is used with a normal load/store.
- Masked, ///< The cast is used with a masked load/store.
- GatherScatter, ///< The cast is used with a gather/scatter.
- Interleave, ///< The cast is used with an interleaved load/store.
- Reversed, ///< The cast is used with a reversed load/store.
- };
-
- /// Calculates a CastContextHint from \p I.
- /// This should be used by callers of getCastInstrCost if they wish to
- /// determine the context from some instruction.
- /// \returns the CastContextHint for ZExt/SExt/Trunc, None if \p I is nullptr,
- /// or if it's another type of cast.
- static CastContextHint getCastContextHint(const Instruction *I);
-
+ /// Represents a hint about the context in which a cast is used.
+ ///
+ /// For zext/sext, the context of the cast is the operand, which must be a
+ /// load of some kind. For trunc, the context is of the cast is the single
+ /// user of the instruction, which must be a store of some kind.
+ ///
+ /// This enum allows the vectorizer to give getCastInstrCost an idea of the
+ /// type of cast it's dealing with, as not every cast is equal. For instance,
+ /// the zext of a load may be free, but the zext of an interleaving load can
+ //// be (very) expensive!
+ ///
+ /// See \c getCastContextHint to compute a CastContextHint from a cast
+ /// Instruction*. Callers can use it if they don't need to override the
+ /// context and just want it to be calculated from the instruction.
+ ///
+ /// FIXME: This handles the types of load/store that the vectorizer can
+ /// produce, which are the cases where the context instruction is most
+ /// likely to be incorrect. There are other situations where that can happen
+ /// too, which might be handled here but in the long run a more general
+ /// solution of costing multiple instructions at the same times may be better.
+ enum class CastContextHint : uint8_t {
+ None, ///< The cast is not used with a load/store of any kind.
+ Normal, ///< The cast is used with a normal load/store.
+ Masked, ///< The cast is used with a masked load/store.
+ GatherScatter, ///< The cast is used with a gather/scatter.
+ Interleave, ///< The cast is used with an interleaved load/store.
+ Reversed, ///< The cast is used with a reversed load/store.
+ };
+
+ /// Calculates a CastContextHint from \p I.
+ /// This should be used by callers of getCastInstrCost if they wish to
+ /// determine the context from some instruction.
+ /// \returns the CastContextHint for ZExt/SExt/Trunc, None if \p I is nullptr,
+ /// or if it's another type of cast.
+ static CastContextHint getCastContextHint(const Instruction *I);
+
/// \return The expected cost of cast instructions, such as bitcast, trunc,
/// zext, etc. If there is an existing instruction that holds Opcode, it
/// may be passed in the 'I' parameter.
int getCastInstrCost(unsigned Opcode, Type *Dst, Type *Src,
- TTI::CastContextHint CCH,
+ TTI::CastContextHint CCH,
TTI::TargetCostKind CostKind = TTI::TCK_SizeAndLatency,
const Instruction *I = nullptr) const;
@@ -1117,14 +1117,14 @@ public:
/// \returns The expected cost of compare and select instructions. If there
/// is an existing instruction that holds Opcode, it may be passed in the
- /// 'I' parameter. The \p VecPred parameter can be used to indicate the select
- /// is using a compare with the specified predicate as condition. When vector
- /// types are passed, \p VecPred must be used for all lanes.
- int getCmpSelInstrCost(
- unsigned Opcode, Type *ValTy, Type *CondTy = nullptr,
- CmpInst::Predicate VecPred = CmpInst::BAD_ICMP_PREDICATE,
- TTI::TargetCostKind CostKind = TTI::TCK_RecipThroughput,
- const Instruction *I = nullptr) const;
+ /// 'I' parameter. The \p VecPred parameter can be used to indicate the select
+ /// is using a compare with the specified predicate as condition. When vector
+ /// types are passed, \p VecPred must be used for all lanes.
+ int getCmpSelInstrCost(
+ unsigned Opcode, Type *ValTy, Type *CondTy = nullptr,
+ CmpInst::Predicate VecPred = CmpInst::BAD_ICMP_PREDICATE,
+ TTI::TargetCostKind CostKind = TTI::TCK_RecipThroughput,
+ const Instruction *I = nullptr) const;
/// \return The expected cost of vector Insert and Extract.
/// Use -1 to indicate that there is no information on the index value.
@@ -1192,16 +1192,16 @@ public:
VectorType *Ty, VectorType *CondTy, bool IsPairwiseForm, bool IsUnsigned,
TTI::TargetCostKind CostKind = TTI::TCK_RecipThroughput) const;
- /// Calculate the cost of an extended reduction pattern, similar to
- /// getArithmeticReductionCost of an Add reduction with an extension and
- /// optional multiply. This is the cost of as:
- /// ResTy vecreduce.add(ext(Ty A)), or if IsMLA flag is set then:
- /// ResTy vecreduce.add(mul(ext(Ty A), ext(Ty B)). The reduction happens
- /// on a VectorType with ResTy elements and Ty lanes.
- InstructionCost getExtendedAddReductionCost(
- bool IsMLA, bool IsUnsigned, Type *ResTy, VectorType *Ty,
- TTI::TargetCostKind CostKind = TTI::TCK_RecipThroughput) const;
-
+ /// Calculate the cost of an extended reduction pattern, similar to
+ /// getArithmeticReductionCost of an Add reduction with an extension and
+ /// optional multiply. This is the cost of as:
+ /// ResTy vecreduce.add(ext(Ty A)), or if IsMLA flag is set then:
+ /// ResTy vecreduce.add(mul(ext(Ty A), ext(Ty B)). The reduction happens
+ /// on a VectorType with ResTy elements and Ty lanes.
+ InstructionCost getExtendedAddReductionCost(
+ bool IsMLA, bool IsUnsigned, Type *ResTy, VectorType *Ty,
+ TTI::TargetCostKind CostKind = TTI::TCK_RecipThroughput) const;
+
/// \returns The cost of Intrinsic instructions. Analyses the real arguments.
/// Three cases are handled: 1. scalar instruction 2. vector instruction
/// 3. scalar instruction which is to be vectorized.
@@ -1337,24 +1337,24 @@ public:
bool useReductionIntrinsic(unsigned Opcode, Type *Ty,
ReductionFlags Flags) const;
- /// \returns True if the target prefers reductions in loop.
- bool preferInLoopReduction(unsigned Opcode, Type *Ty,
- ReductionFlags Flags) const;
-
- /// \returns True if the target prefers reductions select kept in the loop
- /// when tail folding. i.e.
- /// loop:
- /// p = phi (0, s)
- /// a = add (p, x)
- /// s = select (mask, a, p)
- /// vecreduce.add(s)
- ///
- /// As opposed to the normal scheme of p = phi (0, a) which allows the select
- /// to be pulled out of the loop. If the select(.., add, ..) can be predicated
- /// by the target, this can lead to cleaner code generation.
- bool preferPredicatedReductionSelect(unsigned Opcode, Type *Ty,
- ReductionFlags Flags) const;
-
+ /// \returns True if the target prefers reductions in loop.
+ bool preferInLoopReduction(unsigned Opcode, Type *Ty,
+ ReductionFlags Flags) const;
+
+ /// \returns True if the target prefers reductions select kept in the loop
+ /// when tail folding. i.e.
+ /// loop:
+ /// p = phi (0, s)
+ /// a = add (p, x)
+ /// s = select (mask, a, p)
+ /// vecreduce.add(s)
+ ///
+ /// As opposed to the normal scheme of p = phi (0, a) which allows the select
+ /// to be pulled out of the loop. If the select(.., add, ..) can be predicated
+ /// by the target, this can lead to cleaner code generation.
+ bool preferPredicatedReductionSelect(unsigned Opcode, Type *Ty,
+ ReductionFlags Flags) const;
+
/// \returns True if the target wants to expand the given reduction intrinsic
/// into a shuffle sequence.
bool shouldExpandReduction(const IntrinsicInst *II) const;
@@ -1363,9 +1363,9 @@ public:
/// to a stack reload.
unsigned getGISelRematGlobalCost() const;
- /// \returns True if the target supports scalable vectors.
- bool supportsScalableVectors() const;
-
+ /// \returns True if the target supports scalable vectors.
+ bool supportsScalableVectors() const;
+
/// \name Vector Predication Information
/// @{
/// Whether the target supports the %evl parameter of VP intrinsic efficiently
@@ -1405,7 +1405,7 @@ public:
ArrayRef<const Value *> Operands,
TTI::TargetCostKind CostKind) = 0;
virtual unsigned getInliningThresholdMultiplier() = 0;
- virtual unsigned adjustInliningThreshold(const CallBase *CB) = 0;
+ virtual unsigned adjustInliningThreshold(const CallBase *CB) = 0;
virtual int getInlinerVectorBonusPercent() = 0;
virtual int getMemcpyCost(const Instruction *I) = 0;
virtual unsigned
@@ -1422,7 +1422,7 @@ public:
virtual bool collectFlatAddressOperands(SmallVectorImpl<int> &OpIndexes,
Intrinsic::ID IID) const = 0;
virtual bool isNoopAddrSpaceCast(unsigned FromAS, unsigned ToAS) const = 0;
- virtual unsigned getAssumedAddrSpace(const Value *V) const = 0;
+ virtual unsigned getAssumedAddrSpace(const Value *V) const = 0;
virtual Value *rewriteIntrinsicWithAddressSpace(IntrinsicInst *II,
Value *OldV,
Value *NewV) const = 0;
@@ -1440,17 +1440,17 @@ public:
AssumptionCache &AC, TargetLibraryInfo *TLI,
DominatorTree *DT, const LoopAccessInfo *LAI) = 0;
virtual bool emitGetActiveLaneMask() = 0;
- virtual Optional<Instruction *> instCombineIntrinsic(InstCombiner &IC,
- IntrinsicInst &II) = 0;
- virtual Optional<Value *>
- simplifyDemandedUseBitsIntrinsic(InstCombiner &IC, IntrinsicInst &II,
- APInt DemandedMask, KnownBits &Known,
- bool &KnownBitsComputed) = 0;
- virtual Optional<Value *> simplifyDemandedVectorEltsIntrinsic(
- InstCombiner &IC, IntrinsicInst &II, APInt DemandedElts, APInt &UndefElts,
- APInt &UndefElts2, APInt &UndefElts3,
- std::function<void(Instruction *, unsigned, APInt, APInt &)>
- SimplifyAndSetOp) = 0;
+ virtual Optional<Instruction *> instCombineIntrinsic(InstCombiner &IC,
+ IntrinsicInst &II) = 0;
+ virtual Optional<Value *>
+ simplifyDemandedUseBitsIntrinsic(InstCombiner &IC, IntrinsicInst &II,
+ APInt DemandedMask, KnownBits &Known,
+ bool &KnownBitsComputed) = 0;
+ virtual Optional<Value *> simplifyDemandedVectorEltsIntrinsic(
+ InstCombiner &IC, IntrinsicInst &II, APInt DemandedElts, APInt &UndefElts,
+ APInt &UndefElts2, APInt &UndefElts3,
+ std::function<void(Instruction *, unsigned, APInt, APInt &)>
+ SimplifyAndSetOp) = 0;
virtual bool isLegalAddImmediate(int64_t Imm) = 0;
virtual bool isLegalICmpImmediate(int64_t Imm) = 0;
virtual bool isLegalAddressingMode(Type *Ty, GlobalValue *BaseGV,
@@ -1459,7 +1459,7 @@ public:
Instruction *I) = 0;
virtual bool isLSRCostLess(TargetTransformInfo::LSRCost &C1,
TargetTransformInfo::LSRCost &C2) = 0;
- virtual bool isNumRegsMajorCostOfLSR() = 0;
+ virtual bool isNumRegsMajorCostOfLSR() = 0;
virtual bool isProfitableLSRChainElement(Instruction *I) = 0;
virtual bool canMacroFuseCmp() = 0;
virtual bool canSaveCmp(Loop *L, BranchInst **BI, ScalarEvolution *SE,
@@ -1486,7 +1486,7 @@ public:
virtual bool isProfitableToHoist(Instruction *I) = 0;
virtual bool useAA() = 0;
virtual bool isTypeLegal(Type *Ty) = 0;
- virtual unsigned getRegUsageForType(Type *Ty) = 0;
+ virtual unsigned getRegUsageForType(Type *Ty) = 0;
virtual bool shouldBuildLookupTables() = 0;
virtual bool shouldBuildLookupTablesForConstant(Constant *C) = 0;
virtual bool useColdCCForColdCall(Function &F) = 0;
@@ -1517,8 +1517,8 @@ public:
virtual int getIntImmCost(const APInt &Imm, Type *Ty,
TargetCostKind CostKind) = 0;
virtual int getIntImmCostInst(unsigned Opc, unsigned Idx, const APInt &Imm,
- Type *Ty, TargetCostKind CostKind,
- Instruction *Inst = nullptr) = 0;
+ Type *Ty, TargetCostKind CostKind,
+ Instruction *Inst = nullptr) = 0;
virtual int getIntImmCostIntrin(Intrinsic::ID IID, unsigned Idx,
const APInt &Imm, Type *Ty,
TargetCostKind CostKind) = 0;
@@ -1528,10 +1528,10 @@ public:
virtual const char *getRegisterClassName(unsigned ClassID) const = 0;
virtual unsigned getRegisterBitWidth(bool Vector) const = 0;
virtual unsigned getMinVectorRegisterBitWidth() = 0;
- virtual Optional<unsigned> getMaxVScale() const = 0;
+ virtual Optional<unsigned> getMaxVScale() const = 0;
virtual bool shouldMaximizeVectorBandwidth(bool OptSize) const = 0;
virtual unsigned getMinimumVF(unsigned ElemWidth) const = 0;
- virtual unsigned getMaximumVF(unsigned ElemWidth, unsigned Opcode) const = 0;
+ virtual unsigned getMaximumVF(unsigned ElemWidth, unsigned Opcode) const = 0;
virtual bool shouldConsiderAddressTypePromotion(
const Instruction &I, bool &AllowPromotionWithoutCommonHeader) = 0;
virtual unsigned getCacheLineSize() const = 0;
@@ -1573,7 +1573,7 @@ public:
virtual int getShuffleCost(ShuffleKind Kind, VectorType *Tp, int Index,
VectorType *SubTp) = 0;
virtual int getCastInstrCost(unsigned Opcode, Type *Dst, Type *Src,
- CastContextHint CCH,
+ CastContextHint CCH,
TTI::TargetCostKind CostKind,
const Instruction *I) = 0;
virtual int getExtractWithExtendCost(unsigned Opcode, Type *Dst,
@@ -1581,7 +1581,7 @@ public:
virtual int getCFInstrCost(unsigned Opcode,
TTI::TargetCostKind CostKind) = 0;
virtual int getCmpSelInstrCost(unsigned Opcode, Type *ValTy, Type *CondTy,
- CmpInst::Predicate VecPred,
+ CmpInst::Predicate VecPred,
TTI::TargetCostKind CostKind,
const Instruction *I) = 0;
virtual int getVectorInstrCost(unsigned Opcode, Type *Val,
@@ -1609,9 +1609,9 @@ public:
virtual int getMinMaxReductionCost(VectorType *Ty, VectorType *CondTy,
bool IsPairwiseForm, bool IsUnsigned,
TTI::TargetCostKind CostKind) = 0;
- virtual InstructionCost getExtendedAddReductionCost(
- bool IsMLA, bool IsUnsigned, Type *ResTy, VectorType *Ty,
- TTI::TargetCostKind CostKind = TTI::TCK_RecipThroughput) = 0;
+ virtual InstructionCost getExtendedAddReductionCost(
+ bool IsMLA, bool IsUnsigned, Type *ResTy, VectorType *Ty,
+ TTI::TargetCostKind CostKind = TTI::TCK_RecipThroughput) = 0;
virtual int getIntrinsicInstrCost(const IntrinsicCostAttributes &ICA,
TTI::TargetCostKind CostKind) = 0;
virtual int getCallInstrCost(Function *F, Type *RetTy,
@@ -1659,13 +1659,13 @@ public:
VectorType *VecTy) const = 0;
virtual bool useReductionIntrinsic(unsigned Opcode, Type *Ty,
ReductionFlags) const = 0;
- virtual bool preferInLoopReduction(unsigned Opcode, Type *Ty,
- ReductionFlags) const = 0;
- virtual bool preferPredicatedReductionSelect(unsigned Opcode, Type *Ty,
- ReductionFlags) const = 0;
+ virtual bool preferInLoopReduction(unsigned Opcode, Type *Ty,
+ ReductionFlags) const = 0;
+ virtual bool preferPredicatedReductionSelect(unsigned Opcode, Type *Ty,
+ ReductionFlags) const = 0;
virtual bool shouldExpandReduction(const IntrinsicInst *II) const = 0;
virtual unsigned getGISelRematGlobalCost() const = 0;
- virtual bool supportsScalableVectors() const = 0;
+ virtual bool supportsScalableVectors() const = 0;
virtual bool hasActiveVectorLength() const = 0;
virtual int getInstructionLatency(const Instruction *I) = 0;
};
@@ -1690,9 +1690,9 @@ public:
unsigned getInliningThresholdMultiplier() override {
return Impl.getInliningThresholdMultiplier();
}
- unsigned adjustInliningThreshold(const CallBase *CB) override {
- return Impl.adjustInliningThreshold(CB);
- }
+ unsigned adjustInliningThreshold(const CallBase *CB) override {
+ return Impl.adjustInliningThreshold(CB);
+ }
int getInlinerVectorBonusPercent() override {
return Impl.getInlinerVectorBonusPercent();
}
@@ -1726,10 +1726,10 @@ public:
return Impl.isNoopAddrSpaceCast(FromAS, ToAS);
}
- unsigned getAssumedAddrSpace(const Value *V) const override {
- return Impl.getAssumedAddrSpace(V);
- }
-
+ unsigned getAssumedAddrSpace(const Value *V) const override {
+ return Impl.getAssumedAddrSpace(V);
+ }
+
Value *rewriteIntrinsicWithAddressSpace(IntrinsicInst *II, Value *OldV,
Value *NewV) const override {
return Impl.rewriteIntrinsicWithAddressSpace(II, OldV, NewV);
@@ -1760,26 +1760,26 @@ public:
bool emitGetActiveLaneMask() override {
return Impl.emitGetActiveLaneMask();
}
- Optional<Instruction *> instCombineIntrinsic(InstCombiner &IC,
- IntrinsicInst &II) override {
- return Impl.instCombineIntrinsic(IC, II);
- }
- Optional<Value *>
- simplifyDemandedUseBitsIntrinsic(InstCombiner &IC, IntrinsicInst &II,
- APInt DemandedMask, KnownBits &Known,
- bool &KnownBitsComputed) override {
- return Impl.simplifyDemandedUseBitsIntrinsic(IC, II, DemandedMask, Known,
- KnownBitsComputed);
- }
- Optional<Value *> simplifyDemandedVectorEltsIntrinsic(
- InstCombiner &IC, IntrinsicInst &II, APInt DemandedElts, APInt &UndefElts,
- APInt &UndefElts2, APInt &UndefElts3,
- std::function<void(Instruction *, unsigned, APInt, APInt &)>
- SimplifyAndSetOp) override {
- return Impl.simplifyDemandedVectorEltsIntrinsic(
- IC, II, DemandedElts, UndefElts, UndefElts2, UndefElts3,
- SimplifyAndSetOp);
- }
+ Optional<Instruction *> instCombineIntrinsic(InstCombiner &IC,
+ IntrinsicInst &II) override {
+ return Impl.instCombineIntrinsic(IC, II);
+ }
+ Optional<Value *>
+ simplifyDemandedUseBitsIntrinsic(InstCombiner &IC, IntrinsicInst &II,
+ APInt DemandedMask, KnownBits &Known,
+ bool &KnownBitsComputed) override {
+ return Impl.simplifyDemandedUseBitsIntrinsic(IC, II, DemandedMask, Known,
+ KnownBitsComputed);
+ }
+ Optional<Value *> simplifyDemandedVectorEltsIntrinsic(
+ InstCombiner &IC, IntrinsicInst &II, APInt DemandedElts, APInt &UndefElts,
+ APInt &UndefElts2, APInt &UndefElts3,
+ std::function<void(Instruction *, unsigned, APInt, APInt &)>
+ SimplifyAndSetOp) override {
+ return Impl.simplifyDemandedVectorEltsIntrinsic(
+ IC, II, DemandedElts, UndefElts, UndefElts2, UndefElts3,
+ SimplifyAndSetOp);
+ }
bool isLegalAddImmediate(int64_t Imm) override {
return Impl.isLegalAddImmediate(Imm);
}
@@ -1796,9 +1796,9 @@ public:
TargetTransformInfo::LSRCost &C2) override {
return Impl.isLSRCostLess(C1, C2);
}
- bool isNumRegsMajorCostOfLSR() override {
- return Impl.isNumRegsMajorCostOfLSR();
- }
+ bool isNumRegsMajorCostOfLSR() override {
+ return Impl.isNumRegsMajorCostOfLSR();
+ }
bool isProfitableLSRChainElement(Instruction *I) override {
return Impl.isProfitableLSRChainElement(I);
}
@@ -1860,9 +1860,9 @@ public:
}
bool useAA() override { return Impl.useAA(); }
bool isTypeLegal(Type *Ty) override { return Impl.isTypeLegal(Ty); }
- unsigned getRegUsageForType(Type *Ty) override {
- return Impl.getRegUsageForType(Ty);
- }
+ unsigned getRegUsageForType(Type *Ty) override {
+ return Impl.getRegUsageForType(Ty);
+ }
bool shouldBuildLookupTables() override {
return Impl.shouldBuildLookupTables();
}
@@ -1927,10 +1927,10 @@ public:
TargetCostKind CostKind) override {
return Impl.getIntImmCost(Imm, Ty, CostKind);
}
- int getIntImmCostInst(unsigned Opc, unsigned Idx, const APInt &Imm, Type *Ty,
- TargetCostKind CostKind,
- Instruction *Inst = nullptr) override {
- return Impl.getIntImmCostInst(Opc, Idx, Imm, Ty, CostKind, Inst);
+ int getIntImmCostInst(unsigned Opc, unsigned Idx, const APInt &Imm, Type *Ty,
+ TargetCostKind CostKind,
+ Instruction *Inst = nullptr) override {
+ return Impl.getIntImmCostInst(Opc, Idx, Imm, Ty, CostKind, Inst);
}
int getIntImmCostIntrin(Intrinsic::ID IID, unsigned Idx, const APInt &Imm,
Type *Ty, TargetCostKind CostKind) override {
@@ -1952,18 +1952,18 @@ public:
unsigned getMinVectorRegisterBitWidth() override {
return Impl.getMinVectorRegisterBitWidth();
}
- Optional<unsigned> getMaxVScale() const override {
- return Impl.getMaxVScale();
- }
+ Optional<unsigned> getMaxVScale() const override {
+ return Impl.getMaxVScale();
+ }
bool shouldMaximizeVectorBandwidth(bool OptSize) const override {
return Impl.shouldMaximizeVectorBandwidth(OptSize);
}
unsigned getMinimumVF(unsigned ElemWidth) const override {
return Impl.getMinimumVF(ElemWidth);
}
- unsigned getMaximumVF(unsigned ElemWidth, unsigned Opcode) const override {
- return Impl.getMaximumVF(ElemWidth, Opcode);
- }
+ unsigned getMaximumVF(unsigned ElemWidth, unsigned Opcode) const override {
+ return Impl.getMaximumVF(ElemWidth, Opcode);
+ }
bool shouldConsiderAddressTypePromotion(
const Instruction &I, bool &AllowPromotionWithoutCommonHeader) override {
return Impl.shouldConsiderAddressTypePromotion(
@@ -2031,9 +2031,9 @@ public:
return Impl.getShuffleCost(Kind, Tp, Index, SubTp);
}
int getCastInstrCost(unsigned Opcode, Type *Dst, Type *Src,
- CastContextHint CCH, TTI::TargetCostKind CostKind,
+ CastContextHint CCH, TTI::TargetCostKind CostKind,
const Instruction *I) override {
- return Impl.getCastInstrCost(Opcode, Dst, Src, CCH, CostKind, I);
+ return Impl.getCastInstrCost(Opcode, Dst, Src, CCH, CostKind, I);
}
int getExtractWithExtendCost(unsigned Opcode, Type *Dst, VectorType *VecTy,
unsigned Index) override {
@@ -2043,10 +2043,10 @@ public:
return Impl.getCFInstrCost(Opcode, CostKind);
}
int getCmpSelInstrCost(unsigned Opcode, Type *ValTy, Type *CondTy,
- CmpInst::Predicate VecPred,
+ CmpInst::Predicate VecPred,
TTI::TargetCostKind CostKind,
const Instruction *I) override {
- return Impl.getCmpSelInstrCost(Opcode, ValTy, CondTy, VecPred, CostKind, I);
+ return Impl.getCmpSelInstrCost(Opcode, ValTy, CondTy, VecPred, CostKind, I);
}
int getVectorInstrCost(unsigned Opcode, Type *Val, unsigned Index) override {
return Impl.getVectorInstrCost(Opcode, Val, Index);
@@ -2092,12 +2092,12 @@ public:
return Impl.getMinMaxReductionCost(Ty, CondTy, IsPairwiseForm, IsUnsigned,
CostKind);
}
- InstructionCost getExtendedAddReductionCost(
- bool IsMLA, bool IsUnsigned, Type *ResTy, VectorType *Ty,
- TTI::TargetCostKind CostKind = TTI::TCK_RecipThroughput) override {
- return Impl.getExtendedAddReductionCost(IsMLA, IsUnsigned, ResTy, Ty,
- CostKind);
- }
+ InstructionCost getExtendedAddReductionCost(
+ bool IsMLA, bool IsUnsigned, Type *ResTy, VectorType *Ty,
+ TTI::TargetCostKind CostKind = TTI::TCK_RecipThroughput) override {
+ return Impl.getExtendedAddReductionCost(IsMLA, IsUnsigned, ResTy, Ty,
+ CostKind);
+ }
int getIntrinsicInstrCost(const IntrinsicCostAttributes &ICA,
TTI::TargetCostKind CostKind) override {
return Impl.getIntrinsicInstrCost(ICA, CostKind);
@@ -2191,14 +2191,14 @@ public:
ReductionFlags Flags) const override {
return Impl.useReductionIntrinsic(Opcode, Ty, Flags);
}
- bool preferInLoopReduction(unsigned Opcode, Type *Ty,
- ReductionFlags Flags) const override {
- return Impl.preferInLoopReduction(Opcode, Ty, Flags);
- }
- bool preferPredicatedReductionSelect(unsigned Opcode, Type *Ty,
- ReductionFlags Flags) const override {
- return Impl.preferPredicatedReductionSelect(Opcode, Ty, Flags);
- }
+ bool preferInLoopReduction(unsigned Opcode, Type *Ty,
+ ReductionFlags Flags) const override {
+ return Impl.preferInLoopReduction(Opcode, Ty, Flags);
+ }
+ bool preferPredicatedReductionSelect(unsigned Opcode, Type *Ty,
+ ReductionFlags Flags) const override {
+ return Impl.preferPredicatedReductionSelect(Opcode, Ty, Flags);
+ }
bool shouldExpandReduction(const IntrinsicInst *II) const override {
return Impl.shouldExpandReduction(II);
}
@@ -2207,10 +2207,10 @@ public:
return Impl.getGISelRematGlobalCost();
}
- bool supportsScalableVectors() const override {
- return Impl.supportsScalableVectors();
- }
-
+ bool supportsScalableVectors() const override {
+ return Impl.supportsScalableVectors();
+ }
+
bool hasActiveVectorLength() const override {
return Impl.hasActiveVectorLength();
}
diff --git a/contrib/libs/llvm12/include/llvm/Analysis/TargetTransformInfoImpl.h b/contrib/libs/llvm12/include/llvm/Analysis/TargetTransformInfoImpl.h
index 0627f8b1be..30212e7c4d 100644
--- a/contrib/libs/llvm12/include/llvm/Analysis/TargetTransformInfoImpl.h
+++ b/contrib/libs/llvm12/include/llvm/Analysis/TargetTransformInfoImpl.h
@@ -27,7 +27,7 @@
#include "llvm/IR/DataLayout.h"
#include "llvm/IR/Function.h"
#include "llvm/IR/GetElementPtrTypeIterator.h"
-#include "llvm/IR/IntrinsicInst.h"
+#include "llvm/IR/IntrinsicInst.h"
#include "llvm/IR/Operator.h"
#include "llvm/IR/Type.h"
@@ -53,7 +53,7 @@ public:
int getGEPCost(Type *PointeeType, const Value *Ptr,
ArrayRef<const Value *> Operands,
- TTI::TargetCostKind CostKind = TTI::TCK_SizeAndLatency) const {
+ TTI::TargetCostKind CostKind = TTI::TCK_SizeAndLatency) const {
// In the basic model, we just assume that all-constant GEPs will be folded
// into their uses via addressing modes.
for (unsigned Idx = 0, Size = Operands.size(); Idx != Size; ++Idx)
@@ -66,31 +66,31 @@ public:
unsigned getEstimatedNumberOfCaseClusters(const SwitchInst &SI,
unsigned &JTSize,
ProfileSummaryInfo *PSI,
- BlockFrequencyInfo *BFI) const {
+ BlockFrequencyInfo *BFI) const {
(void)PSI;
(void)BFI;
JTSize = 0;
return SI.getNumCases();
}
- unsigned getInliningThresholdMultiplier() const { return 1; }
- unsigned adjustInliningThreshold(const CallBase *CB) const { return 0; }
+ unsigned getInliningThresholdMultiplier() const { return 1; }
+ unsigned adjustInliningThreshold(const CallBase *CB) const { return 0; }
- int getInlinerVectorBonusPercent() const { return 150; }
+ int getInlinerVectorBonusPercent() const { return 150; }
- unsigned getMemcpyCost(const Instruction *I) const {
- return TTI::TCC_Expensive;
- }
+ unsigned getMemcpyCost(const Instruction *I) const {
+ return TTI::TCC_Expensive;
+ }
- bool hasBranchDivergence() const { return false; }
+ bool hasBranchDivergence() const { return false; }
- bool useGPUDivergenceAnalysis() const { return false; }
+ bool useGPUDivergenceAnalysis() const { return false; }
- bool isSourceOfDivergence(const Value *V) const { return false; }
+ bool isSourceOfDivergence(const Value *V) const { return false; }
- bool isAlwaysUniform(const Value *V) const { return false; }
+ bool isAlwaysUniform(const Value *V) const { return false; }
- unsigned getFlatAddressSpace() const { return -1; }
+ unsigned getFlatAddressSpace() const { return -1; }
bool collectFlatAddressOperands(SmallVectorImpl<int> &OpIndexes,
Intrinsic::ID IID) const {
@@ -99,14 +99,14 @@ public:
bool isNoopAddrSpaceCast(unsigned, unsigned) const { return false; }
- unsigned getAssumedAddrSpace(const Value *V) const { return -1; }
-
+ unsigned getAssumedAddrSpace(const Value *V) const { return -1; }
+
Value *rewriteIntrinsicWithAddressSpace(IntrinsicInst *II, Value *OldV,
Value *NewV) const {
return nullptr;
}
- bool isLoweredToCall(const Function *F) const {
+ bool isLoweredToCall(const Function *F) const {
assert(F && "A concrete function must be provided to this routine.");
// FIXME: These should almost certainly not be handled here, and instead
@@ -144,7 +144,7 @@ public:
bool isHardwareLoopProfitable(Loop *L, ScalarEvolution &SE,
AssumptionCache &AC, TargetLibraryInfo *LibInfo,
- HardwareLoopInfo &HWLoopInfo) const {
+ HardwareLoopInfo &HWLoopInfo) const {
return false;
}
@@ -159,60 +159,60 @@ public:
return false;
}
- Optional<Instruction *> instCombineIntrinsic(InstCombiner &IC,
- IntrinsicInst &II) const {
- return None;
- }
-
- Optional<Value *>
- simplifyDemandedUseBitsIntrinsic(InstCombiner &IC, IntrinsicInst &II,
- APInt DemandedMask, KnownBits &Known,
- bool &KnownBitsComputed) const {
- return None;
- }
-
- Optional<Value *> simplifyDemandedVectorEltsIntrinsic(
- InstCombiner &IC, IntrinsicInst &II, APInt DemandedElts, APInt &UndefElts,
- APInt &UndefElts2, APInt &UndefElts3,
- std::function<void(Instruction *, unsigned, APInt, APInt &)>
- SimplifyAndSetOp) const {
- return None;
- }
-
+ Optional<Instruction *> instCombineIntrinsic(InstCombiner &IC,
+ IntrinsicInst &II) const {
+ return None;
+ }
+
+ Optional<Value *>
+ simplifyDemandedUseBitsIntrinsic(InstCombiner &IC, IntrinsicInst &II,
+ APInt DemandedMask, KnownBits &Known,
+ bool &KnownBitsComputed) const {
+ return None;
+ }
+
+ Optional<Value *> simplifyDemandedVectorEltsIntrinsic(
+ InstCombiner &IC, IntrinsicInst &II, APInt DemandedElts, APInt &UndefElts,
+ APInt &UndefElts2, APInt &UndefElts3,
+ std::function<void(Instruction *, unsigned, APInt, APInt &)>
+ SimplifyAndSetOp) const {
+ return None;
+ }
+
void getUnrollingPreferences(Loop *, ScalarEvolution &,
- TTI::UnrollingPreferences &) const {}
+ TTI::UnrollingPreferences &) const {}
void getPeelingPreferences(Loop *, ScalarEvolution &,
- TTI::PeelingPreferences &) const {}
+ TTI::PeelingPreferences &) const {}
- bool isLegalAddImmediate(int64_t Imm) const { return false; }
+ bool isLegalAddImmediate(int64_t Imm) const { return false; }
- bool isLegalICmpImmediate(int64_t Imm) const { return false; }
+ bool isLegalICmpImmediate(int64_t Imm) const { return false; }
bool isLegalAddressingMode(Type *Ty, GlobalValue *BaseGV, int64_t BaseOffset,
bool HasBaseReg, int64_t Scale, unsigned AddrSpace,
- Instruction *I = nullptr) const {
+ Instruction *I = nullptr) const {
// Guess that only reg and reg+reg addressing is allowed. This heuristic is
// taken from the implementation of LSR.
return !BaseGV && BaseOffset == 0 && (Scale == 0 || Scale == 1);
}
- bool isLSRCostLess(TTI::LSRCost &C1, TTI::LSRCost &C2) const {
+ bool isLSRCostLess(TTI::LSRCost &C1, TTI::LSRCost &C2) const {
return std::tie(C1.NumRegs, C1.AddRecCost, C1.NumIVMuls, C1.NumBaseAdds,
C1.ScaleCost, C1.ImmCost, C1.SetupCost) <
std::tie(C2.NumRegs, C2.AddRecCost, C2.NumIVMuls, C2.NumBaseAdds,
C2.ScaleCost, C2.ImmCost, C2.SetupCost);
}
- bool isNumRegsMajorCostOfLSR() const { return true; }
-
- bool isProfitableLSRChainElement(Instruction *I) const { return false; }
+ bool isNumRegsMajorCostOfLSR() const { return true; }
- bool canMacroFuseCmp() const { return false; }
+ bool isProfitableLSRChainElement(Instruction *I) const { return false; }
+ bool canMacroFuseCmp() const { return false; }
+
bool canSaveCmp(Loop *L, BranchInst **BI, ScalarEvolution *SE, LoopInfo *LI,
DominatorTree *DT, AssumptionCache *AC,
- TargetLibraryInfo *LibInfo) const {
+ TargetLibraryInfo *LibInfo) const {
return false;
}
@@ -220,51 +220,51 @@ public:
bool shouldFavorBackedgeIndex(const Loop *L) const { return false; }
- bool isLegalMaskedStore(Type *DataType, Align Alignment) const {
- return false;
- }
+ bool isLegalMaskedStore(Type *DataType, Align Alignment) const {
+ return false;
+ }
- bool isLegalMaskedLoad(Type *DataType, Align Alignment) const {
- return false;
- }
+ bool isLegalMaskedLoad(Type *DataType, Align Alignment) const {
+ return false;
+ }
- bool isLegalNTStore(Type *DataType, Align Alignment) const {
+ bool isLegalNTStore(Type *DataType, Align Alignment) const {
// By default, assume nontemporal memory stores are available for stores
// that are aligned and have a size that is a power of 2.
unsigned DataSize = DL.getTypeStoreSize(DataType);
return Alignment >= DataSize && isPowerOf2_32(DataSize);
}
- bool isLegalNTLoad(Type *DataType, Align Alignment) const {
+ bool isLegalNTLoad(Type *DataType, Align Alignment) const {
// By default, assume nontemporal memory loads are available for loads that
// are aligned and have a size that is a power of 2.
unsigned DataSize = DL.getTypeStoreSize(DataType);
return Alignment >= DataSize && isPowerOf2_32(DataSize);
}
- bool isLegalMaskedScatter(Type *DataType, Align Alignment) const {
- return false;
- }
+ bool isLegalMaskedScatter(Type *DataType, Align Alignment) const {
+ return false;
+ }
- bool isLegalMaskedGather(Type *DataType, Align Alignment) const {
- return false;
- }
+ bool isLegalMaskedGather(Type *DataType, Align Alignment) const {
+ return false;
+ }
- bool isLegalMaskedCompressStore(Type *DataType) const { return false; }
+ bool isLegalMaskedCompressStore(Type *DataType) const { return false; }
- bool isLegalMaskedExpandLoad(Type *DataType) const { return false; }
+ bool isLegalMaskedExpandLoad(Type *DataType) const { return false; }
- bool hasDivRemOp(Type *DataType, bool IsSigned) const { return false; }
+ bool hasDivRemOp(Type *DataType, bool IsSigned) const { return false; }
- bool hasVolatileVariant(Instruction *I, unsigned AddrSpace) const {
- return false;
- }
+ bool hasVolatileVariant(Instruction *I, unsigned AddrSpace) const {
+ return false;
+ }
- bool prefersVectorizedAddressing() const { return true; }
+ bool prefersVectorizedAddressing() const { return true; }
int getScalingFactorCost(Type *Ty, GlobalValue *BaseGV, int64_t BaseOffset,
- bool HasBaseReg, int64_t Scale,
- unsigned AddrSpace) const {
+ bool HasBaseReg, int64_t Scale,
+ unsigned AddrSpace) const {
// Guess that all legal addressing mode are free.
if (isLegalAddressingMode(Ty, BaseGV, BaseOffset, HasBaseReg, Scale,
AddrSpace))
@@ -272,87 +272,87 @@ public:
return -1;
}
- bool LSRWithInstrQueries() const { return false; }
-
- bool isTruncateFree(Type *Ty1, Type *Ty2) const { return false; }
+ bool LSRWithInstrQueries() const { return false; }
- bool isProfitableToHoist(Instruction *I) const { return true; }
+ bool isTruncateFree(Type *Ty1, Type *Ty2) const { return false; }
- bool useAA() const { return false; }
+ bool isProfitableToHoist(Instruction *I) const { return true; }
- bool isTypeLegal(Type *Ty) const { return false; }
+ bool useAA() const { return false; }
- unsigned getRegUsageForType(Type *Ty) const { return 1; }
+ bool isTypeLegal(Type *Ty) const { return false; }
- bool shouldBuildLookupTables() const { return true; }
- bool shouldBuildLookupTablesForConstant(Constant *C) const { return true; }
+ unsigned getRegUsageForType(Type *Ty) const { return 1; }
- bool useColdCCForColdCall(Function &F) const { return false; }
+ bool shouldBuildLookupTables() const { return true; }
+ bool shouldBuildLookupTablesForConstant(Constant *C) const { return true; }
+ bool useColdCCForColdCall(Function &F) const { return false; }
+
unsigned getScalarizationOverhead(VectorType *Ty, const APInt &DemandedElts,
- bool Insert, bool Extract) const {
+ bool Insert, bool Extract) const {
return 0;
}
unsigned getOperandsScalarizationOverhead(ArrayRef<const Value *> Args,
- unsigned VF) const {
+ unsigned VF) const {
return 0;
}
- bool supportsEfficientVectorElementLoadStore() const { return false; }
+ bool supportsEfficientVectorElementLoadStore() const { return false; }
- bool enableAggressiveInterleaving(bool LoopHasReductions) const {
- return false;
- }
+ bool enableAggressiveInterleaving(bool LoopHasReductions) const {
+ return false;
+ }
TTI::MemCmpExpansionOptions enableMemCmpExpansion(bool OptSize,
bool IsZeroCmp) const {
return {};
}
- bool enableInterleavedAccessVectorization() const { return false; }
+ bool enableInterleavedAccessVectorization() const { return false; }
- bool enableMaskedInterleavedAccessVectorization() const { return false; }
+ bool enableMaskedInterleavedAccessVectorization() const { return false; }
- bool isFPVectorizationPotentiallyUnsafe() const { return false; }
+ bool isFPVectorizationPotentiallyUnsafe() const { return false; }
bool allowsMisalignedMemoryAccesses(LLVMContext &Context, unsigned BitWidth,
unsigned AddressSpace, unsigned Alignment,
- bool *Fast) const {
+ bool *Fast) const {
return false;
}
- TTI::PopcntSupportKind getPopcntSupport(unsigned IntTyWidthInBit) const {
+ TTI::PopcntSupportKind getPopcntSupport(unsigned IntTyWidthInBit) const {
return TTI::PSK_Software;
}
- bool haveFastSqrt(Type *Ty) const { return false; }
+ bool haveFastSqrt(Type *Ty) const { return false; }
- bool isFCmpOrdCheaperThanFCmpZero(Type *Ty) const { return true; }
+ bool isFCmpOrdCheaperThanFCmpZero(Type *Ty) const { return true; }
- unsigned getFPOpCost(Type *Ty) const {
- return TargetTransformInfo::TCC_Basic;
- }
+ unsigned getFPOpCost(Type *Ty) const {
+ return TargetTransformInfo::TCC_Basic;
+ }
int getIntImmCodeSizeCost(unsigned Opcode, unsigned Idx, const APInt &Imm,
- Type *Ty) const {
+ Type *Ty) const {
return 0;
}
unsigned getIntImmCost(const APInt &Imm, Type *Ty,
- TTI::TargetCostKind CostKind) const {
+ TTI::TargetCostKind CostKind) const {
return TTI::TCC_Basic;
}
unsigned getIntImmCostInst(unsigned Opcode, unsigned Idx, const APInt &Imm,
- Type *Ty, TTI::TargetCostKind CostKind,
- Instruction *Inst = nullptr) const {
+ Type *Ty, TTI::TargetCostKind CostKind,
+ Instruction *Inst = nullptr) const {
return TTI::TCC_Free;
}
unsigned getIntImmCostIntrin(Intrinsic::ID IID, unsigned Idx,
const APInt &Imm, Type *Ty,
- TTI::TargetCostKind CostKind) const {
+ TTI::TargetCostKind CostKind) const {
return TTI::TCC_Free;
}
@@ -375,18 +375,18 @@ public:
unsigned getRegisterBitWidth(bool Vector) const { return 32; }
- unsigned getMinVectorRegisterBitWidth() const { return 128; }
-
- Optional<unsigned> getMaxVScale() const { return None; }
+ unsigned getMinVectorRegisterBitWidth() const { return 128; }
+ Optional<unsigned> getMaxVScale() const { return None; }
+
bool shouldMaximizeVectorBandwidth(bool OptSize) const { return false; }
unsigned getMinimumVF(unsigned ElemWidth) const { return 0; }
- unsigned getMaximumVF(unsigned ElemWidth, unsigned Opcode) const { return 0; }
-
- bool shouldConsiderAddressTypePromotion(
- const Instruction &I, bool &AllowPromotionWithoutCommonHeader) const {
+ unsigned getMaximumVF(unsigned ElemWidth, unsigned Opcode) const { return 0; }
+
+ bool shouldConsiderAddressTypePromotion(
+ const Instruction &I, bool &AllowPromotionWithoutCommonHeader) const {
AllowPromotionWithoutCommonHeader = false;
return false;
}
@@ -425,7 +425,7 @@ public:
unsigned getMaxPrefetchIterationsAhead() const { return UINT_MAX; }
bool enableWritePrefetching() const { return false; }
- unsigned getMaxInterleaveFactor(unsigned VF) const { return 1; }
+ unsigned getMaxInterleaveFactor(unsigned VF) const { return 1; }
unsigned getArithmeticInstrCost(unsigned Opcode, Type *Ty,
TTI::TargetCostKind CostKind,
@@ -434,7 +434,7 @@ public:
TTI::OperandValueProperties Opd1PropInfo,
TTI::OperandValueProperties Opd2PropInfo,
ArrayRef<const Value *> Args,
- const Instruction *CxtI = nullptr) const {
+ const Instruction *CxtI = nullptr) const {
// FIXME: A number of transformation tests seem to require these values
// which seems a little odd for how arbitary there are.
switch (Opcode) {
@@ -453,14 +453,14 @@ public:
}
unsigned getShuffleCost(TTI::ShuffleKind Kind, VectorType *Ty, int Index,
- VectorType *SubTp) const {
+ VectorType *SubTp) const {
return 1;
}
unsigned getCastInstrCost(unsigned Opcode, Type *Dst, Type *Src,
- TTI::CastContextHint CCH,
+ TTI::CastContextHint CCH,
TTI::TargetCostKind CostKind,
- const Instruction *I) const {
+ const Instruction *I) const {
switch (Opcode) {
default:
break;
@@ -483,24 +483,24 @@ public:
// Identity and pointer-to-pointer casts are free.
return 0;
break;
- case Instruction::Trunc: {
+ case Instruction::Trunc: {
// trunc to a native type is free (assuming the target has compare and
// shift-right of the same width).
- TypeSize DstSize = DL.getTypeSizeInBits(Dst);
- if (!DstSize.isScalable() && DL.isLegalInteger(DstSize.getFixedSize()))
+ TypeSize DstSize = DL.getTypeSizeInBits(Dst);
+ if (!DstSize.isScalable() && DL.isLegalInteger(DstSize.getFixedSize()))
return 0;
break;
}
- }
+ }
return 1;
}
unsigned getExtractWithExtendCost(unsigned Opcode, Type *Dst,
- VectorType *VecTy, unsigned Index) const {
+ VectorType *VecTy, unsigned Index) const {
return 1;
}
- unsigned getCFInstrCost(unsigned Opcode, TTI::TargetCostKind CostKind) const {
+ unsigned getCFInstrCost(unsigned Opcode, TTI::TargetCostKind CostKind) const {
// A phi would be free, unless we're costing the throughput because it
// will require a register.
if (Opcode == Instruction::PHI && CostKind != TTI::TCK_RecipThroughput)
@@ -509,14 +509,14 @@ public:
}
unsigned getCmpSelInstrCost(unsigned Opcode, Type *ValTy, Type *CondTy,
- CmpInst::Predicate VecPred,
+ CmpInst::Predicate VecPred,
TTI::TargetCostKind CostKind,
const Instruction *I) const {
return 1;
}
- unsigned getVectorInstrCost(unsigned Opcode, Type *Val,
- unsigned Index) const {
+ unsigned getVectorInstrCost(unsigned Opcode, Type *Val,
+ unsigned Index) const {
return 1;
}
@@ -528,33 +528,33 @@ public:
unsigned getMaskedMemoryOpCost(unsigned Opcode, Type *Src, Align Alignment,
unsigned AddressSpace,
- TTI::TargetCostKind CostKind) const {
+ TTI::TargetCostKind CostKind) const {
return 1;
}
unsigned getGatherScatterOpCost(unsigned Opcode, Type *DataTy,
const Value *Ptr, bool VariableMask,
Align Alignment, TTI::TargetCostKind CostKind,
- const Instruction *I = nullptr) const {
+ const Instruction *I = nullptr) const {
return 1;
}
unsigned getInterleavedMemoryOpCost(
unsigned Opcode, Type *VecTy, unsigned Factor, ArrayRef<unsigned> Indices,
Align Alignment, unsigned AddressSpace, TTI::TargetCostKind CostKind,
- bool UseMaskForCond, bool UseMaskForGaps) const {
+ bool UseMaskForCond, bool UseMaskForGaps) const {
return 1;
}
unsigned getIntrinsicInstrCost(const IntrinsicCostAttributes &ICA,
- TTI::TargetCostKind CostKind) const {
+ TTI::TargetCostKind CostKind) const {
switch (ICA.getID()) {
default:
break;
case Intrinsic::annotation:
case Intrinsic::assume:
case Intrinsic::sideeffect:
- case Intrinsic::pseudoprobe:
+ case Intrinsic::pseudoprobe:
case Intrinsic::dbg_declare:
case Intrinsic::dbg_value:
case Intrinsic::dbg_label:
@@ -565,7 +565,7 @@ public:
case Intrinsic::is_constant:
case Intrinsic::lifetime_start:
case Intrinsic::lifetime_end:
- case Intrinsic::experimental_noalias_scope_decl:
+ case Intrinsic::experimental_noalias_scope_decl:
case Intrinsic::objectsize:
case Intrinsic::ptr_annotation:
case Intrinsic::var_annotation:
@@ -587,38 +587,38 @@ public:
}
unsigned getCallInstrCost(Function *F, Type *RetTy, ArrayRef<Type *> Tys,
- TTI::TargetCostKind CostKind) const {
+ TTI::TargetCostKind CostKind) const {
return 1;
}
- unsigned getNumberOfParts(Type *Tp) const { return 0; }
+ unsigned getNumberOfParts(Type *Tp) const { return 0; }
unsigned getAddressComputationCost(Type *Tp, ScalarEvolution *,
- const SCEV *) const {
+ const SCEV *) const {
return 0;
}
unsigned getArithmeticReductionCost(unsigned, VectorType *, bool,
- TTI::TargetCostKind) const {
- return 1;
- }
+ TTI::TargetCostKind) const {
+ return 1;
+ }
unsigned getMinMaxReductionCost(VectorType *, VectorType *, bool, bool,
- TTI::TargetCostKind) const {
- return 1;
- }
-
- InstructionCost getExtendedAddReductionCost(
- bool IsMLA, bool IsUnsigned, Type *ResTy, VectorType *Ty,
- TTI::TargetCostKind CostKind = TTI::TCK_RecipThroughput) const {
- return 1;
- }
-
- unsigned getCostOfKeepingLiveOverCall(ArrayRef<Type *> Tys) const {
- return 0;
- }
-
- bool getTgtMemIntrinsic(IntrinsicInst *Inst, MemIntrinsicInfo &Info) const {
+ TTI::TargetCostKind) const {
+ return 1;
+ }
+
+ InstructionCost getExtendedAddReductionCost(
+ bool IsMLA, bool IsUnsigned, Type *ResTy, VectorType *Ty,
+ TTI::TargetCostKind CostKind = TTI::TCK_RecipThroughput) const {
+ return 1;
+ }
+
+ unsigned getCostOfKeepingLiveOverCall(ArrayRef<Type *> Tys) const {
+ return 0;
+ }
+
+ bool getTgtMemIntrinsic(IntrinsicInst *Inst, MemIntrinsicInfo &Info) const {
return false;
}
@@ -632,7 +632,7 @@ public:
}
Value *getOrCreateResultFromMemIntrinsic(IntrinsicInst *Inst,
- Type *ExpectedType) const {
+ Type *ExpectedType) const {
return nullptr;
}
@@ -710,34 +710,34 @@ public:
return false;
}
- bool preferInLoopReduction(unsigned Opcode, Type *Ty,
- TTI::ReductionFlags Flags) const {
- return false;
- }
-
- bool preferPredicatedReductionSelect(unsigned Opcode, Type *Ty,
- TTI::ReductionFlags Flags) const {
- return false;
- }
-
+ bool preferInLoopReduction(unsigned Opcode, Type *Ty,
+ TTI::ReductionFlags Flags) const {
+ return false;
+ }
+
+ bool preferPredicatedReductionSelect(unsigned Opcode, Type *Ty,
+ TTI::ReductionFlags Flags) const {
+ return false;
+ }
+
bool shouldExpandReduction(const IntrinsicInst *II) const { return true; }
unsigned getGISelRematGlobalCost() const { return 1; }
- bool supportsScalableVectors() const { return false; }
-
+ bool supportsScalableVectors() const { return false; }
+
bool hasActiveVectorLength() const { return false; }
protected:
// Obtain the minimum required size to hold the value (without the sign)
// In case of a vector it returns the min required size for one element.
- unsigned minRequiredElementSize(const Value *Val, bool &isSigned) const {
+ unsigned minRequiredElementSize(const Value *Val, bool &isSigned) const {
if (isa<ConstantDataVector>(Val) || isa<ConstantVector>(Val)) {
const auto *VectorValue = cast<Constant>(Val);
// In case of a vector need to pick the max between the min
// required size for each element
- auto *VT = cast<FixedVectorType>(Val->getType());
+ auto *VT = cast<FixedVectorType>(Val->getType());
// Assume unsigned elements
isSigned = false;
@@ -785,12 +785,12 @@ protected:
return Val->getType()->getScalarSizeInBits();
}
- bool isStridedAccess(const SCEV *Ptr) const {
+ bool isStridedAccess(const SCEV *Ptr) const {
return Ptr && isa<SCEVAddRecExpr>(Ptr);
}
const SCEVConstant *getConstantStrideStep(ScalarEvolution *SE,
- const SCEV *Ptr) const {
+ const SCEV *Ptr) const {
if (!isStridedAccess(Ptr))
return nullptr;
const SCEVAddRecExpr *AddRec = cast<SCEVAddRecExpr>(Ptr);
@@ -798,7 +798,7 @@ protected:
}
bool isConstantStridedAccessLessThan(ScalarEvolution *SE, const SCEV *Ptr,
- int64_t MergeDistance) const {
+ int64_t MergeDistance) const {
const SCEVConstant *Step = getConstantStrideStep(SE, Ptr);
if (!Step)
return false;
@@ -860,12 +860,12 @@ public:
uint64_t Field = ConstIdx->getZExtValue();
BaseOffset += DL.getStructLayout(STy)->getElementOffset(Field);
} else {
- // If this operand is a scalable type, bail out early.
- // TODO: handle scalable vectors
- if (isa<ScalableVectorType>(TargetType))
- return TTI::TCC_Basic;
- int64_t ElementSize =
- DL.getTypeAllocSize(GTI.getIndexedType()).getFixedSize();
+ // If this operand is a scalable type, bail out early.
+ // TODO: handle scalable vectors
+ if (isa<ScalableVectorType>(TargetType))
+ return TTI::TCC_Basic;
+ int64_t ElementSize =
+ DL.getTypeAllocSize(GTI.getIndexedType()).getFixedSize();
if (ConstIdx) {
BaseOffset +=
ConstIdx->getValue().sextOrTrunc(PtrSizeBits) * ElementSize;
@@ -890,17 +890,17 @@ public:
int getUserCost(const User *U, ArrayRef<const Value *> Operands,
TTI::TargetCostKind CostKind) {
auto *TargetTTI = static_cast<T *>(this);
- // Handle non-intrinsic calls, invokes, and callbr.
+ // Handle non-intrinsic calls, invokes, and callbr.
// FIXME: Unlikely to be true for anything but CodeSize.
- auto *CB = dyn_cast<CallBase>(U);
- if (CB && !isa<IntrinsicInst>(U)) {
- if (const Function *F = CB->getCalledFunction()) {
+ auto *CB = dyn_cast<CallBase>(U);
+ if (CB && !isa<IntrinsicInst>(U)) {
+ if (const Function *F = CB->getCalledFunction()) {
if (!TargetTTI->isLoweredToCall(F))
return TTI::TCC_Basic; // Give a basic cost if it will be lowered
- return TTI::TCC_Basic * (F->getFunctionType()->getNumParams() + 1);
+ return TTI::TCC_Basic * (F->getFunctionType()->getNumParams() + 1);
}
- // For indirect or other calls, scale cost by number of arguments.
+ // For indirect or other calls, scale cost by number of arguments.
return TTI::TCC_Basic * (CB->arg_size() + 1);
}
@@ -912,12 +912,12 @@ public:
switch (Opcode) {
default:
break;
- case Instruction::Call: {
- assert(isa<IntrinsicInst>(U) && "Unexpected non-intrinsic call");
- auto *Intrinsic = cast<IntrinsicInst>(U);
- IntrinsicCostAttributes CostAttrs(Intrinsic->getIntrinsicID(), *CB);
- return TargetTTI->getIntrinsicInstrCost(CostAttrs, CostKind);
- }
+ case Instruction::Call: {
+ assert(isa<IntrinsicInst>(U) && "Unexpected non-intrinsic call");
+ auto *Intrinsic = cast<IntrinsicInst>(U);
+ IntrinsicCostAttributes CostAttrs(Intrinsic->getIntrinsicID(), *CB);
+ return TargetTTI->getIntrinsicInstrCost(CostAttrs, CostKind);
+ }
case Instruction::Br:
case Instruction::Ret:
case Instruction::PHI:
@@ -978,8 +978,8 @@ public:
case Instruction::SExt:
case Instruction::ZExt:
case Instruction::AddrSpaceCast:
- return TargetTTI->getCastInstrCost(
- Opcode, Ty, OpTy, TTI::getCastContextHint(I), CostKind, I);
+ return TargetTTI->getCastInstrCost(
+ Opcode, Ty, OpTy, TTI::getCastContextHint(I), CostKind, I);
case Instruction::Store: {
auto *SI = cast<StoreInst>(U);
Type *ValTy = U->getOperand(0)->getType();
@@ -996,16 +996,16 @@ public:
case Instruction::Select: {
Type *CondTy = U->getOperand(0)->getType();
return TargetTTI->getCmpSelInstrCost(Opcode, U->getType(), CondTy,
- CmpInst::BAD_ICMP_PREDICATE,
+ CmpInst::BAD_ICMP_PREDICATE,
CostKind, I);
}
case Instruction::ICmp:
case Instruction::FCmp: {
Type *ValTy = U->getOperand(0)->getType();
- // TODO: Also handle ICmp/FCmp constant expressions.
+ // TODO: Also handle ICmp/FCmp constant expressions.
return TargetTTI->getCmpSelInstrCost(Opcode, ValTy, U->getType(),
- I ? cast<CmpInst>(I)->getPredicate()
- : CmpInst::BAD_ICMP_PREDICATE,
+ I ? cast<CmpInst>(I)->getPredicate()
+ : CmpInst::BAD_ICMP_PREDICATE,
CostKind, I);
}
case Instruction::InsertElement: {
@@ -1057,23 +1057,23 @@ public:
if (CI)
Idx = CI->getZExtValue();
- // Try to match a reduction (a series of shufflevector and vector ops
- // followed by an extractelement).
- unsigned RdxOpcode;
- VectorType *RdxType;
- bool IsPairwise;
- switch (TTI::matchVectorReduction(EEI, RdxOpcode, RdxType, IsPairwise)) {
+ // Try to match a reduction (a series of shufflevector and vector ops
+ // followed by an extractelement).
+ unsigned RdxOpcode;
+ VectorType *RdxType;
+ bool IsPairwise;
+ switch (TTI::matchVectorReduction(EEI, RdxOpcode, RdxType, IsPairwise)) {
case TTI::RK_Arithmetic:
- return TargetTTI->getArithmeticReductionCost(RdxOpcode, RdxType,
- IsPairwise, CostKind);
+ return TargetTTI->getArithmeticReductionCost(RdxOpcode, RdxType,
+ IsPairwise, CostKind);
case TTI::RK_MinMax:
return TargetTTI->getMinMaxReductionCost(
- RdxType, cast<VectorType>(CmpInst::makeCmpResultType(RdxType)),
- IsPairwise, /*IsUnsigned=*/false, CostKind);
+ RdxType, cast<VectorType>(CmpInst::makeCmpResultType(RdxType)),
+ IsPairwise, /*IsUnsigned=*/false, CostKind);
case TTI::RK_UnsignedMinMax:
return TargetTTI->getMinMaxReductionCost(
- RdxType, cast<VectorType>(CmpInst::makeCmpResultType(RdxType)),
- IsPairwise, /*IsUnsigned=*/true, CostKind);
+ RdxType, cast<VectorType>(CmpInst::makeCmpResultType(RdxType)),
+ IsPairwise, /*IsUnsigned=*/true, CostKind);
case TTI::RK_None:
break;
}
@@ -1086,7 +1086,7 @@ public:
}
int getInstructionLatency(const Instruction *I) {
- SmallVector<const Value *, 4> Operands(I->operand_values());
+ SmallVector<const Value *, 4> Operands(I->operand_values());
if (getUserCost(I, Operands, TTI::TCK_Latency) == TTI::TCC_Free)
return 0;
diff --git a/contrib/libs/llvm12/include/llvm/Analysis/Utils/ImportedFunctionsInliningStatistics.h b/contrib/libs/llvm12/include/llvm/Analysis/Utils/ImportedFunctionsInliningStatistics.h
index 3fc8df0c75..bf20189de3 100644
--- a/contrib/libs/llvm12/include/llvm/Analysis/Utils/ImportedFunctionsInliningStatistics.h
+++ b/contrib/libs/llvm12/include/llvm/Analysis/Utils/ImportedFunctionsInliningStatistics.h
@@ -1,123 +1,123 @@
-#pragma once
-
-#ifdef __GNUC__
-#pragma GCC diagnostic push
-#pragma GCC diagnostic ignored "-Wunused-parameter"
-#endif
-
-//===-- ImportedFunctionsInliningStatistics.h -------------------*- C++ -*-===//
-//
-// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
-// See https://llvm.org/LICENSE.txt for license information.
-// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
-//
-//===----------------------------------------------------------------------===//
-// Generating inliner statistics for imported functions, mostly useful for
-// ThinLTO.
-//===----------------------------------------------------------------------===//
-
-#ifndef LLVM_TRANSFORMS_UTILS_IMPORTEDFUNCTIONSINLININGSTATISTICS_H
-#define LLVM_TRANSFORMS_UTILS_IMPORTEDFUNCTIONSINLININGSTATISTICS_H
-
-#include "llvm/ADT/SmallVector.h"
-#include "llvm/ADT/StringMap.h"
-#include "llvm/ADT/StringRef.h"
-#include <string>
-#include <vector>
-
-namespace llvm {
-class Module;
-class Function;
-/// Calculate and dump ThinLTO specific inliner stats.
-/// The main statistics are:
-/// (1) Number of inlined imported functions,
-/// (2) Number of imported functions inlined into importing module (indirect),
-/// (3) Number of non imported functions inlined into importing module
-/// (indirect).
-/// The difference between first and the second is that first stat counts
-/// all performed inlines on imported functions, but the second one only the
-/// functions that have been eventually inlined to a function in the importing
-/// module (by a chain of inlines). Because llvm uses bottom-up inliner, it is
-/// possible to e.g. import function `A`, `B` and then inline `B` to `A`,
-/// and after this `A` might be too big to be inlined into some other function
-/// that calls it. It calculates this statistic by building graph, where
-/// the nodes are functions, and edges are performed inlines and then by marking
-/// the edges starting from not imported function.
-///
-/// If `Verbose` is set to true, then it also dumps statistics
-/// per each inlined function, sorted by the greatest inlines count like
-/// - number of performed inlines
-/// - number of performed inlines to importing module
-class ImportedFunctionsInliningStatistics {
-private:
- /// InlineGraphNode represents node in graph of inlined functions.
- struct InlineGraphNode {
- // Default-constructible and movable.
- InlineGraphNode() = default;
- InlineGraphNode(InlineGraphNode &&) = default;
- InlineGraphNode &operator=(InlineGraphNode &&) = default;
-
- llvm::SmallVector<InlineGraphNode *, 8> InlinedCallees;
- /// Incremented every direct inline.
- int32_t NumberOfInlines = 0;
- /// Number of inlines into non imported function (possibly indirect via
- /// intermediate inlines). Computed based on graph search.
- int32_t NumberOfRealInlines = 0;
- bool Imported = false;
- bool Visited = false;
- };
-
-public:
- ImportedFunctionsInliningStatistics() = default;
- ImportedFunctionsInliningStatistics(
- const ImportedFunctionsInliningStatistics &) = delete;
-
- /// Set information like AllFunctions, ImportedFunctions, ModuleName.
- void setModuleInfo(const Module &M);
- /// Record inline of @param Callee to @param Caller for statistis.
- void recordInline(const Function &Caller, const Function &Callee);
- /// Dump stats computed with InlinerStatistics class.
- /// If @param Verbose is true then separate statistics for every inlined
- /// function will be printed.
- void dump(bool Verbose);
-
-private:
- /// Creates new Node in NodeMap and sets attributes, or returns existed one.
- InlineGraphNode &createInlineGraphNode(const Function &);
- void calculateRealInlines();
- void dfs(InlineGraphNode &GraphNode);
-
- using NodesMapTy =
- llvm::StringMap<std::unique_ptr<InlineGraphNode>>;
- using SortedNodesTy =
- std::vector<const NodesMapTy::MapEntryTy*>;
- /// Returns vector of elements sorted by
- /// (-NumberOfInlines, -NumberOfRealInlines, FunctionName).
- SortedNodesTy getSortedNodes();
-
-private:
- /// This map manage life of all InlineGraphNodes. Unique pointer to
- /// InlineGraphNode used since the node pointers are also saved in the
- /// InlinedCallees vector. If it would store InlineGraphNode instead then the
- /// address of the node would not be invariant.
- NodesMapTy NodesMap;
- /// Non external functions that have some other function inlined inside.
- std::vector<StringRef> NonImportedCallers;
- int AllFunctions = 0;
- int ImportedFunctions = 0;
- StringRef ModuleName;
-};
-
-enum class InlinerFunctionImportStatsOpts {
- No = 0,
- Basic = 1,
- Verbose = 2,
-};
-
-} // llvm
-
-#endif // LLVM_TRANSFORMS_UTILS_IMPORTEDFUNCTIONSINLININGSTATISTICS_H
-
-#ifdef __GNUC__
-#pragma GCC diagnostic pop
-#endif
+#pragma once
+
+#ifdef __GNUC__
+#pragma GCC diagnostic push
+#pragma GCC diagnostic ignored "-Wunused-parameter"
+#endif
+
+//===-- ImportedFunctionsInliningStatistics.h -------------------*- C++ -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+// Generating inliner statistics for imported functions, mostly useful for
+// ThinLTO.
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_TRANSFORMS_UTILS_IMPORTEDFUNCTIONSINLININGSTATISTICS_H
+#define LLVM_TRANSFORMS_UTILS_IMPORTEDFUNCTIONSINLININGSTATISTICS_H
+
+#include "llvm/ADT/SmallVector.h"
+#include "llvm/ADT/StringMap.h"
+#include "llvm/ADT/StringRef.h"
+#include <string>
+#include <vector>
+
+namespace llvm {
+class Module;
+class Function;
+/// Calculate and dump ThinLTO specific inliner stats.
+/// The main statistics are:
+/// (1) Number of inlined imported functions,
+/// (2) Number of imported functions inlined into importing module (indirect),
+/// (3) Number of non imported functions inlined into importing module
+/// (indirect).
+/// The difference between first and the second is that first stat counts
+/// all performed inlines on imported functions, but the second one only the
+/// functions that have been eventually inlined to a function in the importing
+/// module (by a chain of inlines). Because llvm uses bottom-up inliner, it is
+/// possible to e.g. import function `A`, `B` and then inline `B` to `A`,
+/// and after this `A` might be too big to be inlined into some other function
+/// that calls it. It calculates this statistic by building graph, where
+/// the nodes are functions, and edges are performed inlines and then by marking
+/// the edges starting from not imported function.
+///
+/// If `Verbose` is set to true, then it also dumps statistics
+/// per each inlined function, sorted by the greatest inlines count like
+/// - number of performed inlines
+/// - number of performed inlines to importing module
+class ImportedFunctionsInliningStatistics {
+private:
+ /// InlineGraphNode represents node in graph of inlined functions.
+ struct InlineGraphNode {
+ // Default-constructible and movable.
+ InlineGraphNode() = default;
+ InlineGraphNode(InlineGraphNode &&) = default;
+ InlineGraphNode &operator=(InlineGraphNode &&) = default;
+
+ llvm::SmallVector<InlineGraphNode *, 8> InlinedCallees;
+ /// Incremented every direct inline.
+ int32_t NumberOfInlines = 0;
+ /// Number of inlines into non imported function (possibly indirect via
+ /// intermediate inlines). Computed based on graph search.
+ int32_t NumberOfRealInlines = 0;
+ bool Imported = false;
+ bool Visited = false;
+ };
+
+public:
+ ImportedFunctionsInliningStatistics() = default;
+ ImportedFunctionsInliningStatistics(
+ const ImportedFunctionsInliningStatistics &) = delete;
+
+ /// Set information like AllFunctions, ImportedFunctions, ModuleName.
+ void setModuleInfo(const Module &M);
+ /// Record inline of @param Callee to @param Caller for statistis.
+ void recordInline(const Function &Caller, const Function &Callee);
+ /// Dump stats computed with InlinerStatistics class.
+ /// If @param Verbose is true then separate statistics for every inlined
+ /// function will be printed.
+ void dump(bool Verbose);
+
+private:
+ /// Creates new Node in NodeMap and sets attributes, or returns existed one.
+ InlineGraphNode &createInlineGraphNode(const Function &);
+ void calculateRealInlines();
+ void dfs(InlineGraphNode &GraphNode);
+
+ using NodesMapTy =
+ llvm::StringMap<std::unique_ptr<InlineGraphNode>>;
+ using SortedNodesTy =
+ std::vector<const NodesMapTy::MapEntryTy*>;
+ /// Returns vector of elements sorted by
+ /// (-NumberOfInlines, -NumberOfRealInlines, FunctionName).
+ SortedNodesTy getSortedNodes();
+
+private:
+ /// This map manage life of all InlineGraphNodes. Unique pointer to
+ /// InlineGraphNode used since the node pointers are also saved in the
+ /// InlinedCallees vector. If it would store InlineGraphNode instead then the
+ /// address of the node would not be invariant.
+ NodesMapTy NodesMap;
+ /// Non external functions that have some other function inlined inside.
+ std::vector<StringRef> NonImportedCallers;
+ int AllFunctions = 0;
+ int ImportedFunctions = 0;
+ StringRef ModuleName;
+};
+
+enum class InlinerFunctionImportStatsOpts {
+ No = 0,
+ Basic = 1,
+ Verbose = 2,
+};
+
+} // llvm
+
+#endif // LLVM_TRANSFORMS_UTILS_IMPORTEDFUNCTIONSINLININGSTATISTICS_H
+
+#ifdef __GNUC__
+#pragma GCC diagnostic pop
+#endif
diff --git a/contrib/libs/llvm12/include/llvm/Analysis/Utils/Local.h b/contrib/libs/llvm12/include/llvm/Analysis/Utils/Local.h
index f6b4cf83b2..32801f7c14 100644
--- a/contrib/libs/llvm12/include/llvm/Analysis/Utils/Local.h
+++ b/contrib/libs/llvm12/include/llvm/Analysis/Utils/Local.h
@@ -37,7 +37,7 @@ Value *EmitGEPOffset(IRBuilderTy *Builder, const DataLayout &DL, User *GEP,
bool NoAssumptions = false) {
GEPOperator *GEPOp = cast<GEPOperator>(GEP);
Type *IntIdxTy = DL.getIndexType(GEP->getType());
- Value *Result = nullptr;
+ Value *Result = nullptr;
// If the GEP is inbounds, we know that none of the addressing operations will
// overflow in a signed sense.
@@ -53,7 +53,7 @@ Value *EmitGEPOffset(IRBuilderTy *Builder, const DataLayout &DL, User *GEP,
++i, ++GTI) {
Value *Op = *i;
uint64_t Size = DL.getTypeAllocSize(GTI.getIndexedType()) & PtrSizeMask;
- Value *Offset;
+ Value *Offset;
if (Constant *OpC = dyn_cast<Constant>(Op)) {
if (OpC->isZeroValue())
continue;
@@ -62,47 +62,47 @@ Value *EmitGEPOffset(IRBuilderTy *Builder, const DataLayout &DL, User *GEP,
if (StructType *STy = GTI.getStructTypeOrNull()) {
uint64_t OpValue = OpC->getUniqueInteger().getZExtValue();
Size = DL.getStructLayout(STy)->getElementOffset(OpValue);
- if (!Size)
- continue;
-
- Offset = ConstantInt::get(IntIdxTy, Size);
- } else {
- // Splat the constant if needed.
- if (IntIdxTy->isVectorTy() && !OpC->getType()->isVectorTy())
- OpC = ConstantVector::getSplat(
- cast<VectorType>(IntIdxTy)->getElementCount(), OpC);
-
- Constant *Scale = ConstantInt::get(IntIdxTy, Size);
- Constant *OC =
- ConstantExpr::getIntegerCast(OpC, IntIdxTy, true /*SExt*/);
- Offset =
- ConstantExpr::getMul(OC, Scale, false /*NUW*/, isInBounds /*NSW*/);
+ if (!Size)
+ continue;
+
+ Offset = ConstantInt::get(IntIdxTy, Size);
+ } else {
+ // Splat the constant if needed.
+ if (IntIdxTy->isVectorTy() && !OpC->getType()->isVectorTy())
+ OpC = ConstantVector::getSplat(
+ cast<VectorType>(IntIdxTy)->getElementCount(), OpC);
+
+ Constant *Scale = ConstantInt::get(IntIdxTy, Size);
+ Constant *OC =
+ ConstantExpr::getIntegerCast(OpC, IntIdxTy, true /*SExt*/);
+ Offset =
+ ConstantExpr::getMul(OC, Scale, false /*NUW*/, isInBounds /*NSW*/);
}
- } else {
- // Splat the index if needed.
- if (IntIdxTy->isVectorTy() && !Op->getType()->isVectorTy())
- Op = Builder->CreateVectorSplat(
- cast<FixedVectorType>(IntIdxTy)->getNumElements(), Op);
-
- // Convert to correct type.
- if (Op->getType() != IntIdxTy)
- Op = Builder->CreateIntCast(Op, IntIdxTy, true, Op->getName().str()+".c");
- if (Size != 1) {
- // We'll let instcombine(mul) convert this to a shl if possible.
- Op = Builder->CreateMul(Op, ConstantInt::get(IntIdxTy, Size),
- GEP->getName().str() + ".idx", false /*NUW*/,
- isInBounds /*NSW*/);
- }
- Offset = Op;
+ } else {
+ // Splat the index if needed.
+ if (IntIdxTy->isVectorTy() && !Op->getType()->isVectorTy())
+ Op = Builder->CreateVectorSplat(
+ cast<FixedVectorType>(IntIdxTy)->getNumElements(), Op);
+
+ // Convert to correct type.
+ if (Op->getType() != IntIdxTy)
+ Op = Builder->CreateIntCast(Op, IntIdxTy, true, Op->getName().str()+".c");
+ if (Size != 1) {
+ // We'll let instcombine(mul) convert this to a shl if possible.
+ Op = Builder->CreateMul(Op, ConstantInt::get(IntIdxTy, Size),
+ GEP->getName().str() + ".idx", false /*NUW*/,
+ isInBounds /*NSW*/);
+ }
+ Offset = Op;
}
- if (Result)
- Result = Builder->CreateAdd(Result, Offset, GEP->getName().str()+".offs",
- false /*NUW*/, isInBounds /*NSW*/);
- else
- Result = Offset;
+ if (Result)
+ Result = Builder->CreateAdd(Result, Offset, GEP->getName().str()+".offs",
+ false /*NUW*/, isInBounds /*NSW*/);
+ else
+ Result = Offset;
}
- return Result ? Result : Constant::getNullValue(IntIdxTy);
+ return Result ? Result : Constant::getNullValue(IntIdxTy);
}
}
diff --git a/contrib/libs/llvm12/include/llvm/Analysis/Utils/TFUtils.h b/contrib/libs/llvm12/include/llvm/Analysis/Utils/TFUtils.h
index 2248ebf6da..ce5f3088d7 100644
--- a/contrib/libs/llvm12/include/llvm/Analysis/Utils/TFUtils.h
+++ b/contrib/libs/llvm12/include/llvm/Analysis/Utils/TFUtils.h
@@ -16,11 +16,11 @@
#ifndef LLVM_ANALYSIS_UTILS_TFUTILS_H
#define LLVM_ANALYSIS_UTILS_TFUTILS_H
-#include "llvm/Config/llvm-config.h"
+#include "llvm/Config/llvm-config.h"
#ifdef LLVM_HAVE_TF_API
#include "llvm/IR/LLVMContext.h"
-#include "llvm/Support/JSON.h"
+#include "llvm/Support/JSON.h"
#include <memory>
#include <vector>
@@ -44,141 +44,141 @@ namespace llvm {
class TFModelEvaluatorImpl;
class EvaluationResultImpl;
-/// TensorSpec encapsulates the specification of a tensor: its dimensions, or
-/// "shape" (row-major), its type (see TensorSpec::getDataType specializations
-/// for supported types), its name and port (see "TensorFlow: Large-Scale
-/// Machine Learning on Heterogeneous Distributed Systems", section 4.2, para 2:
-/// https://static.googleusercontent.com/media/research.google.com/en//pubs/archive/45166.pdf)
-///
-/// TensorSpec is used to set up a TFModelEvaluator by describing the expected
-/// inputs and outputs.
-class TensorSpec final {
-public:
- template <typename T>
- static TensorSpec createSpec(const std::string &Name,
- const std::vector<int64_t> &Shape,
- int Port = 0) {
- return TensorSpec(Name, Port, getDataType<T>(), Shape);
- }
-
- const std::string &name() const { return Name; }
- int port() const { return Port; }
- int typeIndex() const { return TypeIndex; }
- const std::vector<int64_t> &shape() const { return Shape; }
-
- bool operator==(const TensorSpec &Other) const {
- return Name == Other.Name && Port == Other.Port &&
- TypeIndex == Other.TypeIndex && Shape == Other.Shape;
- }
-
- bool operator!=(const TensorSpec &Other) const { return !(*this == Other); }
-
- /// Get the number of elements in a tensor with this shape.
- size_t getElementCount() const { return ElementCount; }
- /// Get the size, in bytes, of one element.
- size_t getElementByteSize() const;
-
- template <typename T> bool isElementType() const {
- return getDataType<T>() == TypeIndex;
- }
-
-private:
- TensorSpec(const std::string &Name, int Port, int TypeIndex,
- const std::vector<int64_t> &Shape);
-
- template <typename T> static int getDataType() {
- llvm_unreachable("Undefined tensor type");
- }
-
- std::string Name;
- int Port = 0;
- int TypeIndex = 0;
- std::vector<int64_t> Shape;
- size_t ElementCount = 0;
-};
-
-/// Construct a TensorSpec from a JSON dictionary of the form:
-/// { "name": <string>,
-/// "port": <int>,
-/// "type": <string. Use LLVM's types, e.g. float, double, int64_t>,
-/// "shape": <array of ints> }
-/// For the "type" field, see the C++ primitive types used in
-/// TFUTILS_SUPPORTED_TYPES.
-Optional<TensorSpec> getTensorSpecFromJSON(LLVMContext &Ctx,
- const json::Value &Value);
-
-struct LoggedFeatureSpec {
- TensorSpec Spec;
- Optional<std::string> LoggingName;
-};
-
-/// Load the output specs. If SpecFileOverride is not empty, that path is used.
-/// Otherwise, the file is assumed to be called 'output_spec.json' and be found
-/// under ModelPath (the model directory).
-/// The first output tensor name must match ExpectedDecisionName.
-/// In case of error, the return is None and the error is logged.
-Optional<std::vector<LoggedFeatureSpec>>
-loadOutputSpecs(LLVMContext &Ctx, StringRef ExpectedDecisionName,
- StringRef ModelPath, StringRef SpecFileOverride = StringRef());
-
-/// Logging utility - given an ordered specification of features, and assuming
-/// a scalar reward, allow logging feature values and rewards, and then print
-/// as tf.train.SequenceExample text protobuf.
-/// The assumption is that, for an event to be logged (i.e. a set of feature
-/// values and a reward), the user calls the log* API for each feature exactly
-/// once, providing the index matching the position in the feature spec list
-/// provided at construction:
-/// event 0:
-/// logTensorValue(0, ...)
-/// logTensorValue(1, ...)
-/// ...
-/// logReward(...)
-/// event 1:
-/// logTensorValue(0, ...)
-/// logTensorValue(1, ...)
-/// ...
-/// logReward(...)
-///
-/// At the end, call print to generate the protobuf.
-class Logger final {
-public:
- /// Construct a Logger. If IncludeReward is false, then logReward shouldn't
- /// be called, and the reward feature won't be printed out.
- Logger(const std::vector<LoggedFeatureSpec> &FeatureSpecs,
- const TensorSpec &RewardSpec, bool IncludeReward)
- : FeatureSpecs(FeatureSpecs), RewardSpec(RewardSpec),
- RawLogData(FeatureSpecs.size() + IncludeReward),
- IncludeReward(IncludeReward) {}
-
- template <typename T> void logReward(T Value) {
- assert(IncludeReward);
- logTensorValue(RawLogData.size() - 1, &Value);
- }
-
- template <typename T> void logFinalReward(T Value) {
- assert(RawLogData.back().empty());
- logReward(Value);
- }
-
- template <typename T>
- void logTensorValue(size_t FeatureID, const T *Value, size_t Size = 1) {
- const char *Start = reinterpret_cast<const char *>(Value);
- const char *End = Start + sizeof(T) * Size;
- RawLogData[FeatureID].insert(RawLogData[FeatureID].end(), Start, End);
- }
-
- void print(raw_ostream &OS);
-
-private:
- std::vector<LoggedFeatureSpec> FeatureSpecs;
- TensorSpec RewardSpec;
- /// RawData has one entry per feature, plus one more for the reward.
- /// Each feature's values are then stored in a vector, in succession.
- /// This means the ith event is stored at [*][i]
- std::vector<std::vector<char>> RawLogData;
- const bool IncludeReward;
-};
-
+/// TensorSpec encapsulates the specification of a tensor: its dimensions, or
+/// "shape" (row-major), its type (see TensorSpec::getDataType specializations
+/// for supported types), its name and port (see "TensorFlow: Large-Scale
+/// Machine Learning on Heterogeneous Distributed Systems", section 4.2, para 2:
+/// https://static.googleusercontent.com/media/research.google.com/en//pubs/archive/45166.pdf)
+///
+/// TensorSpec is used to set up a TFModelEvaluator by describing the expected
+/// inputs and outputs.
+class TensorSpec final {
+public:
+ template <typename T>
+ static TensorSpec createSpec(const std::string &Name,
+ const std::vector<int64_t> &Shape,
+ int Port = 0) {
+ return TensorSpec(Name, Port, getDataType<T>(), Shape);
+ }
+
+ const std::string &name() const { return Name; }
+ int port() const { return Port; }
+ int typeIndex() const { return TypeIndex; }
+ const std::vector<int64_t> &shape() const { return Shape; }
+
+ bool operator==(const TensorSpec &Other) const {
+ return Name == Other.Name && Port == Other.Port &&
+ TypeIndex == Other.TypeIndex && Shape == Other.Shape;
+ }
+
+ bool operator!=(const TensorSpec &Other) const { return !(*this == Other); }
+
+ /// Get the number of elements in a tensor with this shape.
+ size_t getElementCount() const { return ElementCount; }
+ /// Get the size, in bytes, of one element.
+ size_t getElementByteSize() const;
+
+ template <typename T> bool isElementType() const {
+ return getDataType<T>() == TypeIndex;
+ }
+
+private:
+ TensorSpec(const std::string &Name, int Port, int TypeIndex,
+ const std::vector<int64_t> &Shape);
+
+ template <typename T> static int getDataType() {
+ llvm_unreachable("Undefined tensor type");
+ }
+
+ std::string Name;
+ int Port = 0;
+ int TypeIndex = 0;
+ std::vector<int64_t> Shape;
+ size_t ElementCount = 0;
+};
+
+/// Construct a TensorSpec from a JSON dictionary of the form:
+/// { "name": <string>,
+/// "port": <int>,
+/// "type": <string. Use LLVM's types, e.g. float, double, int64_t>,
+/// "shape": <array of ints> }
+/// For the "type" field, see the C++ primitive types used in
+/// TFUTILS_SUPPORTED_TYPES.
+Optional<TensorSpec> getTensorSpecFromJSON(LLVMContext &Ctx,
+ const json::Value &Value);
+
+struct LoggedFeatureSpec {
+ TensorSpec Spec;
+ Optional<std::string> LoggingName;
+};
+
+/// Load the output specs. If SpecFileOverride is not empty, that path is used.
+/// Otherwise, the file is assumed to be called 'output_spec.json' and be found
+/// under ModelPath (the model directory).
+/// The first output tensor name must match ExpectedDecisionName.
+/// In case of error, the return is None and the error is logged.
+Optional<std::vector<LoggedFeatureSpec>>
+loadOutputSpecs(LLVMContext &Ctx, StringRef ExpectedDecisionName,
+ StringRef ModelPath, StringRef SpecFileOverride = StringRef());
+
+/// Logging utility - given an ordered specification of features, and assuming
+/// a scalar reward, allow logging feature values and rewards, and then print
+/// as tf.train.SequenceExample text protobuf.
+/// The assumption is that, for an event to be logged (i.e. a set of feature
+/// values and a reward), the user calls the log* API for each feature exactly
+/// once, providing the index matching the position in the feature spec list
+/// provided at construction:
+/// event 0:
+/// logTensorValue(0, ...)
+/// logTensorValue(1, ...)
+/// ...
+/// logReward(...)
+/// event 1:
+/// logTensorValue(0, ...)
+/// logTensorValue(1, ...)
+/// ...
+/// logReward(...)
+///
+/// At the end, call print to generate the protobuf.
+class Logger final {
+public:
+ /// Construct a Logger. If IncludeReward is false, then logReward shouldn't
+ /// be called, and the reward feature won't be printed out.
+ Logger(const std::vector<LoggedFeatureSpec> &FeatureSpecs,
+ const TensorSpec &RewardSpec, bool IncludeReward)
+ : FeatureSpecs(FeatureSpecs), RewardSpec(RewardSpec),
+ RawLogData(FeatureSpecs.size() + IncludeReward),
+ IncludeReward(IncludeReward) {}
+
+ template <typename T> void logReward(T Value) {
+ assert(IncludeReward);
+ logTensorValue(RawLogData.size() - 1, &Value);
+ }
+
+ template <typename T> void logFinalReward(T Value) {
+ assert(RawLogData.back().empty());
+ logReward(Value);
+ }
+
+ template <typename T>
+ void logTensorValue(size_t FeatureID, const T *Value, size_t Size = 1) {
+ const char *Start = reinterpret_cast<const char *>(Value);
+ const char *End = Start + sizeof(T) * Size;
+ RawLogData[FeatureID].insert(RawLogData[FeatureID].end(), Start, End);
+ }
+
+ void print(raw_ostream &OS);
+
+private:
+ std::vector<LoggedFeatureSpec> FeatureSpecs;
+ TensorSpec RewardSpec;
+ /// RawData has one entry per feature, plus one more for the reward.
+ /// Each feature's values are then stored in a vector, in succession.
+ /// This means the ith event is stored at [*][i]
+ std::vector<std::vector<char>> RawLogData;
+ const bool IncludeReward;
+};
+
class TFModelEvaluator final {
public:
/// The result of a model evaluation. Handles the lifetime of the output
@@ -187,26 +187,26 @@ public:
class EvaluationResult {
public:
EvaluationResult(const EvaluationResult &) = delete;
- EvaluationResult &operator=(const EvaluationResult &Other) = delete;
-
+ EvaluationResult &operator=(const EvaluationResult &Other) = delete;
+
EvaluationResult(EvaluationResult &&Other);
- EvaluationResult &operator=(EvaluationResult &&Other);
-
+ EvaluationResult &operator=(EvaluationResult &&Other);
+
~EvaluationResult();
- /// Get a (const) pointer to the first element of the tensor at Index.
+ /// Get a (const) pointer to the first element of the tensor at Index.
template <typename T> T *getTensorValue(size_t Index) {
return static_cast<T *>(getUntypedTensorValue(Index));
}
- template <typename T> const T *getTensorValue(size_t Index) const {
- return static_cast<T *>(getUntypedTensorValue(Index));
- }
-
- /// Get a (const) pointer to the untyped data of the tensor.
- void *getUntypedTensorValue(size_t Index);
- const void *getUntypedTensorValue(size_t Index) const;
-
+ template <typename T> const T *getTensorValue(size_t Index) const {
+ return static_cast<T *>(getUntypedTensorValue(Index));
+ }
+
+ /// Get a (const) pointer to the untyped data of the tensor.
+ void *getUntypedTensorValue(size_t Index);
+ const void *getUntypedTensorValue(size_t Index) const;
+
private:
friend class TFModelEvaluator;
EvaluationResult(std::unique_ptr<EvaluationResultImpl> Impl);
@@ -214,14 +214,14 @@ public:
};
TFModelEvaluator(StringRef SavedModelPath,
- const std::vector<TensorSpec> &InputSpecs,
- const std::vector<TensorSpec> &OutputSpecs,
+ const std::vector<TensorSpec> &InputSpecs,
+ const std::vector<TensorSpec> &OutputSpecs,
const char *Tags = "serve");
- TFModelEvaluator(StringRef SavedModelPath,
- const std::vector<TensorSpec> &InputSpecs,
- function_ref<TensorSpec(size_t)> GetOutputSpecs,
- size_t OutputSpecsSize, const char *Tags = "serve");
-
+ TFModelEvaluator(StringRef SavedModelPath,
+ const std::vector<TensorSpec> &InputSpecs,
+ function_ref<TensorSpec(size_t)> GetOutputSpecs,
+ size_t OutputSpecsSize, const char *Tags = "serve");
+
~TFModelEvaluator();
TFModelEvaluator(const TFModelEvaluator &) = delete;
TFModelEvaluator(TFModelEvaluator &&) = delete;
@@ -246,27 +246,27 @@ private:
std::unique_ptr<TFModelEvaluatorImpl> Impl;
};
-/// List of supported types, as a pair:
-/// - C++ type
-/// - enum name (implementation-specific)
-#define TFUTILS_SUPPORTED_TYPES(M) \
- M(float, TF_FLOAT) \
- M(double, TF_DOUBLE) \
- M(int8_t, TF_INT8) \
- M(uint8_t, TF_UINT8) \
- M(int16_t, TF_INT16) \
- M(uint16_t, TF_UINT16) \
- M(int32_t, TF_INT32) \
- M(uint32_t, TF_UINT32) \
- M(int64_t, TF_INT64) \
- M(uint64_t, TF_UINT64)
-
-#define TFUTILS_GETDATATYPE_DEF(T, E) \
- template <> int TensorSpec::getDataType<T>();
-
-TFUTILS_SUPPORTED_TYPES(TFUTILS_GETDATATYPE_DEF)
-
-#undef TFUTILS_GETDATATYPE_DEF
+/// List of supported types, as a pair:
+/// - C++ type
+/// - enum name (implementation-specific)
+#define TFUTILS_SUPPORTED_TYPES(M) \
+ M(float, TF_FLOAT) \
+ M(double, TF_DOUBLE) \
+ M(int8_t, TF_INT8) \
+ M(uint8_t, TF_UINT8) \
+ M(int16_t, TF_INT16) \
+ M(uint16_t, TF_UINT16) \
+ M(int32_t, TF_INT32) \
+ M(uint32_t, TF_UINT32) \
+ M(int64_t, TF_INT64) \
+ M(uint64_t, TF_UINT64)
+
+#define TFUTILS_GETDATATYPE_DEF(T, E) \
+ template <> int TensorSpec::getDataType<T>();
+
+TFUTILS_SUPPORTED_TYPES(TFUTILS_GETDATATYPE_DEF)
+
+#undef TFUTILS_GETDATATYPE_DEF
} // namespace llvm
#endif // LLVM_HAVE_TF_API
diff --git a/contrib/libs/llvm12/include/llvm/Analysis/ValueLattice.h b/contrib/libs/llvm12/include/llvm/Analysis/ValueLattice.h
index 6b9e7e3650..fdb1edde7f 100644
--- a/contrib/libs/llvm12/include/llvm/Analysis/ValueLattice.h
+++ b/contrib/libs/llvm12/include/llvm/Analysis/ValueLattice.h
@@ -18,7 +18,7 @@
#include "llvm/IR/ConstantRange.h"
#include "llvm/IR/Constants.h"
-#include "llvm/IR/Instructions.h"
+#include "llvm/IR/Instructions.h"
//
//===----------------------------------------------------------------------===//
// ValueLatticeElement
@@ -464,16 +464,16 @@ public:
if (isConstant() && Other.isConstant())
return ConstantExpr::getCompare(Pred, getConstant(), Other.getConstant());
- if (ICmpInst::isEquality(Pred)) {
- // not(C) != C => true, not(C) == C => false.
- if ((isNotConstant() && Other.isConstant() &&
- getNotConstant() == Other.getConstant()) ||
- (isConstant() && Other.isNotConstant() &&
- getConstant() == Other.getNotConstant()))
- return Pred == ICmpInst::ICMP_NE
- ? ConstantInt::getTrue(Ty) : ConstantInt::getFalse(Ty);
- }
-
+ if (ICmpInst::isEquality(Pred)) {
+ // not(C) != C => true, not(C) == C => false.
+ if ((isNotConstant() && Other.isConstant() &&
+ getNotConstant() == Other.getConstant()) ||
+ (isConstant() && Other.isNotConstant() &&
+ getConstant() == Other.getNotConstant()))
+ return Pred == ICmpInst::ICMP_NE
+ ? ConstantInt::getTrue(Ty) : ConstantInt::getFalse(Ty);
+ }
+
// Integer constants are represented as ConstantRanges with single
// elements.
if (!isConstantRange() || !Other.isConstantRange())
diff --git a/contrib/libs/llvm12/include/llvm/Analysis/ValueTracking.h b/contrib/libs/llvm12/include/llvm/Analysis/ValueTracking.h
index ccc0b62f08..65c4f66864 100644
--- a/contrib/libs/llvm12/include/llvm/Analysis/ValueTracking.h
+++ b/contrib/libs/llvm12/include/llvm/Analysis/ValueTracking.h
@@ -28,14 +28,14 @@
#include "llvm/IR/DataLayout.h"
#include "llvm/IR/InstrTypes.h"
#include "llvm/IR/Intrinsics.h"
-#include "llvm/IR/Operator.h"
+#include "llvm/IR/Operator.h"
#include <cassert>
#include <cstdint>
namespace llvm {
class AddOperator;
-class AllocaInst;
+class AllocaInst;
class APInt;
class AssumptionCache;
class DominatorTree;
@@ -52,8 +52,8 @@ class StringRef;
class TargetLibraryInfo;
class Value;
-constexpr unsigned MaxAnalysisRecursionDepth = 6;
-
+constexpr unsigned MaxAnalysisRecursionDepth = 6;
+
/// Determine which bits of V are known to be either zero or one and return
/// them in the KnownZero/KnownOne bit sets.
///
@@ -377,13 +377,13 @@ constexpr unsigned MaxAnalysisRecursionDepth = 6;
/// that the returned value has pointer type if the specified value does. If
/// the MaxLookup value is non-zero, it limits the number of instructions to
/// be stripped off.
- Value *getUnderlyingObject(Value *V, unsigned MaxLookup = 6);
- inline const Value *getUnderlyingObject(const Value *V,
+ Value *getUnderlyingObject(Value *V, unsigned MaxLookup = 6);
+ inline const Value *getUnderlyingObject(const Value *V,
unsigned MaxLookup = 6) {
- return getUnderlyingObject(const_cast<Value *>(V), MaxLookup);
+ return getUnderlyingObject(const_cast<Value *>(V), MaxLookup);
}
- /// This method is similar to getUnderlyingObject except that it can
+ /// This method is similar to getUnderlyingObject except that it can
/// look through phi and select instructions and return multiple objects.
///
/// If LoopInfo is passed, loop phis are further analyzed. If a pointer
@@ -411,30 +411,30 @@ constexpr unsigned MaxAnalysisRecursionDepth = 6;
/// Since A[i] and A[i-1] are independent pointers, getUnderlyingObjects
/// should not assume that Curr and Prev share the same underlying object thus
/// it shouldn't look through the phi above.
- void getUnderlyingObjects(const Value *V,
+ void getUnderlyingObjects(const Value *V,
SmallVectorImpl<const Value *> &Objects,
- LoopInfo *LI = nullptr, unsigned MaxLookup = 6);
+ LoopInfo *LI = nullptr, unsigned MaxLookup = 6);
- /// This is a wrapper around getUnderlyingObjects and adds support for basic
+ /// This is a wrapper around getUnderlyingObjects and adds support for basic
/// ptrtoint+arithmetic+inttoptr sequences.
bool getUnderlyingObjectsForCodeGen(const Value *V,
- SmallVectorImpl<Value *> &Objects);
-
- /// Returns unique alloca where the value comes from, or nullptr.
- /// If OffsetZero is true check that V points to the begining of the alloca.
- AllocaInst *findAllocaForValue(Value *V, bool OffsetZero = false);
- inline const AllocaInst *findAllocaForValue(const Value *V,
- bool OffsetZero = false) {
- return findAllocaForValue(const_cast<Value *>(V), OffsetZero);
- }
-
+ SmallVectorImpl<Value *> &Objects);
+
+ /// Returns unique alloca where the value comes from, or nullptr.
+ /// If OffsetZero is true check that V points to the begining of the alloca.
+ AllocaInst *findAllocaForValue(Value *V, bool OffsetZero = false);
+ inline const AllocaInst *findAllocaForValue(const Value *V,
+ bool OffsetZero = false) {
+ return findAllocaForValue(const_cast<Value *>(V), OffsetZero);
+ }
+
/// Return true if the only users of this pointer are lifetime markers.
bool onlyUsedByLifetimeMarkers(const Value *V);
- /// Return true if the only users of this pointer are lifetime markers or
- /// droppable instructions.
- bool onlyUsedByLifetimeMarkersOrDroppableInsts(const Value *V);
-
+ /// Return true if the only users of this pointer are lifetime markers or
+ /// droppable instructions.
+ bool onlyUsedByLifetimeMarkersOrDroppableInsts(const Value *V);
+
/// Return true if speculation of the given load must be suppressed to avoid
/// ordering or interfering with an active sanitizer. If not suppressed,
/// dereferenceability and alignment must be proven separately. Note: This
@@ -591,65 +591,65 @@ constexpr unsigned MaxAnalysisRecursionDepth = 6;
/// if, for all i, r is evaluated to poison or op raises UB if vi = poison.
/// To filter out operands that raise UB on poison, you can use
/// getGuaranteedNonPoisonOp.
- bool propagatesPoison(const Operator *I);
+ bool propagatesPoison(const Operator *I);
- /// Insert operands of I into Ops such that I will trigger undefined behavior
- /// if I is executed and that operand has a poison value.
- void getGuaranteedNonPoisonOps(const Instruction *I,
- SmallPtrSetImpl<const Value *> &Ops);
+ /// Insert operands of I into Ops such that I will trigger undefined behavior
+ /// if I is executed and that operand has a poison value.
+ void getGuaranteedNonPoisonOps(const Instruction *I,
+ SmallPtrSetImpl<const Value *> &Ops);
- /// Return true if the given instruction must trigger undefined behavior
+ /// Return true if the given instruction must trigger undefined behavior
/// when I is executed with any operands which appear in KnownPoison holding
/// a poison value at the point of execution.
bool mustTriggerUB(const Instruction *I,
const SmallSet<const Value *, 16>& KnownPoison);
- /// Return true if this function can prove that if Inst is executed
- /// and yields a poison value or undef bits, then that will trigger
- /// undefined behavior.
+ /// Return true if this function can prove that if Inst is executed
+ /// and yields a poison value or undef bits, then that will trigger
+ /// undefined behavior.
///
/// Note that this currently only considers the basic block that is
- /// the parent of Inst.
- bool programUndefinedIfUndefOrPoison(const Instruction *Inst);
- bool programUndefinedIfPoison(const Instruction *Inst);
-
- /// canCreateUndefOrPoison returns true if Op can create undef or poison from
- /// non-undef & non-poison operands.
- /// For vectors, canCreateUndefOrPoison returns true if there is potential
- /// poison or undef in any element of the result when vectors without
- /// undef/poison poison are given as operands.
- /// For example, given `Op = shl <2 x i32> %x, <0, 32>`, this function returns
- /// true. If Op raises immediate UB but never creates poison or undef
- /// (e.g. sdiv I, 0), canCreatePoison returns false.
- ///
- /// canCreatePoison returns true if Op can create poison from non-poison
+ /// the parent of Inst.
+ bool programUndefinedIfUndefOrPoison(const Instruction *Inst);
+ bool programUndefinedIfPoison(const Instruction *Inst);
+
+ /// canCreateUndefOrPoison returns true if Op can create undef or poison from
+ /// non-undef & non-poison operands.
+ /// For vectors, canCreateUndefOrPoison returns true if there is potential
+ /// poison or undef in any element of the result when vectors without
+ /// undef/poison poison are given as operands.
+ /// For example, given `Op = shl <2 x i32> %x, <0, 32>`, this function returns
+ /// true. If Op raises immediate UB but never creates poison or undef
+ /// (e.g. sdiv I, 0), canCreatePoison returns false.
+ ///
+ /// canCreatePoison returns true if Op can create poison from non-poison
/// operands.
- bool canCreateUndefOrPoison(const Operator *Op);
- bool canCreatePoison(const Operator *Op);
-
- /// Return true if V is poison given that ValAssumedPoison is already poison.
- /// For example, if ValAssumedPoison is `icmp X, 10` and V is `icmp X, 5`,
- /// impliesPoison returns true.
- bool impliesPoison(const Value *ValAssumedPoison, const Value *V);
-
- /// Return true if this function can prove that V does not have undef bits
- /// and is never poison. If V is an aggregate value or vector, check whether
- /// all elements (except padding) are not undef or poison.
- /// Note that this is different from canCreateUndefOrPoison because the
- /// function assumes Op's operands are not poison/undef.
- ///
+ bool canCreateUndefOrPoison(const Operator *Op);
+ bool canCreatePoison(const Operator *Op);
+
+ /// Return true if V is poison given that ValAssumedPoison is already poison.
+ /// For example, if ValAssumedPoison is `icmp X, 10` and V is `icmp X, 5`,
+ /// impliesPoison returns true.
+ bool impliesPoison(const Value *ValAssumedPoison, const Value *V);
+
+ /// Return true if this function can prove that V does not have undef bits
+ /// and is never poison. If V is an aggregate value or vector, check whether
+ /// all elements (except padding) are not undef or poison.
+ /// Note that this is different from canCreateUndefOrPoison because the
+ /// function assumes Op's operands are not poison/undef.
+ ///
/// If CtxI and DT are specified this method performs flow-sensitive analysis
/// and returns true if it is guaranteed to be never undef or poison
/// immediately before the CtxI.
bool isGuaranteedNotToBeUndefOrPoison(const Value *V,
- AssumptionCache *AC = nullptr,
+ AssumptionCache *AC = nullptr,
const Instruction *CtxI = nullptr,
const DominatorTree *DT = nullptr,
unsigned Depth = 0);
- bool isGuaranteedNotToBePoison(const Value *V, AssumptionCache *AC = nullptr,
- const Instruction *CtxI = nullptr,
- const DominatorTree *DT = nullptr,
- unsigned Depth = 0);
+ bool isGuaranteedNotToBePoison(const Value *V, AssumptionCache *AC = nullptr,
+ const Instruction *CtxI = nullptr,
+ const DominatorTree *DT = nullptr,
+ unsigned Depth = 0);
/// Specific patterns of select instructions we can match.
enum SelectPatternFlavor {
@@ -740,14 +740,14 @@ constexpr unsigned MaxAnalysisRecursionDepth = 6;
/// minimum/maximum flavor.
CmpInst::Predicate getInverseMinMaxPred(SelectPatternFlavor SPF);
- /// Check if the values in \p VL are select instructions that can be converted
- /// to a min or max (vector) intrinsic. Returns the intrinsic ID, if such a
- /// conversion is possible, together with a bool indicating whether all select
- /// conditions are only used by the selects. Otherwise return
- /// Intrinsic::not_intrinsic.
- std::pair<Intrinsic::ID, bool>
- canConvertToMinOrMaxIntrinsic(ArrayRef<Value *> VL);
-
+ /// Check if the values in \p VL are select instructions that can be converted
+ /// to a min or max (vector) intrinsic. Returns the intrinsic ID, if such a
+ /// conversion is possible, together with a bool indicating whether all select
+ /// conditions are only used by the selects. Otherwise return
+ /// Intrinsic::not_intrinsic.
+ std::pair<Intrinsic::ID, bool>
+ canConvertToMinOrMaxIntrinsic(ArrayRef<Value *> VL);
+
/// Return true if RHS is known to be implied true by LHS. Return false if
/// RHS is known to be implied false by LHS. Otherwise, return None if no
/// implication can be made.
diff --git a/contrib/libs/llvm12/include/llvm/Analysis/VecFuncs.def b/contrib/libs/llvm12/include/llvm/Analysis/VecFuncs.def
index cfc3d61158..01dc54f3a0 100644
--- a/contrib/libs/llvm12/include/llvm/Analysis/VecFuncs.def
+++ b/contrib/libs/llvm12/include/llvm/Analysis/VecFuncs.def
@@ -62,87 +62,87 @@ TLI_DEFINE_VECFUNC("acoshf", "vacoshf", 4)
TLI_DEFINE_VECFUNC("atanhf", "vatanhf", 4)
-#elif defined(TLI_DEFINE_LIBMVEC_X86_VECFUNCS)
-// GLIBC Vector math Functions
-
-TLI_DEFINE_VECFUNC("sin", "_ZGVbN2v_sin", 2)
-TLI_DEFINE_VECFUNC("sin", "_ZGVdN4v_sin", 4)
-
-TLI_DEFINE_VECFUNC("sinf", "_ZGVbN4v_sinf", 4)
-TLI_DEFINE_VECFUNC("sinf", "_ZGVdN8v_sinf", 8)
-
-TLI_DEFINE_VECFUNC("llvm.sin.f64", "_ZGVbN2v_sin", 2)
-TLI_DEFINE_VECFUNC("llvm.sin.f64", "_ZGVdN4v_sin", 4)
-
-TLI_DEFINE_VECFUNC("llvm.sin.f32", "_ZGVbN4v_sinf", 4)
-TLI_DEFINE_VECFUNC("llvm.sin.f32", "_ZGVdN8v_sinf", 8)
-
-TLI_DEFINE_VECFUNC("cos", "_ZGVbN2v_cos", 2)
-TLI_DEFINE_VECFUNC("cos", "_ZGVdN4v_cos", 4)
-
-TLI_DEFINE_VECFUNC("cosf", "_ZGVbN4v_cosf", 4)
-TLI_DEFINE_VECFUNC("cosf", "_ZGVdN8v_cosf", 8)
-
-TLI_DEFINE_VECFUNC("llvm.cos.f64", "_ZGVbN2v_cos", 2)
-TLI_DEFINE_VECFUNC("llvm.cos.f64", "_ZGVdN4v_cos", 4)
-
-TLI_DEFINE_VECFUNC("llvm.cos.f32", "_ZGVbN4v_cosf", 4)
-TLI_DEFINE_VECFUNC("llvm.cos.f32", "_ZGVdN8v_cosf", 8)
-
-TLI_DEFINE_VECFUNC("pow", "_ZGVbN2vv_pow", 2)
-TLI_DEFINE_VECFUNC("pow", "_ZGVdN4vv_pow", 4)
-
-TLI_DEFINE_VECFUNC("powf", "_ZGVbN4vv_powf", 4)
-TLI_DEFINE_VECFUNC("powf", "_ZGVdN8vv_powf", 8)
-
-TLI_DEFINE_VECFUNC("__pow_finite", "_ZGVbN2vv___pow_finite", 2)
-TLI_DEFINE_VECFUNC("__pow_finite", "_ZGVdN4vv___pow_finite", 4)
-
-TLI_DEFINE_VECFUNC("__powf_finite", "_ZGVbN4vv___powf_finite", 4)
-TLI_DEFINE_VECFUNC("__powf_finite", "_ZGVdN8vv___powf_finite", 8)
-
-TLI_DEFINE_VECFUNC("llvm.pow.f64", "_ZGVbN2vv_pow", 2)
-TLI_DEFINE_VECFUNC("llvm.pow.f64", "_ZGVdN4vv_pow", 4)
-
-TLI_DEFINE_VECFUNC("llvm.pow.f32", "_ZGVbN4vv_powf", 4)
-TLI_DEFINE_VECFUNC("llvm.pow.f32", "_ZGVdN8vv_powf", 8)
-
-TLI_DEFINE_VECFUNC("exp", "_ZGVbN2v_exp", 2)
-TLI_DEFINE_VECFUNC("exp", "_ZGVdN4v_exp", 4)
-
-TLI_DEFINE_VECFUNC("expf", "_ZGVbN4v_expf", 4)
-TLI_DEFINE_VECFUNC("expf", "_ZGVdN8v_expf", 8)
-
-TLI_DEFINE_VECFUNC("__exp_finite", "_ZGVbN2v___exp_finite", 2)
-TLI_DEFINE_VECFUNC("__exp_finite", "_ZGVdN4v___exp_finite", 4)
-
-TLI_DEFINE_VECFUNC("__expf_finite", "_ZGVbN4v___expf_finite", 4)
-TLI_DEFINE_VECFUNC("__expf_finite", "_ZGVdN8v___expf_finite", 8)
-
-TLI_DEFINE_VECFUNC("llvm.exp.f64", "_ZGVbN2v_exp", 2)
-TLI_DEFINE_VECFUNC("llvm.exp.f64", "_ZGVdN4v_exp", 4)
-
-TLI_DEFINE_VECFUNC("llvm.exp.f32", "_ZGVbN4v_expf", 4)
-TLI_DEFINE_VECFUNC("llvm.exp.f32", "_ZGVdN8v_expf", 8)
-
-TLI_DEFINE_VECFUNC("log", "_ZGVbN2v_log", 2)
-TLI_DEFINE_VECFUNC("log", "_ZGVdN4v_log", 4)
-
-TLI_DEFINE_VECFUNC("logf", "_ZGVbN4v_logf", 4)
-TLI_DEFINE_VECFUNC("logf", "_ZGVdN8v_logf", 8)
-
-TLI_DEFINE_VECFUNC("__log_finite", "_ZGVbN2v___log_finite", 2)
-TLI_DEFINE_VECFUNC("__log_finite", "_ZGVdN4v___log_finite", 4)
-
-TLI_DEFINE_VECFUNC("__logf_finite", "_ZGVbN4v___logf_finite", 4)
-TLI_DEFINE_VECFUNC("__logf_finite", "_ZGVdN8v___logf_finite", 8)
-
-TLI_DEFINE_VECFUNC("llvm.log.f64", "_ZGVbN2v_log", 2)
-TLI_DEFINE_VECFUNC("llvm.log.f64", "_ZGVdN4v_log", 4)
-
-TLI_DEFINE_VECFUNC("llvm.log.f32", "_ZGVbN4v_logf", 4)
-TLI_DEFINE_VECFUNC("llvm.log.f32", "_ZGVdN8v_logf", 8)
-
+#elif defined(TLI_DEFINE_LIBMVEC_X86_VECFUNCS)
+// GLIBC Vector math Functions
+
+TLI_DEFINE_VECFUNC("sin", "_ZGVbN2v_sin", 2)
+TLI_DEFINE_VECFUNC("sin", "_ZGVdN4v_sin", 4)
+
+TLI_DEFINE_VECFUNC("sinf", "_ZGVbN4v_sinf", 4)
+TLI_DEFINE_VECFUNC("sinf", "_ZGVdN8v_sinf", 8)
+
+TLI_DEFINE_VECFUNC("llvm.sin.f64", "_ZGVbN2v_sin", 2)
+TLI_DEFINE_VECFUNC("llvm.sin.f64", "_ZGVdN4v_sin", 4)
+
+TLI_DEFINE_VECFUNC("llvm.sin.f32", "_ZGVbN4v_sinf", 4)
+TLI_DEFINE_VECFUNC("llvm.sin.f32", "_ZGVdN8v_sinf", 8)
+
+TLI_DEFINE_VECFUNC("cos", "_ZGVbN2v_cos", 2)
+TLI_DEFINE_VECFUNC("cos", "_ZGVdN4v_cos", 4)
+
+TLI_DEFINE_VECFUNC("cosf", "_ZGVbN4v_cosf", 4)
+TLI_DEFINE_VECFUNC("cosf", "_ZGVdN8v_cosf", 8)
+
+TLI_DEFINE_VECFUNC("llvm.cos.f64", "_ZGVbN2v_cos", 2)
+TLI_DEFINE_VECFUNC("llvm.cos.f64", "_ZGVdN4v_cos", 4)
+
+TLI_DEFINE_VECFUNC("llvm.cos.f32", "_ZGVbN4v_cosf", 4)
+TLI_DEFINE_VECFUNC("llvm.cos.f32", "_ZGVdN8v_cosf", 8)
+
+TLI_DEFINE_VECFUNC("pow", "_ZGVbN2vv_pow", 2)
+TLI_DEFINE_VECFUNC("pow", "_ZGVdN4vv_pow", 4)
+
+TLI_DEFINE_VECFUNC("powf", "_ZGVbN4vv_powf", 4)
+TLI_DEFINE_VECFUNC("powf", "_ZGVdN8vv_powf", 8)
+
+TLI_DEFINE_VECFUNC("__pow_finite", "_ZGVbN2vv___pow_finite", 2)
+TLI_DEFINE_VECFUNC("__pow_finite", "_ZGVdN4vv___pow_finite", 4)
+
+TLI_DEFINE_VECFUNC("__powf_finite", "_ZGVbN4vv___powf_finite", 4)
+TLI_DEFINE_VECFUNC("__powf_finite", "_ZGVdN8vv___powf_finite", 8)
+
+TLI_DEFINE_VECFUNC("llvm.pow.f64", "_ZGVbN2vv_pow", 2)
+TLI_DEFINE_VECFUNC("llvm.pow.f64", "_ZGVdN4vv_pow", 4)
+
+TLI_DEFINE_VECFUNC("llvm.pow.f32", "_ZGVbN4vv_powf", 4)
+TLI_DEFINE_VECFUNC("llvm.pow.f32", "_ZGVdN8vv_powf", 8)
+
+TLI_DEFINE_VECFUNC("exp", "_ZGVbN2v_exp", 2)
+TLI_DEFINE_VECFUNC("exp", "_ZGVdN4v_exp", 4)
+
+TLI_DEFINE_VECFUNC("expf", "_ZGVbN4v_expf", 4)
+TLI_DEFINE_VECFUNC("expf", "_ZGVdN8v_expf", 8)
+
+TLI_DEFINE_VECFUNC("__exp_finite", "_ZGVbN2v___exp_finite", 2)
+TLI_DEFINE_VECFUNC("__exp_finite", "_ZGVdN4v___exp_finite", 4)
+
+TLI_DEFINE_VECFUNC("__expf_finite", "_ZGVbN4v___expf_finite", 4)
+TLI_DEFINE_VECFUNC("__expf_finite", "_ZGVdN8v___expf_finite", 8)
+
+TLI_DEFINE_VECFUNC("llvm.exp.f64", "_ZGVbN2v_exp", 2)
+TLI_DEFINE_VECFUNC("llvm.exp.f64", "_ZGVdN4v_exp", 4)
+
+TLI_DEFINE_VECFUNC("llvm.exp.f32", "_ZGVbN4v_expf", 4)
+TLI_DEFINE_VECFUNC("llvm.exp.f32", "_ZGVdN8v_expf", 8)
+
+TLI_DEFINE_VECFUNC("log", "_ZGVbN2v_log", 2)
+TLI_DEFINE_VECFUNC("log", "_ZGVdN4v_log", 4)
+
+TLI_DEFINE_VECFUNC("logf", "_ZGVbN4v_logf", 4)
+TLI_DEFINE_VECFUNC("logf", "_ZGVdN8v_logf", 8)
+
+TLI_DEFINE_VECFUNC("__log_finite", "_ZGVbN2v___log_finite", 2)
+TLI_DEFINE_VECFUNC("__log_finite", "_ZGVdN4v___log_finite", 4)
+
+TLI_DEFINE_VECFUNC("__logf_finite", "_ZGVbN4v___logf_finite", 4)
+TLI_DEFINE_VECFUNC("__logf_finite", "_ZGVdN8v___logf_finite", 8)
+
+TLI_DEFINE_VECFUNC("llvm.log.f64", "_ZGVbN2v_log", 2)
+TLI_DEFINE_VECFUNC("llvm.log.f64", "_ZGVdN4v_log", 4)
+
+TLI_DEFINE_VECFUNC("llvm.log.f32", "_ZGVbN4v_logf", 4)
+TLI_DEFINE_VECFUNC("llvm.log.f32", "_ZGVdN8v_logf", 8)
+
#elif defined(TLI_DEFINE_MASSV_VECFUNCS)
// IBM MASS library's vector Functions
@@ -326,70 +326,70 @@ TLI_DEFINE_VECFUNC("llvm.log.f32", "__svml_logf4", 4)
TLI_DEFINE_VECFUNC("llvm.log.f32", "__svml_logf8", 8)
TLI_DEFINE_VECFUNC("llvm.log.f32", "__svml_logf16", 16)
-TLI_DEFINE_VECFUNC("log2", "__svml_log22", 2)
-TLI_DEFINE_VECFUNC("log2", "__svml_log24", 4)
-TLI_DEFINE_VECFUNC("log2", "__svml_log28", 8)
-
-TLI_DEFINE_VECFUNC("log2f", "__svml_log2f4", 4)
-TLI_DEFINE_VECFUNC("log2f", "__svml_log2f8", 8)
-TLI_DEFINE_VECFUNC("log2f", "__svml_log2f16", 16)
-
-TLI_DEFINE_VECFUNC("__log2_finite", "__svml_log22", 2)
-TLI_DEFINE_VECFUNC("__log2_finite", "__svml_log24", 4)
-TLI_DEFINE_VECFUNC("__log2_finite", "__svml_log28", 8)
-
-TLI_DEFINE_VECFUNC("__log2f_finite", "__svml_log2f4", 4)
-TLI_DEFINE_VECFUNC("__log2f_finite", "__svml_log2f8", 8)
-TLI_DEFINE_VECFUNC("__log2f_finite", "__svml_log2f16", 16)
-
-TLI_DEFINE_VECFUNC("llvm.log2.f64", "__svml_log22", 2)
-TLI_DEFINE_VECFUNC("llvm.log2.f64", "__svml_log24", 4)
-TLI_DEFINE_VECFUNC("llvm.log2.f64", "__svml_log28", 8)
-
-TLI_DEFINE_VECFUNC("llvm.log2.f32", "__svml_log2f4", 4)
-TLI_DEFINE_VECFUNC("llvm.log2.f32", "__svml_log2f8", 8)
-TLI_DEFINE_VECFUNC("llvm.log2.f32", "__svml_log2f16", 16)
-
-TLI_DEFINE_VECFUNC("log10", "__svml_log102", 2)
-TLI_DEFINE_VECFUNC("log10", "__svml_log104", 4)
-TLI_DEFINE_VECFUNC("log10", "__svml_log108", 8)
-
-TLI_DEFINE_VECFUNC("log10f", "__svml_log10f4", 4)
-TLI_DEFINE_VECFUNC("log10f", "__svml_log10f8", 8)
-TLI_DEFINE_VECFUNC("log10f", "__svml_log10f16", 16)
-
-TLI_DEFINE_VECFUNC("__log10_finite", "__svml_log102", 2)
-TLI_DEFINE_VECFUNC("__log10_finite", "__svml_log104", 4)
-TLI_DEFINE_VECFUNC("__log10_finite", "__svml_log108", 8)
-
-TLI_DEFINE_VECFUNC("__log10f_finite", "__svml_log10f4", 4)
-TLI_DEFINE_VECFUNC("__log10f_finite", "__svml_log10f8", 8)
-TLI_DEFINE_VECFUNC("__log10f_finite", "__svml_log10f16", 16)
-
-TLI_DEFINE_VECFUNC("llvm.log10.f64", "__svml_log102", 2)
-TLI_DEFINE_VECFUNC("llvm.log10.f64", "__svml_log104", 4)
-TLI_DEFINE_VECFUNC("llvm.log10.f64", "__svml_log108", 8)
-
-TLI_DEFINE_VECFUNC("llvm.log10.f32", "__svml_log10f4", 4)
-TLI_DEFINE_VECFUNC("llvm.log10.f32", "__svml_log10f8", 8)
-TLI_DEFINE_VECFUNC("llvm.log10.f32", "__svml_log10f16", 16)
-
-TLI_DEFINE_VECFUNC("sqrt", "__svml_sqrt2", 2)
-TLI_DEFINE_VECFUNC("sqrt", "__svml_sqrt4", 4)
-TLI_DEFINE_VECFUNC("sqrt", "__svml_sqrt8", 8)
-
-TLI_DEFINE_VECFUNC("sqrtf", "__svml_sqrtf4", 4)
-TLI_DEFINE_VECFUNC("sqrtf", "__svml_sqrtf8", 8)
-TLI_DEFINE_VECFUNC("sqrtf", "__svml_sqrtf16", 16)
-
-TLI_DEFINE_VECFUNC("__sqrt_finite", "__svml_sqrt2", 2)
-TLI_DEFINE_VECFUNC("__sqrt_finite", "__svml_sqrt4", 4)
-TLI_DEFINE_VECFUNC("__sqrt_finite", "__svml_sqrt8", 8)
-
-TLI_DEFINE_VECFUNC("__sqrtf_finite", "__svml_sqrtf4", 4)
-TLI_DEFINE_VECFUNC("__sqrtf_finite", "__svml_sqrtf8", 8)
-TLI_DEFINE_VECFUNC("__sqrtf_finite", "__svml_sqrtf16", 16)
-
+TLI_DEFINE_VECFUNC("log2", "__svml_log22", 2)
+TLI_DEFINE_VECFUNC("log2", "__svml_log24", 4)
+TLI_DEFINE_VECFUNC("log2", "__svml_log28", 8)
+
+TLI_DEFINE_VECFUNC("log2f", "__svml_log2f4", 4)
+TLI_DEFINE_VECFUNC("log2f", "__svml_log2f8", 8)
+TLI_DEFINE_VECFUNC("log2f", "__svml_log2f16", 16)
+
+TLI_DEFINE_VECFUNC("__log2_finite", "__svml_log22", 2)
+TLI_DEFINE_VECFUNC("__log2_finite", "__svml_log24", 4)
+TLI_DEFINE_VECFUNC("__log2_finite", "__svml_log28", 8)
+
+TLI_DEFINE_VECFUNC("__log2f_finite", "__svml_log2f4", 4)
+TLI_DEFINE_VECFUNC("__log2f_finite", "__svml_log2f8", 8)
+TLI_DEFINE_VECFUNC("__log2f_finite", "__svml_log2f16", 16)
+
+TLI_DEFINE_VECFUNC("llvm.log2.f64", "__svml_log22", 2)
+TLI_DEFINE_VECFUNC("llvm.log2.f64", "__svml_log24", 4)
+TLI_DEFINE_VECFUNC("llvm.log2.f64", "__svml_log28", 8)
+
+TLI_DEFINE_VECFUNC("llvm.log2.f32", "__svml_log2f4", 4)
+TLI_DEFINE_VECFUNC("llvm.log2.f32", "__svml_log2f8", 8)
+TLI_DEFINE_VECFUNC("llvm.log2.f32", "__svml_log2f16", 16)
+
+TLI_DEFINE_VECFUNC("log10", "__svml_log102", 2)
+TLI_DEFINE_VECFUNC("log10", "__svml_log104", 4)
+TLI_DEFINE_VECFUNC("log10", "__svml_log108", 8)
+
+TLI_DEFINE_VECFUNC("log10f", "__svml_log10f4", 4)
+TLI_DEFINE_VECFUNC("log10f", "__svml_log10f8", 8)
+TLI_DEFINE_VECFUNC("log10f", "__svml_log10f16", 16)
+
+TLI_DEFINE_VECFUNC("__log10_finite", "__svml_log102", 2)
+TLI_DEFINE_VECFUNC("__log10_finite", "__svml_log104", 4)
+TLI_DEFINE_VECFUNC("__log10_finite", "__svml_log108", 8)
+
+TLI_DEFINE_VECFUNC("__log10f_finite", "__svml_log10f4", 4)
+TLI_DEFINE_VECFUNC("__log10f_finite", "__svml_log10f8", 8)
+TLI_DEFINE_VECFUNC("__log10f_finite", "__svml_log10f16", 16)
+
+TLI_DEFINE_VECFUNC("llvm.log10.f64", "__svml_log102", 2)
+TLI_DEFINE_VECFUNC("llvm.log10.f64", "__svml_log104", 4)
+TLI_DEFINE_VECFUNC("llvm.log10.f64", "__svml_log108", 8)
+
+TLI_DEFINE_VECFUNC("llvm.log10.f32", "__svml_log10f4", 4)
+TLI_DEFINE_VECFUNC("llvm.log10.f32", "__svml_log10f8", 8)
+TLI_DEFINE_VECFUNC("llvm.log10.f32", "__svml_log10f16", 16)
+
+TLI_DEFINE_VECFUNC("sqrt", "__svml_sqrt2", 2)
+TLI_DEFINE_VECFUNC("sqrt", "__svml_sqrt4", 4)
+TLI_DEFINE_VECFUNC("sqrt", "__svml_sqrt8", 8)
+
+TLI_DEFINE_VECFUNC("sqrtf", "__svml_sqrtf4", 4)
+TLI_DEFINE_VECFUNC("sqrtf", "__svml_sqrtf8", 8)
+TLI_DEFINE_VECFUNC("sqrtf", "__svml_sqrtf16", 16)
+
+TLI_DEFINE_VECFUNC("__sqrt_finite", "__svml_sqrt2", 2)
+TLI_DEFINE_VECFUNC("__sqrt_finite", "__svml_sqrt4", 4)
+TLI_DEFINE_VECFUNC("__sqrt_finite", "__svml_sqrt8", 8)
+
+TLI_DEFINE_VECFUNC("__sqrtf_finite", "__svml_sqrtf4", 4)
+TLI_DEFINE_VECFUNC("__sqrtf_finite", "__svml_sqrtf8", 8)
+TLI_DEFINE_VECFUNC("__sqrtf_finite", "__svml_sqrtf16", 16)
+
TLI_DEFINE_VECFUNC("exp2", "__svml_exp22", 2)
TLI_DEFINE_VECFUNC("exp2", "__svml_exp24", 4)
TLI_DEFINE_VECFUNC("exp2", "__svml_exp28", 8)
@@ -420,7 +420,7 @@ TLI_DEFINE_VECFUNC("__exp2f_finite", "__svml_exp2f16", 16)
#undef TLI_DEFINE_VECFUNC
#undef TLI_DEFINE_ACCELERATE_VECFUNCS
-#undef TLI_DEFINE_LIBMVEC_X86_VECFUNCS
+#undef TLI_DEFINE_LIBMVEC_X86_VECFUNCS
#undef TLI_DEFINE_MASSV_VECFUNCS
#undef TLI_DEFINE_SVML_VECFUNCS
#undef TLI_DEFINE_MASSV_VECFUNCS_NAMES
diff --git a/contrib/libs/llvm12/include/llvm/Analysis/VectorUtils.h b/contrib/libs/llvm12/include/llvm/Analysis/VectorUtils.h
index dc23c9c9ee..82047c3aec 100644
--- a/contrib/libs/llvm12/include/llvm/Analysis/VectorUtils.h
+++ b/contrib/libs/llvm12/include/llvm/Analysis/VectorUtils.h
@@ -21,12 +21,12 @@
#define LLVM_ANALYSIS_VECTORUTILS_H
#include "llvm/ADT/MapVector.h"
-#include "llvm/ADT/SmallVector.h"
+#include "llvm/ADT/SmallVector.h"
#include "llvm/Analysis/LoopAccessAnalysis.h"
#include "llvm/Support/CheckedArithmetic.h"
namespace llvm {
-class TargetLibraryInfo;
+class TargetLibraryInfo;
/// Describes the type of Parameters
enum class VFParamKind {
@@ -106,8 +106,8 @@ struct VFShape {
// Retrieve the VFShape that can be used to map a (scalar) function to itself,
// with VF = 1.
static VFShape getScalarShape(const CallInst &CI) {
- return VFShape::get(CI, ElementCount::getFixed(1),
- /*HasGlobalPredicate*/ false);
+ return VFShape::get(CI, ElementCount::getFixed(1),
+ /*HasGlobalPredicate*/ false);
}
// Retrieve the basic vectorization shape of the function, where all
@@ -122,7 +122,7 @@ struct VFShape {
Parameters.push_back(
VFParameter({CI.arg_size(), VFParamKind::GlobalPredicate}));
- return {EC.getKnownMinValue(), EC.isScalable(), Parameters};
+ return {EC.getKnownMinValue(), EC.isScalable(), Parameters};
}
/// Sanity check on the Parameters in the VFShape.
bool hasValidParameterList() const;
@@ -307,19 +307,19 @@ namespace Intrinsic {
typedef unsigned ID;
}
-/// A helper function for converting Scalar types to vector types. If
-/// the incoming type is void, we return void. If the EC represents a
-/// scalar, we return the scalar type.
-inline Type *ToVectorTy(Type *Scalar, ElementCount EC) {
- if (Scalar->isVoidTy() || Scalar->isMetadataTy() || EC.isScalar())
+/// A helper function for converting Scalar types to vector types. If
+/// the incoming type is void, we return void. If the EC represents a
+/// scalar, we return the scalar type.
+inline Type *ToVectorTy(Type *Scalar, ElementCount EC) {
+ if (Scalar->isVoidTy() || Scalar->isMetadataTy() || EC.isScalar())
return Scalar;
- return VectorType::get(Scalar, EC);
-}
-
-inline Type *ToVectorTy(Type *Scalar, unsigned VF) {
- return ToVectorTy(Scalar, ElementCount::getFixed(VF));
+ return VectorType::get(Scalar, EC);
}
+inline Type *ToVectorTy(Type *Scalar, unsigned VF) {
+ return ToVectorTy(Scalar, ElementCount::getFixed(VF));
+}
+
/// Identify if the intrinsic is trivially vectorizable.
/// This method returns true if the intrinsic's argument types are all scalars
/// for the scalar form of the intrinsic and all vectors (or scalars handled by
@@ -365,7 +365,7 @@ int getSplatIndex(ArrayRef<int> Mask);
/// Get splat value if the input is a splat vector or return nullptr.
/// The value may be extracted from a splat constants vector or from
/// a sequence of instructions that broadcast a single value into a vector.
-Value *getSplatValue(const Value *V);
+Value *getSplatValue(const Value *V);
/// Return true if each element of the vector value \p V is poisoned or equal to
/// every other non-poisoned element. If an index element is specified, either
@@ -551,20 +551,20 @@ createSequentialMask(unsigned Start, unsigned NumInts, unsigned NumUndefs);
/// elements, it will be padded with undefs.
Value *concatenateVectors(IRBuilderBase &Builder, ArrayRef<Value *> Vecs);
-/// Given a mask vector of i1, Return true if all of the elements of this
-/// predicate mask are known to be false or undef. That is, return true if all
-/// lanes can be assumed inactive.
+/// Given a mask vector of i1, Return true if all of the elements of this
+/// predicate mask are known to be false or undef. That is, return true if all
+/// lanes can be assumed inactive.
bool maskIsAllZeroOrUndef(Value *Mask);
-/// Given a mask vector of i1, Return true if all of the elements of this
-/// predicate mask are known to be true or undef. That is, return true if all
-/// lanes can be assumed active.
+/// Given a mask vector of i1, Return true if all of the elements of this
+/// predicate mask are known to be true or undef. That is, return true if all
+/// lanes can be assumed active.
bool maskIsAllOneOrUndef(Value *Mask);
/// Given a mask vector of the form <Y x i1>, return an APInt (of bitwidth Y)
/// for each lane which may be active.
APInt possiblyDemandedEltsInMask(Value *Mask);
-
+
/// The group of interleaved loads/stores sharing the same stride and
/// close to each other.
///
@@ -627,11 +627,11 @@ public:
return false;
int32_t Key = *MaybeKey;
- // Skip if the key is used for either the tombstone or empty special values.
- if (DenseMapInfo<int32_t>::getTombstoneKey() == Key ||
- DenseMapInfo<int32_t>::getEmptyKey() == Key)
- return false;
-
+ // Skip if the key is used for either the tombstone or empty special values.
+ if (DenseMapInfo<int32_t>::getTombstoneKey() == Key ||
+ DenseMapInfo<int32_t>::getEmptyKey() == Key)
+ return false;
+
// Skip if there is already a member with the same index.
if (Members.find(Key) != Members.end())
return false;
@@ -667,7 +667,7 @@ public:
/// \returns nullptr if contains no such member.
InstTy *getMember(uint32_t Index) const {
int32_t Key = SmallestKey + Index;
- return Members.lookup(Key);
+ return Members.lookup(Key);
}
/// Get the index for the given member. Unlike the key in the member
@@ -785,7 +785,7 @@ public:
/// \returns nullptr if doesn't have such group.
InterleaveGroup<Instruction> *
getInterleaveGroup(const Instruction *Instr) const {
- return InterleaveGroupMap.lookup(Instr);
+ return InterleaveGroupMap.lookup(Instr);
}
iterator_range<SmallPtrSetIterator<llvm::InterleaveGroup<Instruction> *>>