path: root/contrib/libs/llvm12/include/llvm/Transforms/Utils
author     Devtools Arcadia <arcadia-devtools@yandex-team.ru>  2022-02-07 18:08:42 +0300
committer  Devtools Arcadia <arcadia-devtools@mous.vla.yp-c.yandex.net>  2022-02-07 18:08:42 +0300
commit     1110808a9d39d4b808aef724c861a2e1a38d2a69 (patch)
tree       e26c9fed0de5d9873cce7e00bc214573dc2195b7 /contrib/libs/llvm12/include/llvm/Transforms/Utils
download   ydb-1110808a9d39d4b808aef724c861a2e1a38d2a69.tar.gz
intermediate changes
ref:cde9a383711a11544ce7e107a78147fb96cc4029
Diffstat (limited to 'contrib/libs/llvm12/include/llvm/Transforms/Utils')
-rw-r--r--  contrib/libs/llvm12/include/llvm/Transforms/Utils/AMDGPUEmitPrintf.h | 36
-rw-r--r--  contrib/libs/llvm12/include/llvm/Transforms/Utils/ASanStackFrameLayout.h | 91
-rw-r--r--  contrib/libs/llvm12/include/llvm/Transforms/Utils/AddDiscriminators.h | 42
-rw-r--r--  contrib/libs/llvm12/include/llvm/Transforms/Utils/AssumeBundleBuilder.h | 71
-rw-r--r--  contrib/libs/llvm12/include/llvm/Transforms/Utils/BasicBlockUtils.h | 579
-rw-r--r--  contrib/libs/llvm12/include/llvm/Transforms/Utils/BreakCriticalEdges.h | 39
-rw-r--r--  contrib/libs/llvm12/include/llvm/Transforms/Utils/BuildLibCalls.h | 223
-rw-r--r--  contrib/libs/llvm12/include/llvm/Transforms/Utils/BypassSlowDivision.h | 85
-rw-r--r--  contrib/libs/llvm12/include/llvm/Transforms/Utils/CallGraphUpdater.h | 120
-rw-r--r--  contrib/libs/llvm12/include/llvm/Transforms/Utils/CallPromotionUtils.h | 89
-rw-r--r--  contrib/libs/llvm12/include/llvm/Transforms/Utils/CanonicalizeAliases.h | 42
-rw-r--r--  contrib/libs/llvm12/include/llvm/Transforms/Utils/CanonicalizeFreezeInLoops.h | 44
-rw-r--r--  contrib/libs/llvm12/include/llvm/Transforms/Utils/Cloning.h | 327
-rw-r--r--  contrib/libs/llvm12/include/llvm/Transforms/Utils/CodeExtractor.h | 246
-rw-r--r--  contrib/libs/llvm12/include/llvm/Transforms/Utils/CodeMoverUtils.h | 78
-rw-r--r--  contrib/libs/llvm12/include/llvm/Transforms/Utils/CtorUtils.h | 42
-rw-r--r--  contrib/libs/llvm12/include/llvm/Transforms/Utils/Debugify.h | 162
-rw-r--r--  contrib/libs/llvm12/include/llvm/Transforms/Utils/EntryExitInstrumenter.h | 46
-rw-r--r--  contrib/libs/llvm12/include/llvm/Transforms/Utils/EscapeEnumerator.h | 59
-rw-r--r--  contrib/libs/llvm12/include/llvm/Transforms/Utils/Evaluator.h | 143
-rw-r--r--  contrib/libs/llvm12/include/llvm/Transforms/Utils/FixIrreducible.h | 31
-rw-r--r--  contrib/libs/llvm12/include/llvm/Transforms/Utils/FunctionComparator.h | 403
-rw-r--r--  contrib/libs/llvm12/include/llvm/Transforms/Utils/FunctionImportUtils.h | 147
-rw-r--r--  contrib/libs/llvm12/include/llvm/Transforms/Utils/GlobalStatus.h | 95
-rw-r--r--  contrib/libs/llvm12/include/llvm/Transforms/Utils/GuardUtils.h | 55
-rw-r--r--  contrib/libs/llvm12/include/llvm/Transforms/Utils/InjectTLIMappings.h | 48
-rw-r--r--  contrib/libs/llvm12/include/llvm/Transforms/Utils/InstructionNamer.h | 31
-rw-r--r--  contrib/libs/llvm12/include/llvm/Transforms/Utils/IntegerDivision.h | 83
-rw-r--r--  contrib/libs/llvm12/include/llvm/Transforms/Utils/LCSSA.h | 54
-rw-r--r--  contrib/libs/llvm12/include/llvm/Transforms/Utils/LibCallsShrinkWrap.h | 37
-rw-r--r--  contrib/libs/llvm12/include/llvm/Transforms/Utils/Local.h | 502
-rw-r--r--  contrib/libs/llvm12/include/llvm/Transforms/Utils/LoopPeel.h | 51
-rw-r--r--  contrib/libs/llvm12/include/llvm/Transforms/Utils/LoopRotationUtils.h | 52
-rw-r--r--  contrib/libs/llvm12/include/llvm/Transforms/Utils/LoopSimplify.h | 81
-rw-r--r--  contrib/libs/llvm12/include/llvm/Transforms/Utils/LoopUtils.h | 498
-rw-r--r--  contrib/libs/llvm12/include/llvm/Transforms/Utils/LoopVersioning.h | 166
-rw-r--r--  contrib/libs/llvm12/include/llvm/Transforms/Utils/LowerInvoke.h | 40
-rw-r--r--  contrib/libs/llvm12/include/llvm/Transforms/Utils/LowerMemIntrinsics.h | 66
-rw-r--r--  contrib/libs/llvm12/include/llvm/Transforms/Utils/LowerSwitch.h | 37
-rw-r--r--  contrib/libs/llvm12/include/llvm/Transforms/Utils/MatrixUtils.h | 105
-rw-r--r--  contrib/libs/llvm12/include/llvm/Transforms/Utils/Mem2Reg.h | 41
-rw-r--r--  contrib/libs/llvm12/include/llvm/Transforms/Utils/MetaRenamer.h | 37
-rw-r--r--  contrib/libs/llvm12/include/llvm/Transforms/Utils/ModuleUtils.h | 134
-rw-r--r--  contrib/libs/llvm12/include/llvm/Transforms/Utils/NameAnonGlobals.h | 43
-rw-r--r--  contrib/libs/llvm12/include/llvm/Transforms/Utils/PredicateInfo.h | 252
-rw-r--r--  contrib/libs/llvm12/include/llvm/Transforms/Utils/PromoteMemToReg.h | 55
-rw-r--r--  contrib/libs/llvm12/include/llvm/Transforms/Utils/SSAUpdater.h | 187
-rw-r--r--  contrib/libs/llvm12/include/llvm/Transforms/Utils/SSAUpdaterBulk.h | 101
-rw-r--r--  contrib/libs/llvm12/include/llvm/Transforms/Utils/SSAUpdaterImpl.h | 478
-rw-r--r--  contrib/libs/llvm12/include/llvm/Transforms/Utils/SanitizerStats.h | 66
-rw-r--r--  contrib/libs/llvm12/include/llvm/Transforms/Utils/ScalarEvolutionExpander.h | 522
-rw-r--r--  contrib/libs/llvm12/include/llvm/Transforms/Utils/SimplifyCFGOptions.h | 88
-rw-r--r--  contrib/libs/llvm12/include/llvm/Transforms/Utils/SimplifyIndVar.h | 96
-rw-r--r--  contrib/libs/llvm12/include/llvm/Transforms/Utils/SimplifyLibCalls.h | 257
-rw-r--r--  contrib/libs/llvm12/include/llvm/Transforms/Utils/SizeOpts.h | 118
-rw-r--r--  contrib/libs/llvm12/include/llvm/Transforms/Utils/SplitModule.h | 53
-rw-r--r--  contrib/libs/llvm12/include/llvm/Transforms/Utils/StripGCRelocates.h | 36
-rw-r--r--  contrib/libs/llvm12/include/llvm/Transforms/Utils/StripNonLineTableDebugInfo.h | 37
-rw-r--r--  contrib/libs/llvm12/include/llvm/Transforms/Utils/SymbolRewriter.h | 152
-rw-r--r--  contrib/libs/llvm12/include/llvm/Transforms/Utils/UnifyFunctionExitNodes.h | 56
-rw-r--r--  contrib/libs/llvm12/include/llvm/Transforms/Utils/UnifyLoopExits.h | 33
-rw-r--r--  contrib/libs/llvm12/include/llvm/Transforms/Utils/UniqueInternalLinkageNames.h | 42
-rw-r--r--  contrib/libs/llvm12/include/llvm/Transforms/Utils/UnrollLoop.h | 149
-rw-r--r--  contrib/libs/llvm12/include/llvm/Transforms/Utils/VNCoercion.h | 118
-rw-r--r--  contrib/libs/llvm12/include/llvm/Transforms/Utils/ValueMapper.h | 292
65 files changed, 8589 insertions, 0 deletions
diff --git a/contrib/libs/llvm12/include/llvm/Transforms/Utils/AMDGPUEmitPrintf.h b/contrib/libs/llvm12/include/llvm/Transforms/Utils/AMDGPUEmitPrintf.h
new file mode 100644
index 0000000000..514986ca40
--- /dev/null
+++ b/contrib/libs/llvm12/include/llvm/Transforms/Utils/AMDGPUEmitPrintf.h
@@ -0,0 +1,36 @@
+#pragma once
+
+#ifdef __GNUC__
+#pragma GCC diagnostic push
+#pragma GCC diagnostic ignored "-Wunused-parameter"
+#endif
+
+//===- AMDGPUEmitPrintf.h ---------------------------------------*- C++ -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// Utility function to lower a printf call into a series of device
+// library calls on the AMDGPU target.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_TRANSFORMS_UTILS_AMDGPUEMITPRINTF_H
+#define LLVM_TRANSFORMS_UTILS_AMDGPUEMITPRINTF_H
+
+#include "llvm/IR/IRBuilder.h"
+
+namespace llvm {
+
+Value *emitAMDGPUPrintfCall(IRBuilder<> &Builder, ArrayRef<Value *> Args);
+
+} // end namespace llvm
+
+#endif // LLVM_TRANSFORMS_UTILS_AMDGPUEMITPRINTF_H
+
+#ifdef __GNUC__
+#pragma GCC diagnostic pop
+#endif
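A minimal sketch of how a lowering pass might drive emitAMDGPUPrintfCall, assuming an existing printf call site CI whose return type is i32 and matches the emitted expansion's result; the helper name lowerPrintf is illustrative.

// Sketch only: lower a printf call site into the AMDGPU device-library expansion.
#include "llvm/ADT/SmallVector.h"
#include "llvm/IR/IRBuilder.h"
#include "llvm/IR/Instructions.h"
#include "llvm/Transforms/Utils/AMDGPUEmitPrintf.h"

using namespace llvm;

static void lowerPrintf(CallInst *CI) {          // CI: a call to printf
  IRBuilder<> Builder(CI);                       // insert the expansion before the call
  SmallVector<Value *, 8> Args(CI->arg_begin(), CI->arg_end());
  Value *Ret = emitAMDGPUPrintfCall(Builder, Args);
  CI->replaceAllUsesWith(Ret);                   // keep users of printf's return value
  CI->eraseFromParent();
}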
diff --git a/contrib/libs/llvm12/include/llvm/Transforms/Utils/ASanStackFrameLayout.h b/contrib/libs/llvm12/include/llvm/Transforms/Utils/ASanStackFrameLayout.h
new file mode 100644
index 0000000000..7ead1e088c
--- /dev/null
+++ b/contrib/libs/llvm12/include/llvm/Transforms/Utils/ASanStackFrameLayout.h
@@ -0,0 +1,91 @@
+#pragma once
+
+#ifdef __GNUC__
+#pragma GCC diagnostic push
+#pragma GCC diagnostic ignored "-Wunused-parameter"
+#endif
+
+//===- ASanStackFrameLayout.h - ComputeASanStackFrameLayout -----*- C++ -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// This header defines ComputeASanStackFrameLayout and auxiliary data structs.
+//
+//===----------------------------------------------------------------------===//
+#ifndef LLVM_TRANSFORMS_UTILS_ASANSTACKFRAMELAYOUT_H
+#define LLVM_TRANSFORMS_UTILS_ASANSTACKFRAMELAYOUT_H
+#include "llvm/ADT/SmallString.h"
+#include "llvm/ADT/SmallVector.h"
+
+namespace llvm {
+
+class AllocaInst;
+
+// These magic constants should be the same as those
+// in asan_internal.h from the ASan runtime in compiler-rt.
+static const int kAsanStackLeftRedzoneMagic = 0xf1;
+static const int kAsanStackMidRedzoneMagic = 0xf2;
+static const int kAsanStackRightRedzoneMagic = 0xf3;
+static const int kAsanStackUseAfterReturnMagic = 0xf5;
+static const int kAsanStackUseAfterScopeMagic = 0xf8;
+
+// Input/output data struct for ComputeASanStackFrameLayout.
+struct ASanStackVariableDescription {
+ const char *Name; // Name of the variable that will be displayed by asan
+ // if a stack-related bug is reported.
+ uint64_t Size; // Size of the variable in bytes.
+ size_t LifetimeSize; // Size in bytes to use for lifetime analysis check.
+ // Will be rounded up to Granularity.
+ size_t Alignment; // Alignment of the variable (power of 2).
+ AllocaInst *AI; // The actual AllocaInst.
+ size_t Offset; // Offset from the beginning of the frame;
+ // set by ComputeASanStackFrameLayout.
+ unsigned Line; // Line number.
+};
+
+// Output data struct for ComputeASanStackFrameLayout.
+struct ASanStackFrameLayout {
+ size_t Granularity; // Shadow granularity.
+ size_t FrameAlignment; // Alignment for the entire frame.
+ size_t FrameSize; // Size of the frame in bytes.
+};
+
+ASanStackFrameLayout ComputeASanStackFrameLayout(
+ // The array of stack variables. The elements may get reordered and changed.
+ SmallVectorImpl<ASanStackVariableDescription> &Vars,
+ // AddressSanitizer's shadow granularity. Usually 8, may also be 16, 32, 64.
+ size_t Granularity,
+ // The minimal size of the left-most redzone (header).
+ // At least 4 pointer sizes, power of 2, and >= Granularity.
+    // The resulting FrameSize should be a multiple of MinHeaderSize.
+ size_t MinHeaderSize);
+
+// Compute frame description, see DescribeAddressIfStack in ASan runtime.
+SmallString<64> ComputeASanStackFrameDescription(
+ const SmallVectorImpl<ASanStackVariableDescription> &Vars);
+
+// Returns shadow bytes with marked red zones. This shadow represents the state
+// of the stack frame when all local variables are inside their own scope.
+SmallVector<uint8_t, 64>
+GetShadowBytes(const SmallVectorImpl<ASanStackVariableDescription> &Vars,
+ const ASanStackFrameLayout &Layout);
+
+// Returns shadow bytes with marked red zones and after-scope markings. This
+// shadow represents the state of the stack frame when all local variables are
+// outside their own scope.
+SmallVector<uint8_t, 64> GetShadowBytesAfterScope(
+ // The array of stack variables. The elements may get reordered and changed.
+ const SmallVectorImpl<ASanStackVariableDescription> &Vars,
+ const ASanStackFrameLayout &Layout);
+
+} // llvm namespace
+
+#endif // LLVM_TRANSFORMS_UTILS_ASANSTACKFRAMELAYOUT_H
+
+#ifdef __GNUC__
+#pragma GCC diagnostic pop
+#endif
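A hypothetical sketch of how these helpers fit together, assuming two existing allocas AI1 and AI2; the variable sizes and alignments, the Granularity of 8, and the MinHeaderSize of 32 are illustrative values only.

// Sketch: describe two stack variables and compute the ASan frame layout.
#include "llvm/ADT/SmallVector.h"
#include "llvm/Transforms/Utils/ASanStackFrameLayout.h"

using namespace llvm;

static void describeFrame(AllocaInst *AI1, AllocaInst *AI2) {
  SmallVector<ASanStackVariableDescription, 4> Vars = {
      // Name, Size, LifetimeSize, Alignment, AI, Offset, Line (values illustrative)
      {"x",   4,  4,  4,  AI1, 0, 0},
      {"buf", 20, 20, 16, AI2, 0, 0},
  };
  ASanStackFrameLayout Layout =
      ComputeASanStackFrameLayout(Vars, /*Granularity=*/8, /*MinHeaderSize=*/32);
  SmallVector<uint8_t, 64> Shadow = GetShadowBytes(Vars, Layout);
  (void)Layout.FrameSize;  // frame size in bytes, a multiple of MinHeaderSize
  (void)Shadow;            // shadow bytes containing the redzone magic values
}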
diff --git a/contrib/libs/llvm12/include/llvm/Transforms/Utils/AddDiscriminators.h b/contrib/libs/llvm12/include/llvm/Transforms/Utils/AddDiscriminators.h
new file mode 100644
index 0000000000..34dde3d45f
--- /dev/null
+++ b/contrib/libs/llvm12/include/llvm/Transforms/Utils/AddDiscriminators.h
@@ -0,0 +1,42 @@
+#pragma once
+
+#ifdef __GNUC__
+#pragma GCC diagnostic push
+#pragma GCC diagnostic ignored "-Wunused-parameter"
+#endif
+
+//===- AddDiscriminators.h --------------------------------------*- C++ -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// This pass adds DWARF discriminators to the IR. Path discriminators are used
+// to decide what CFG path was taken inside sub-graphs whose instructions share
+// the same line and column number information.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_TRANSFORMS_UTILS_ADDDISCRIMINATORS_H
+#define LLVM_TRANSFORMS_UTILS_ADDDISCRIMINATORS_H
+
+#include "llvm/IR/PassManager.h"
+
+namespace llvm {
+
+class Function;
+
+class AddDiscriminatorsPass : public PassInfoMixin<AddDiscriminatorsPass> {
+public:
+ PreservedAnalyses run(Function &F, FunctionAnalysisManager &AM);
+};
+
+} // end namespace llvm
+
+#endif // LLVM_TRANSFORMS_UTILS_ADDDISCRIMINATORS_H
+
+#ifdef __GNUC__
+#pragma GCC diagnostic pop
+#endif
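A small sketch of scheduling this pass in a new-pass-manager function pipeline; the buildPipeline helper is hypothetical, and the rest of the pipeline setup is assumed to exist elsewhere.

#include "llvm/IR/PassManager.h"
#include "llvm/Transforms/Utils/AddDiscriminators.h"

using namespace llvm;

// Sketch: add AddDiscriminatorsPass to a function pipeline like any other pass.
static FunctionPassManager buildPipeline() {
  FunctionPassManager FPM;
  FPM.addPass(AddDiscriminatorsPass());  // attaches DWARF discriminators to debug locations
  return FPM;
}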
diff --git a/contrib/libs/llvm12/include/llvm/Transforms/Utils/AssumeBundleBuilder.h b/contrib/libs/llvm12/include/llvm/Transforms/Utils/AssumeBundleBuilder.h
new file mode 100644
index 0000000000..0c49868ebf
--- /dev/null
+++ b/contrib/libs/llvm12/include/llvm/Transforms/Utils/AssumeBundleBuilder.h
@@ -0,0 +1,71 @@
+#pragma once
+
+#ifdef __GNUC__
+#pragma GCC diagnostic push
+#pragma GCC diagnostic ignored "-Wunused-parameter"
+#endif
+
+//===- AssumeBundleBuilder.h - utils to build assume bundles ----*- C++ -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// This file contains tools to preserve information. They should be used before
+// performing a transformation that may move and delete instructions, as such
+// transformations may destroy or degrade information that can be derived from
+// the IR.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_TRANSFORMS_UTILS_ASSUMEBUNDLEBUILDER_H
+#define LLVM_TRANSFORMS_UTILS_ASSUMEBUNDLEBUILDER_H
+
+#include "llvm/IR/Attributes.h"
+#include "llvm/IR/Instruction.h"
+#include "llvm/IR/PassManager.h"
+
+namespace llvm {
+class IntrinsicInst;
+class AssumptionCache;
+class DominatorTree;
+
+/// Build a call to llvm.assume to preserve information that can be derived
+/// from the given instruction.
+/// If no information can be derived from \p I, this call returns null.
+/// The returned instruction is not inserted anywhere.
+IntrinsicInst *buildAssumeFromInst(Instruction *I);
+
+/// Calls buildAssumeFromInst and, if the resulting llvm.assume is valid,
+/// inserts it before \p I. This is usually what needs to be done to salvage
+/// the knowledge contained in the instruction \p I.
+/// The AssumptionCache must be provided if it is available, or the cache may
+/// silently become invalid.
+/// The DominatorTree can optionally be provided to enable cross-block
+/// reasoning.
+void salvageKnowledge(Instruction *I, AssumptionCache *AC = nullptr,
+ DominatorTree *DT = nullptr);
+
+/// This pass attempts to minimize the number of llvm.assume intrinsics without
+/// losing any information.
+struct AssumeSimplifyPass : public PassInfoMixin<AssumeSimplifyPass> {
+ PreservedAnalyses run(Function &F, FunctionAnalysisManager &AM);
+};
+
+FunctionPass *createAssumeSimplifyPass();
+
+/// This pass will try to build an llvm.assume for every instruction in the
+/// function. Its main purpose is testing.
+struct AssumeBuilderPass : public PassInfoMixin<AssumeBuilderPass> {
+ PreservedAnalyses run(Function &F, FunctionAnalysisManager &AM);
+};
+
+} // namespace llvm
+
+#endif
+
+#ifdef __GNUC__
+#pragma GCC diagnostic pop
+#endif
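A short sketch of the intended salvage pattern, assuming AC and DT come from the enclosing pass's analysis results (either may be null); the helper name eraseButKeepKnowledge is illustrative.

#include "llvm/Analysis/AssumptionCache.h"
#include "llvm/IR/Dominators.h"
#include "llvm/IR/Instruction.h"
#include "llvm/Transforms/Utils/AssumeBundleBuilder.h"

using namespace llvm;

// Sketch: preserve derivable facts as an llvm.assume before erasing I.
static void eraseButKeepKnowledge(Instruction *I, AssumptionCache *AC,
                                  DominatorTree *DT) {
  salvageKnowledge(I, AC, DT);  // builds and inserts an llvm.assume if worthwhile
  I->eraseFromParent();
}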
diff --git a/contrib/libs/llvm12/include/llvm/Transforms/Utils/BasicBlockUtils.h b/contrib/libs/llvm12/include/llvm/Transforms/Utils/BasicBlockUtils.h
new file mode 100644
index 0000000000..17376fd660
--- /dev/null
+++ b/contrib/libs/llvm12/include/llvm/Transforms/Utils/BasicBlockUtils.h
@@ -0,0 +1,579 @@
+#pragma once
+
+#ifdef __GNUC__
+#pragma GCC diagnostic push
+#pragma GCC diagnostic ignored "-Wunused-parameter"
+#endif
+
+//===- Transform/Utils/BasicBlockUtils.h - BasicBlock Utils -----*- C++ -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// This family of functions perform manipulations on basic blocks, and
+// instructions contained within basic blocks.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_TRANSFORMS_UTILS_BASICBLOCKUTILS_H
+#define LLVM_TRANSFORMS_UTILS_BASICBLOCKUTILS_H
+
+// FIXME: Move to this file: BasicBlock::removePredecessor, BB::splitBasicBlock
+
+#include "llvm/ADT/ArrayRef.h"
+#include "llvm/ADT/SetVector.h"
+#include "llvm/Analysis/DomTreeUpdater.h"
+#include "llvm/Analysis/LoopInfo.h"
+#include "llvm/IR/BasicBlock.h"
+#include "llvm/IR/CFG.h"
+#include "llvm/IR/InstrTypes.h"
+#include <cassert>
+
+namespace llvm {
+
+class BlockFrequencyInfo;
+class BranchProbabilityInfo;
+class DominatorTree;
+class DomTreeUpdater;
+class Function;
+class Instruction;
+class LoopInfo;
+class MDNode;
+class MemoryDependenceResults;
+class MemorySSAUpdater;
+class PostDominatorTree;
+class ReturnInst;
+class TargetLibraryInfo;
+class Value;
+
+/// Replace the contents of every block in \p BBs with a single unreachable
+/// instruction. If \p Updates is specified, collect all necessary DT updates
+/// into this vector. If \p KeepOneInputPHIs is true, one-input Phis in
+/// successors of blocks being deleted will be preserved.
+void DetatchDeadBlocks(ArrayRef<BasicBlock *> BBs,
+ SmallVectorImpl<DominatorTree::UpdateType> *Updates,
+ bool KeepOneInputPHIs = false);
+
+/// Delete the specified block, which must have no predecessors.
+void DeleteDeadBlock(BasicBlock *BB, DomTreeUpdater *DTU = nullptr,
+ bool KeepOneInputPHIs = false);
+
+/// Delete the specified blocks \p BBs. The set of deleted blocks must have
+/// no predecessors that are not being deleted themselves. \p BBs must contain
+/// no duplicate blocks. If there are loops among this set of blocks, all
+/// relevant loop info updates should be done before this function is called.
+/// If \p KeepOneInputPHIs is true, one-input Phis in successors of blocks
+/// being deleted will be preserved.
+void DeleteDeadBlocks(ArrayRef<BasicBlock *> BBs,
+ DomTreeUpdater *DTU = nullptr,
+ bool KeepOneInputPHIs = false);
+
+/// Delete all basic blocks from \p F that are not reachable from its entry
+/// node. If \p KeepOneInputPHIs is true, one-input Phis in successors of
+/// blocks being deleted will be preserved.
+bool EliminateUnreachableBlocks(Function &F, DomTreeUpdater *DTU = nullptr,
+ bool KeepOneInputPHIs = false);
+
+/// We know that BB has one predecessor. If there are any single-entry PHI nodes
+/// in it, fold them away. This handles the case when all entries to the PHI
+/// nodes in a block are guaranteed equal, such as when the block has exactly
+/// one predecessor.
+bool FoldSingleEntryPHINodes(BasicBlock *BB,
+ MemoryDependenceResults *MemDep = nullptr);
+
+/// Examine each PHI in the given block and delete it if it is dead. Also
+/// recursively delete any operands that become dead as a result. This includes
+/// tracing the def-use list from the PHI to see if it is ultimately unused or
+/// if it reaches an unused cycle. Return true if any PHIs were deleted.
+bool DeleteDeadPHIs(BasicBlock *BB, const TargetLibraryInfo *TLI = nullptr,
+ MemorySSAUpdater *MSSAU = nullptr);
+
+/// Attempts to merge a block into its predecessor, if possible. The return
+/// value indicates success or failure.
+/// By default do not merge blocks if BB's predecessor has multiple successors.
+/// If PredecessorWithTwoSuccessors = true, the blocks can only be merged
+/// if BB's Pred has a branch to BB and to AnotherBB, and BB has a single
+/// successor Sing. In this case the branch will be updated with Sing instead of
+/// BB, and BB will still be merged into its predecessor and removed.
+bool MergeBlockIntoPredecessor(BasicBlock *BB, DomTreeUpdater *DTU = nullptr,
+ LoopInfo *LI = nullptr,
+ MemorySSAUpdater *MSSAU = nullptr,
+ MemoryDependenceResults *MemDep = nullptr,
+ bool PredecessorWithTwoSuccessors = false);
+
+/// Merge the successors of the given block(s), if possible. Return true if at
+/// least two of the blocks were merged together.
+/// In order to merge, each block must be terminated by an unconditional
+/// branch. If L is provided, then the blocks merged into their predecessors
+/// must be in L. In addition, this utility calls on another utility:
+/// MergeBlockIntoPredecessor. Blocks are successfully merged when the call to
+/// MergeBlockIntoPredecessor returns true.
+bool MergeBlockSuccessorsIntoGivenBlocks(
+ SmallPtrSetImpl<BasicBlock *> &MergeBlocks, Loop *L = nullptr,
+ DomTreeUpdater *DTU = nullptr, LoopInfo *LI = nullptr);
+
+/// Try to remove redundant dbg.value instructions from given basic block.
+/// Returns true if at least one instruction was removed.
+bool RemoveRedundantDbgInstrs(BasicBlock *BB);
+
+/// Replace all uses of an instruction (specified by BI) with a value, then
+/// remove and delete the original instruction.
+void ReplaceInstWithValue(BasicBlock::InstListType &BIL,
+ BasicBlock::iterator &BI, Value *V);
+
+/// Replace the instruction specified by BI with the instruction specified by I.
+/// Copies DebugLoc from BI to I, if I doesn't already have a DebugLoc. The
+/// original instruction is deleted and BI is updated to point to the new
+/// instruction.
+void ReplaceInstWithInst(BasicBlock::InstListType &BIL,
+ BasicBlock::iterator &BI, Instruction *I);
+
+/// Replace the instruction specified by From with the instruction specified by
+/// To. Copies DebugLoc from From to To, if To doesn't already have a DebugLoc.
+void ReplaceInstWithInst(Instruction *From, Instruction *To);
+
+/// Option class for critical edge splitting.
+///
+/// This provides a builder interface for overriding the default options used
+/// during critical edge splitting.
+struct CriticalEdgeSplittingOptions {
+ DominatorTree *DT;
+ PostDominatorTree *PDT;
+ LoopInfo *LI;
+ MemorySSAUpdater *MSSAU;
+ bool MergeIdenticalEdges = false;
+ bool KeepOneInputPHIs = false;
+ bool PreserveLCSSA = false;
+ bool IgnoreUnreachableDests = false;
+ /// SplitCriticalEdge is guaranteed to preserve loop-simplify form if LI is
+ /// provided. If it cannot be preserved, no splitting will take place. If it
+ /// is not set, preserve loop-simplify form if possible.
+ bool PreserveLoopSimplify = true;
+
+ CriticalEdgeSplittingOptions(DominatorTree *DT = nullptr,
+ LoopInfo *LI = nullptr,
+ MemorySSAUpdater *MSSAU = nullptr,
+ PostDominatorTree *PDT = nullptr)
+ : DT(DT), PDT(PDT), LI(LI), MSSAU(MSSAU) {}
+
+ CriticalEdgeSplittingOptions &setMergeIdenticalEdges() {
+ MergeIdenticalEdges = true;
+ return *this;
+ }
+
+ CriticalEdgeSplittingOptions &setKeepOneInputPHIs() {
+ KeepOneInputPHIs = true;
+ return *this;
+ }
+
+ CriticalEdgeSplittingOptions &setPreserveLCSSA() {
+ PreserveLCSSA = true;
+ return *this;
+ }
+
+ CriticalEdgeSplittingOptions &setIgnoreUnreachableDests() {
+ IgnoreUnreachableDests = true;
+ return *this;
+ }
+
+ CriticalEdgeSplittingOptions &unsetPreserveLoopSimplify() {
+ PreserveLoopSimplify = false;
+ return *this;
+ }
+};
+
+/// If this edge is a critical edge, insert a new node to split the critical
+/// edge. This will update the analyses passed in through the option struct.
+/// This returns the new block if the edge was split, null otherwise.
+///
+/// If MergeIdenticalEdges in the options struct is true (not the default),
+/// *all* edges from TI to the specified successor will be merged into the same
+/// critical edge block. This is most commonly interesting with switch
+/// instructions, which may have many edges to any one destination. This
+/// ensures that all edges to that dest go to one block instead of each going
+/// to a different block, but isn't the standard definition of a "critical
+/// edge".
+///
+/// It is invalid to call this function on a critical edge that starts at an
+/// IndirectBrInst. Splitting these edges will almost always create an invalid
+/// program because the address of the new block won't be the one that is jumped
+/// to.
+BasicBlock *SplitCriticalEdge(Instruction *TI, unsigned SuccNum,
+ const CriticalEdgeSplittingOptions &Options =
+ CriticalEdgeSplittingOptions(),
+ const Twine &BBName = "");
+
+inline BasicBlock *
+SplitCriticalEdge(BasicBlock *BB, succ_iterator SI,
+ const CriticalEdgeSplittingOptions &Options =
+ CriticalEdgeSplittingOptions()) {
+ return SplitCriticalEdge(BB->getTerminator(), SI.getSuccessorIndex(),
+ Options);
+}
+
+/// If the edge from *PI to Succ is not critical, return false. Otherwise, split
+/// all edges between the two blocks and return true. This updates all of the
+/// same analyses as the other SplitCriticalEdge function; the analyses to
+/// update are passed in through the options struct.
+inline bool SplitCriticalEdge(BasicBlock *Succ, pred_iterator PI,
+ const CriticalEdgeSplittingOptions &Options =
+ CriticalEdgeSplittingOptions()) {
+ bool MadeChange = false;
+ Instruction *TI = (*PI)->getTerminator();
+ for (unsigned i = 0, e = TI->getNumSuccessors(); i != e; ++i)
+ if (TI->getSuccessor(i) == Succ)
+ MadeChange |= !!SplitCriticalEdge(TI, i, Options);
+ return MadeChange;
+}
+
+/// If an edge from Src to Dst is critical, split the edge and return the newly
+/// created block; otherwise return null. This method requires that there be an
+/// edge between the two blocks. It updates the analyses passed in through the
+/// options struct.
+inline BasicBlock *
+SplitCriticalEdge(BasicBlock *Src, BasicBlock *Dst,
+ const CriticalEdgeSplittingOptions &Options =
+ CriticalEdgeSplittingOptions()) {
+ Instruction *TI = Src->getTerminator();
+ unsigned i = 0;
+ while (true) {
+ assert(i != TI->getNumSuccessors() && "Edge doesn't exist!");
+ if (TI->getSuccessor(i) == Dst)
+ return SplitCriticalEdge(TI, i, Options);
+ ++i;
+ }
+}
+
+/// Loop over all of the edges in the CFG, breaking critical edges as they are
+/// found. Returns the number of broken edges.
+unsigned SplitAllCriticalEdges(Function &F,
+ const CriticalEdgeSplittingOptions &Options =
+ CriticalEdgeSplittingOptions());
+
+/// Split the edge connecting the specified blocks, and return the newly created
+/// basic block between \p From and \p To.
+BasicBlock *SplitEdge(BasicBlock *From, BasicBlock *To,
+ DominatorTree *DT = nullptr, LoopInfo *LI = nullptr,
+ MemorySSAUpdater *MSSAU = nullptr,
+ const Twine &BBName = "");
+
+/// Split the specified block at the specified instruction.
+///
+/// If \p Before is true, splitBlockBefore handles the block
+/// splitting. Otherwise, execution proceeds as described below.
+///
+/// Everything before \p SplitPt stays in \p Old and everything starting with \p
+/// SplitPt moves to a new block. The two blocks are joined by an unconditional
+/// branch. The new block with name \p BBName is returned.
+///
+/// FIXME: deprecated, switch to the DomTreeUpdater-based one.
+BasicBlock *SplitBlock(BasicBlock *Old, Instruction *SplitPt, DominatorTree *DT,
+ LoopInfo *LI = nullptr,
+ MemorySSAUpdater *MSSAU = nullptr,
+ const Twine &BBName = "", bool Before = false);
+
+/// Split the specified block at the specified instruction.
+///
+/// If \p Before is true, splitBlockBefore handles the block
+/// splitting. Otherwise, execution proceeds as described below.
+///
+/// Everything before \p SplitPt stays in \p Old and everything starting with \p
+/// SplitPt moves to a new block. The two blocks are joined by an unconditional
+/// branch. The new block with name \p BBName is returned.
+BasicBlock *SplitBlock(BasicBlock *Old, Instruction *SplitPt,
+ DomTreeUpdater *DTU = nullptr, LoopInfo *LI = nullptr,
+ MemorySSAUpdater *MSSAU = nullptr,
+ const Twine &BBName = "", bool Before = false);
+
+/// Split the specified block at the specified instruction \p SplitPt.
+/// All instructions before \p SplitPt are moved to a new block and all
+/// instructions after \p SplitPt stay in the old block. The new block and the
+/// old block are joined by inserting an unconditional branch to the end of the
+/// new block. The new block with name \p BBName is returned.
+BasicBlock *splitBlockBefore(BasicBlock *Old, Instruction *SplitPt,
+ DomTreeUpdater *DTU, LoopInfo *LI,
+ MemorySSAUpdater *MSSAU, const Twine &BBName = "");
+
+/// This method introduces at least one new basic block into the function and
+/// moves some of the predecessors of BB to be predecessors of the new block.
+/// The new predecessors are indicated by the Preds array. The new block is
+/// given a suffix of 'Suffix'. Returns new basic block to which predecessors
+/// from Preds are now pointing.
+///
+/// If BB is a landingpad block then an additional basic block might be
+/// introduced. It will have Suffix+".split_lp". See SplitLandingPadPredecessors
+/// for more details on this case.
+///
+/// This currently updates the LLVM IR, DominatorTree, LoopInfo, and LCSSA but
+/// no other analyses. In particular, it does not preserve LoopSimplify
+/// (because it's complicated to handle the case where one of the edges being
+/// split is an exit of a loop with other exits).
+///
+/// FIXME: deprecated, switch to the DomTreeUpdater-based one.
+BasicBlock *SplitBlockPredecessors(BasicBlock *BB, ArrayRef<BasicBlock *> Preds,
+ const char *Suffix, DominatorTree *DT,
+ LoopInfo *LI = nullptr,
+ MemorySSAUpdater *MSSAU = nullptr,
+ bool PreserveLCSSA = false);
+
+/// This method introduces at least one new basic block into the function and
+/// moves some of the predecessors of BB to be predecessors of the new block.
+/// The new predecessors are indicated by the Preds array. The new block is
+/// given a suffix of 'Suffix'. Returns new basic block to which predecessors
+/// from Preds are now pointing.
+///
+/// If BB is a landingpad block then an additional basic block might be
+/// introduced. It will have Suffix+".split_lp". See SplitLandingPadPredecessors
+/// for more details on this case.
+///
+/// This currently updates the LLVM IR, DominatorTree, LoopInfo, and LCSSA but
+/// no other analyses. In particular, it does not preserve LoopSimplify
+/// (because it's complicated to handle the case where one of the edges being
+/// split is an exit of a loop with other exits).
+BasicBlock *SplitBlockPredecessors(BasicBlock *BB, ArrayRef<BasicBlock *> Preds,
+ const char *Suffix,
+ DomTreeUpdater *DTU = nullptr,
+ LoopInfo *LI = nullptr,
+ MemorySSAUpdater *MSSAU = nullptr,
+ bool PreserveLCSSA = false);
+
+/// This method transforms the landing pad, OrigBB, by introducing two new basic
+/// blocks into the function. One of those new basic blocks gets the
+/// predecessors listed in Preds. The other basic block gets the remaining
+/// predecessors of OrigBB. The landingpad instruction in OrigBB is cloned into
+/// both of the new basic blocks. The new blocks are given the suffixes 'Suffix1'
+/// and 'Suffix2', and are returned in the NewBBs vector.
+///
+/// This currently updates the LLVM IR, DominatorTree, LoopInfo, and LCSSA but
+/// no other analyses. In particular, it does not preserve LoopSimplify
+/// (because it's complicated to handle the case where one of the edges being
+/// split is an exit of a loop with other exits).
+///
+/// FIXME: deprecated, switch to the DomTreeUpdater-based one.
+void SplitLandingPadPredecessors(BasicBlock *OrigBB,
+ ArrayRef<BasicBlock *> Preds,
+ const char *Suffix, const char *Suffix2,
+ SmallVectorImpl<BasicBlock *> &NewBBs,
+ DominatorTree *DT, LoopInfo *LI = nullptr,
+ MemorySSAUpdater *MSSAU = nullptr,
+ bool PreserveLCSSA = false);
+
+/// This method transforms the landing pad, OrigBB, by introducing two new basic
+/// blocks into the function. One of those new basic blocks gets the
+/// predecessors listed in Preds. The other basic block gets the remaining
+/// predecessors of OrigBB. The landingpad instruction in OrigBB is cloned into
+/// both of the new basic blocks. The new blocks are given the suffixes 'Suffix1'
+/// and 'Suffix2', and are returned in the NewBBs vector.
+///
+/// This currently updates the LLVM IR, DominatorTree, LoopInfo, and LCSSA but
+/// no other analyses. In particular, it does not preserve LoopSimplify
+/// (because it's complicated to handle the case where one of the edges being
+/// split is an exit of a loop with other exits).
+void SplitLandingPadPredecessors(
+ BasicBlock *OrigBB, ArrayRef<BasicBlock *> Preds, const char *Suffix,
+ const char *Suffix2, SmallVectorImpl<BasicBlock *> &NewBBs,
+ DomTreeUpdater *DTU = nullptr, LoopInfo *LI = nullptr,
+ MemorySSAUpdater *MSSAU = nullptr, bool PreserveLCSSA = false);
+
+/// This method duplicates the specified return instruction into a predecessor
+/// which ends in an unconditional branch. If the return instruction returns a
+/// value defined by a PHI, propagate the right value into the return. It
+/// returns the new return instruction in the predecessor.
+ReturnInst *FoldReturnIntoUncondBranch(ReturnInst *RI, BasicBlock *BB,
+ BasicBlock *Pred,
+ DomTreeUpdater *DTU = nullptr);
+
+/// Split the containing block at the specified instruction - everything before
+/// SplitBefore stays in the old basic block, and the rest of the instructions
+/// in the BB are moved to a new block. The two blocks are connected by a
+/// conditional branch (with the value of Cond being the condition).
+/// Before:
+/// Head
+/// SplitBefore
+/// Tail
+/// After:
+/// Head
+/// if (Cond)
+/// ThenBlock
+/// SplitBefore
+/// Tail
+///
+/// If \p ThenBlock is not specified, a new block will be created for it.
+/// If \p Unreachable is true, the newly created block will end with
+/// UnreachableInst, otherwise it branches to Tail.
+/// Returns the NewBasicBlock's terminator.
+///
+/// Updates DT and LI if given.
+///
+/// FIXME: deprecated, switch to the DomTreeUpdater-based one.
+Instruction *SplitBlockAndInsertIfThen(Value *Cond, Instruction *SplitBefore,
+ bool Unreachable, MDNode *BranchWeights,
+ DominatorTree *DT,
+ LoopInfo *LI = nullptr,
+ BasicBlock *ThenBlock = nullptr);
+
+/// Split the containing block at the specified instruction - everything before
+/// SplitBefore stays in the old basic block, and the rest of the instructions
+/// in the BB are moved to a new block. The two blocks are connected by a
+/// conditional branch (with the value of Cond being the condition).
+/// Before:
+/// Head
+/// SplitBefore
+/// Tail
+/// After:
+/// Head
+/// if (Cond)
+/// ThenBlock
+/// SplitBefore
+/// Tail
+///
+/// If \p ThenBlock is not specified, a new block will be created for it.
+/// If \p Unreachable is true, the newly created block will end with
+/// UnreachableInst, otherwise it branches to Tail.
+/// Returns the NewBasicBlock's terminator.
+///
+/// Updates DT and LI if given.
+Instruction *SplitBlockAndInsertIfThen(Value *Cond, Instruction *SplitBefore,
+ bool Unreachable,
+ MDNode *BranchWeights = nullptr,
+ DomTreeUpdater *DTU = nullptr,
+ LoopInfo *LI = nullptr,
+ BasicBlock *ThenBlock = nullptr);
+
+/// SplitBlockAndInsertIfThenElse is similar to SplitBlockAndInsertIfThen,
+/// but also creates the ElseBlock.
+/// Before:
+/// Head
+/// SplitBefore
+/// Tail
+/// After:
+/// Head
+/// if (Cond)
+/// ThenBlock
+/// else
+/// ElseBlock
+/// SplitBefore
+/// Tail
+void SplitBlockAndInsertIfThenElse(Value *Cond, Instruction *SplitBefore,
+ Instruction **ThenTerm,
+ Instruction **ElseTerm,
+ MDNode *BranchWeights = nullptr);
+
+/// Check whether BB is the merge point of an if-region.
+/// If so, return the boolean condition that determines which entry into
+/// BB will be taken. Also, return by references the block that will be
+/// entered from if the condition is true, and the block that will be
+/// entered if the condition is false.
+///
+/// This does no checking to see if the true/false blocks have large or unsavory
+/// instructions in them.
+Value *GetIfCondition(BasicBlock *BB, BasicBlock *&IfTrue,
+ BasicBlock *&IfFalse);
+
+// Split critical edges where the source of the edge is an indirectbr
+// instruction. This isn't always possible, but we can handle some easy cases.
+// This is useful because the machine IR (MI) layer is unable to split such
+// critical edges, which means it will not be able to sink instructions along
+// those edges.
+// This is especially painful for indirect branches with many successors, where
+// we end up having to prepare all outgoing values in the origin block.
+//
+// Our normal algorithm for splitting critical edges requires us to update
+// the outgoing edges of the edge origin block, but for an indirectbr this
+// is hard, since it would require finding and updating the block addresses
+// the indirect branch uses. But if a block only has a single indirectbr
+// predecessor, with the others being regular branches, we can do it in a
+// different way.
+// Say we have A -> D, B -> D, I -> D where only I -> D is an indirectbr.
+// We can split D into D0 and D1, where D0 contains only the PHIs from D,
+// and D1 is the D block body. We can then duplicate D0 as D0A and D0B, and
+// create the following structure:
+// A -> D0A, B -> D0A, I -> D0B, D0A -> D1, D0B -> D1
+// If BPI and BFI are non-null, they will be updated accordingly.
+bool SplitIndirectBrCriticalEdges(Function &F,
+ BranchProbabilityInfo *BPI = nullptr,
+ BlockFrequencyInfo *BFI = nullptr);
+
+/// Given a set of incoming and outgoing blocks, create a "hub" such that every
+/// edge from an incoming block InBB to an outgoing block OutBB is now split
+/// into two edges, one from InBB to the hub and another from the hub to
+/// OutBB. The hub consists of a series of guard blocks, one for each outgoing
+/// block. Each guard block conditionally branches to the corresponding outgoing
+/// block, or the next guard block in the chain. These guard blocks are returned
+/// in the argument vector.
+///
+/// Since the control flow edges from InBB to OutBB have now been replaced, the
+/// function also updates any PHINodes in OutBB. For each such PHINode, the
+/// operands corresponding to incoming blocks are moved to a new PHINode in the
+/// hub, and the hub is made an operand of the original PHINode.
+///
+/// Input CFG:
+/// ----------
+///
+/// Def
+/// |
+/// v
+/// In1 In2
+/// | |
+/// | |
+/// v v
+/// Foo ---> Out1 Out2
+/// |
+/// v
+/// Use
+///
+///
+/// Create hub: Incoming = {In1, In2}, Outgoing = {Out1, Out2}
+/// ----------------------------------------------------------
+///
+/// Def
+/// |
+/// v
+/// In1 In2 Foo
+/// | Hub | |
+/// | + - - | - - + |
+/// | ' v ' V
+/// +------> Guard1 -----> Out1
+/// ' | '
+/// ' v '
+/// ' Guard2 -----> Out2
+/// ' ' |
+/// + - - - - - + |
+/// v
+/// Use
+///
+/// Limitations:
+/// -----------
+/// 1. This assumes that all terminators in the CFG are direct branches (the
+/// "br" instruction). The presence of any other control flow such as
+/// indirectbr, switch or callbr will cause an assert.
+///
+/// 2. The updates to the PHINodes are not sufficient to restore SSA
+/// form. Consider a definition Def, its use Use, incoming block In2 and
+/// outgoing block Out2, such that:
+/// a. In2 is reachable from Def or contains Def.
+/// b. Use is reachable from Out2 or is contained in Out2.
+/// c. Use is not a PHINode if Use is contained in Out2.
+///
+/// Clearly, Def dominates Out2 since the program is valid SSA. But when the
+/// hub is introduced, there is a new path through the hub along which Use is
+/// reachable from entry without passing through Def, and SSA is no longer
+/// valid. To fix this, we need to look at all the blocks post-dominated by
+/// the hub on the one hand, and dominated by Out2 on the other. This is left
+/// for the caller to accomplish, since each specific use of this function
+/// may have additional information which simplifies this fixup. For example,
+/// see restoreSSA() in the UnifyLoopExits pass.
+BasicBlock *CreateControlFlowHub(DomTreeUpdater *DTU,
+ SmallVectorImpl<BasicBlock *> &GuardBlocks,
+ const SetVector<BasicBlock *> &Predecessors,
+ const SetVector<BasicBlock *> &Successors,
+ const StringRef Prefix);
+
+} // end namespace llvm
+
+#endif // LLVM_TRANSFORMS_UTILS_BASICBLOCKUTILS_H
+
+#ifdef __GNUC__
+#pragma GCC diagnostic pop
+#endif
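As a brief sketch of the DomTreeUpdater-based SplitBlockAndInsertIfThen, here is one way a pass might guard newly emitted code behind a condition; Cond, InsertPt, and emitGuardedCode are illustrative names, and DT may be null.

#include "llvm/Analysis/DomTreeUpdater.h"
#include "llvm/IR/IRBuilder.h"
#include "llvm/Transforms/Utils/BasicBlockUtils.h"

using namespace llvm;

// Sketch: split the block at InsertPt and branch on Cond into a new "then" block.
// Cond is assumed to be an i1 value available at InsertPt.
static void emitGuardedCode(Value *Cond, Instruction *InsertPt,
                            DominatorTree *DT) {
  DomTreeUpdater DTU(DT, DomTreeUpdater::UpdateStrategy::Lazy);
  Instruction *ThenTerm = SplitBlockAndInsertIfThen(
      Cond, InsertPt, /*Unreachable=*/false, /*BranchWeights=*/nullptr, &DTU);
  IRBuilder<> Builder(ThenTerm);  // anything built here runs only when Cond is true
  // ... emit the guarded instructions before ThenTerm ...
  (void)Builder;
}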
diff --git a/contrib/libs/llvm12/include/llvm/Transforms/Utils/BreakCriticalEdges.h b/contrib/libs/llvm12/include/llvm/Transforms/Utils/BreakCriticalEdges.h
new file mode 100644
index 0000000000..ec1db7f00f
--- /dev/null
+++ b/contrib/libs/llvm12/include/llvm/Transforms/Utils/BreakCriticalEdges.h
@@ -0,0 +1,39 @@
+#pragma once
+
+#ifdef __GNUC__
+#pragma GCC diagnostic push
+#pragma GCC diagnostic ignored "-Wunused-parameter"
+#endif
+
+//===- BreakCriticalEdges.h - Critical Edge Elimination Pass --------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// BreakCriticalEdges pass - Break all of the critical edges in the CFG by
+// inserting a dummy basic block. This pass may be "required" by passes that
+// cannot deal with critical edges. For this usage, the structure type is
+// forward declared. This pass obviously invalidates the CFG, but can update
+// dominator trees.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_TRANSFORMS_UTILS_BREAKCRITICALEDGES_H
+#define LLVM_TRANSFORMS_UTILS_BREAKCRITICALEDGES_H
+
+#include "llvm/IR/Function.h"
+#include "llvm/IR/PassManager.h"
+
+namespace llvm {
+struct BreakCriticalEdgesPass : public PassInfoMixin<BreakCriticalEdgesPass> {
+ PreservedAnalyses run(Function &F, FunctionAnalysisManager &AM);
+};
+} // namespace llvm
+#endif // LLVM_TRANSFORMS_UTILS_BREAKCRITICALEDGES_H
+
+#ifdef __GNUC__
+#pragma GCC diagnostic pop
+#endif
diff --git a/contrib/libs/llvm12/include/llvm/Transforms/Utils/BuildLibCalls.h b/contrib/libs/llvm12/include/llvm/Transforms/Utils/BuildLibCalls.h
new file mode 100644
index 0000000000..edb268a5b1
--- /dev/null
+++ b/contrib/libs/llvm12/include/llvm/Transforms/Utils/BuildLibCalls.h
@@ -0,0 +1,223 @@
+#pragma once
+
+#ifdef __GNUC__
+#pragma GCC diagnostic push
+#pragma GCC diagnostic ignored "-Wunused-parameter"
+#endif
+
+//===- BuildLibCalls.h - Utility builder for libcalls -----------*- C++ -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// This file exposes an interface to build some C language libcalls for
+// optimization passes that need to call the various functions.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_TRANSFORMS_UTILS_BUILDLIBCALLS_H
+#define LLVM_TRANSFORMS_UTILS_BUILDLIBCALLS_H
+
+#include "llvm/Analysis/TargetLibraryInfo.h"
+
+namespace llvm {
+ class Value;
+ class DataLayout;
+ class IRBuilderBase;
+
+ /// Analyze the name and prototype of the given function and set any
+ /// applicable attributes.
+ /// If the library function is unavailable, this doesn't modify it.
+ ///
+ /// Returns true if any attributes were set and false otherwise.
+ bool inferLibFuncAttributes(Function &F, const TargetLibraryInfo &TLI);
+ bool inferLibFuncAttributes(Module *M, StringRef Name, const TargetLibraryInfo &TLI);
+
+ /// Check whether the overloaded floating point function
+ /// corresponding to \a Ty is available.
+ bool hasFloatFn(const TargetLibraryInfo *TLI, Type *Ty,
+ LibFunc DoubleFn, LibFunc FloatFn, LibFunc LongDoubleFn);
+
+ /// Get the name of the overloaded floating point function
+ /// corresponding to \a Ty.
+ StringRef getFloatFnName(const TargetLibraryInfo *TLI, Type *Ty,
+ LibFunc DoubleFn, LibFunc FloatFn,
+ LibFunc LongDoubleFn);
+
+ /// Return V if it is an i8*, otherwise cast it to i8*.
+ Value *castToCStr(Value *V, IRBuilderBase &B);
+
+ /// Emit a call to the strlen function to the builder, for the specified
+ /// pointer. Ptr is required to be some pointer type, and the return value has
+ /// 'intptr_t' type.
+ Value *emitStrLen(Value *Ptr, IRBuilderBase &B, const DataLayout &DL,
+ const TargetLibraryInfo *TLI);
+
+ /// Emit a call to the strdup function to the builder, for the specified
+ /// pointer. Ptr is required to be some pointer type, and the return value has
+ /// 'i8*' type.
+ Value *emitStrDup(Value *Ptr, IRBuilderBase &B, const TargetLibraryInfo *TLI);
+
+ /// Emit a call to the strnlen function to the builder, for the specified
+ /// pointer. Ptr is required to be some pointer type, MaxLen must be of size_t
+ /// type, and the return value has 'intptr_t' type.
+ Value *emitStrNLen(Value *Ptr, Value *MaxLen, IRBuilderBase &B,
+ const DataLayout &DL, const TargetLibraryInfo *TLI);
+
+ /// Emit a call to the strchr function to the builder, for the specified
+ /// pointer and character. Ptr is required to be some pointer type, and the
+ /// return value has 'i8*' type.
+ Value *emitStrChr(Value *Ptr, char C, IRBuilderBase &B,
+ const TargetLibraryInfo *TLI);
+
+ /// Emit a call to the strncmp function to the builder.
+ Value *emitStrNCmp(Value *Ptr1, Value *Ptr2, Value *Len, IRBuilderBase &B,
+ const DataLayout &DL, const TargetLibraryInfo *TLI);
+
+ /// Emit a call to the strcpy function to the builder, for the specified
+ /// pointer arguments.
+ Value *emitStrCpy(Value *Dst, Value *Src, IRBuilderBase &B,
+ const TargetLibraryInfo *TLI);
+
+ /// Emit a call to the stpcpy function to the builder, for the specified
+ /// pointer arguments.
+ Value *emitStpCpy(Value *Dst, Value *Src, IRBuilderBase &B,
+ const TargetLibraryInfo *TLI);
+
+ /// Emit a call to the strncpy function to the builder, for the specified
+ /// pointer arguments and length.
+ Value *emitStrNCpy(Value *Dst, Value *Src, Value *Len, IRBuilderBase &B,
+ const TargetLibraryInfo *TLI);
+
+ /// Emit a call to the stpncpy function to the builder, for the specified
+ /// pointer arguments and length.
+ Value *emitStpNCpy(Value *Dst, Value *Src, Value *Len, IRBuilderBase &B,
+ const TargetLibraryInfo *TLI);
+
+ /// Emit a call to the __memcpy_chk function to the builder. This expects that
+ /// the Len and ObjSize have type 'intptr_t' and Dst/Src are pointers.
+ Value *emitMemCpyChk(Value *Dst, Value *Src, Value *Len, Value *ObjSize,
+ IRBuilderBase &B, const DataLayout &DL,
+ const TargetLibraryInfo *TLI);
+
+ /// Emit a call to the mempcpy function.
+ Value *emitMemPCpy(Value *Dst, Value *Src, Value *Len, IRBuilderBase &B,
+ const DataLayout &DL, const TargetLibraryInfo *TLI);
+
+ /// Emit a call to the memchr function. This assumes that Ptr is a pointer,
+ /// Val is an i32 value, and Len is an 'intptr_t' value.
+ Value *emitMemChr(Value *Ptr, Value *Val, Value *Len, IRBuilderBase &B,
+ const DataLayout &DL, const TargetLibraryInfo *TLI);
+
+ /// Emit a call to the memcmp function.
+ Value *emitMemCmp(Value *Ptr1, Value *Ptr2, Value *Len, IRBuilderBase &B,
+ const DataLayout &DL, const TargetLibraryInfo *TLI);
+
+ /// Emit a call to the bcmp function.
+ Value *emitBCmp(Value *Ptr1, Value *Ptr2, Value *Len, IRBuilderBase &B,
+ const DataLayout &DL, const TargetLibraryInfo *TLI);
+
+ /// Emit a call to the memccpy function.
+ Value *emitMemCCpy(Value *Ptr1, Value *Ptr2, Value *Val, Value *Len,
+ IRBuilderBase &B, const TargetLibraryInfo *TLI);
+
+ /// Emit a call to the snprintf function.
+ Value *emitSNPrintf(Value *Dest, Value *Size, Value *Fmt,
+ ArrayRef<Value *> Args, IRBuilderBase &B,
+ const TargetLibraryInfo *TLI);
+
+ /// Emit a call to the sprintf function.
+ Value *emitSPrintf(Value *Dest, Value *Fmt, ArrayRef<Value *> VariadicArgs,
+ IRBuilderBase &B, const TargetLibraryInfo *TLI);
+
+ /// Emit a call to the strcat function.
+ Value *emitStrCat(Value *Dest, Value *Src, IRBuilderBase &B,
+ const TargetLibraryInfo *TLI);
+
+ /// Emit a call to the strlcpy function.
+ Value *emitStrLCpy(Value *Dest, Value *Src, Value *Size, IRBuilderBase &B,
+ const TargetLibraryInfo *TLI);
+
+ /// Emit a call to the strlcat function.
+ Value *emitStrLCat(Value *Dest, Value *Src, Value *Size, IRBuilderBase &B,
+ const TargetLibraryInfo *TLI);
+
+ /// Emit a call to the strncat function.
+ Value *emitStrNCat(Value *Dest, Value *Src, Value *Size, IRBuilderBase &B,
+ const TargetLibraryInfo *TLI);
+
+ /// Emit a call to the vsnprintf function.
+ Value *emitVSNPrintf(Value *Dest, Value *Size, Value *Fmt, Value *VAList,
+ IRBuilderBase &B, const TargetLibraryInfo *TLI);
+
+ /// Emit a call to the vsprintf function.
+ Value *emitVSPrintf(Value *Dest, Value *Fmt, Value *VAList, IRBuilderBase &B,
+ const TargetLibraryInfo *TLI);
+
+ /// Emit a call to the unary function named 'Name' (e.g. 'floor'). This
+  /// function is known to take a single argument of type matching 'Op' and to
+  /// return one value with the same type. If 'Op' is a long double, an 'l'
+  /// suffix is added to the name; if 'Op' is a float, an 'f' suffix is added.
+ Value *emitUnaryFloatFnCall(Value *Op, StringRef Name, IRBuilderBase &B,
+ const AttributeList &Attrs);
+
+ /// Emit a call to the unary function DoubleFn, FloatFn or LongDoubleFn,
+  /// depending on the type of Op.
+ Value *emitUnaryFloatFnCall(Value *Op, const TargetLibraryInfo *TLI,
+ LibFunc DoubleFn, LibFunc FloatFn,
+ LibFunc LongDoubleFn, IRBuilderBase &B,
+ const AttributeList &Attrs);
+
+ /// Emit a call to the binary function named 'Name' (e.g. 'fmin'). This
+  /// function is known to take arguments of types matching 'Op1' and 'Op2' and
+  /// to return one value with the same type. If 'Op1'/'Op2' are long double, an
+  /// 'l' suffix is added to the name; if they are float, an 'f' suffix is added.
+ Value *emitBinaryFloatFnCall(Value *Op1, Value *Op2, StringRef Name,
+ IRBuilderBase &B, const AttributeList &Attrs);
+
+ /// Emit a call to the binary function DoubleFn, FloatFn or LongDoubleFn,
+  /// depending on the type of Op1.
+ Value *emitBinaryFloatFnCall(Value *Op1, Value *Op2,
+ const TargetLibraryInfo *TLI, LibFunc DoubleFn,
+ LibFunc FloatFn, LibFunc LongDoubleFn,
+ IRBuilderBase &B, const AttributeList &Attrs);
+
+ /// Emit a call to the putchar function. This assumes that Char is an integer.
+ Value *emitPutChar(Value *Char, IRBuilderBase &B,
+ const TargetLibraryInfo *TLI);
+
+ /// Emit a call to the puts function. This assumes that Str is some pointer.
+ Value *emitPutS(Value *Str, IRBuilderBase &B, const TargetLibraryInfo *TLI);
+
+ /// Emit a call to the fputc function. This assumes that Char is an i32, and
+ /// File is a pointer to FILE.
+ Value *emitFPutC(Value *Char, Value *File, IRBuilderBase &B,
+ const TargetLibraryInfo *TLI);
+
+ /// Emit a call to the fputs function. Str is required to be a pointer and
+ /// File is a pointer to FILE.
+ Value *emitFPutS(Value *Str, Value *File, IRBuilderBase &B,
+ const TargetLibraryInfo *TLI);
+
+ /// Emit a call to the fwrite function. This assumes that Ptr is a pointer,
+ /// Size is an 'intptr_t', and File is a pointer to FILE.
+ Value *emitFWrite(Value *Ptr, Value *Size, Value *File, IRBuilderBase &B,
+ const DataLayout &DL, const TargetLibraryInfo *TLI);
+
+ /// Emit a call to the malloc function.
+ Value *emitMalloc(Value *Num, IRBuilderBase &B, const DataLayout &DL,
+ const TargetLibraryInfo *TLI);
+
+ /// Emit a call to the calloc function.
+ Value *emitCalloc(Value *Num, Value *Size, const AttributeList &Attrs,
+ IRBuilderBase &B, const TargetLibraryInfo &TLI);
+}
+
+#endif
+
+#ifdef __GNUC__
+#pragma GCC diagnostic pop
+#endif
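A minimal sketch of emitting one of these library calls, assuming the DataLayout and TargetLibraryInfo come from the enclosing pass; emitLengthOf is an illustrative helper, and emitStrLen is expected to return null when strlen is unavailable for the target.

#include "llvm/Analysis/TargetLibraryInfo.h"
#include "llvm/IR/DataLayout.h"
#include "llvm/IR/IRBuilder.h"
#include "llvm/Transforms/Utils/BuildLibCalls.h"

using namespace llvm;

// Sketch: emit `strlen(Ptr)` just before InsertPt and return the length value.
static Value *emitLengthOf(Value *Ptr, Instruction *InsertPt,
                           const DataLayout &DL, const TargetLibraryInfo *TLI) {
  IRBuilder<> B(InsertPt);
  Value *CStr = castToCStr(Ptr, B);     // bitcast to i8* if needed
  return emitStrLen(CStr, B, DL, TLI);  // intptr_t-typed length, or null
}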
diff --git a/contrib/libs/llvm12/include/llvm/Transforms/Utils/BypassSlowDivision.h b/contrib/libs/llvm12/include/llvm/Transforms/Utils/BypassSlowDivision.h
new file mode 100644
index 0000000000..60d3101d08
--- /dev/null
+++ b/contrib/libs/llvm12/include/llvm/Transforms/Utils/BypassSlowDivision.h
@@ -0,0 +1,85 @@
+#pragma once
+
+#ifdef __GNUC__
+#pragma GCC diagnostic push
+#pragma GCC diagnostic ignored "-Wunused-parameter"
+#endif
+
+//===- llvm/Transforms/Utils/BypassSlowDivision.h ---------------*- C++ -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// This file contains an optimization for div and rem on architectures that
+// execute short instructions significantly faster than longer instructions.
+// For example, on Intel Atom 32-bit divides are slow enough that during
+// runtime it is profitable to check the value of the operands, and if they are
+// positive and less than 256 use an unsigned 8-bit divide.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_TRANSFORMS_UTILS_BYPASSSLOWDIVISION_H
+#define LLVM_TRANSFORMS_UTILS_BYPASSSLOWDIVISION_H
+
+#include "llvm/ADT/DenseMap.h"
+#include "llvm/ADT/DenseMapInfo.h"
+#include "llvm/IR/ValueHandle.h"
+#include <cstdint>
+
+namespace llvm {
+
+class BasicBlock;
+class Value;
+
+struct DivRemMapKey {
+ bool SignedOp;
+ AssertingVH<Value> Dividend;
+ AssertingVH<Value> Divisor;
+
+ DivRemMapKey() = default;
+
+ DivRemMapKey(bool InSignedOp, Value *InDividend, Value *InDivisor)
+ : SignedOp(InSignedOp), Dividend(InDividend), Divisor(InDivisor) {}
+};
+
+template <> struct DenseMapInfo<DivRemMapKey> {
+ static bool isEqual(const DivRemMapKey &Val1, const DivRemMapKey &Val2) {
+ return Val1.SignedOp == Val2.SignedOp && Val1.Dividend == Val2.Dividend &&
+ Val1.Divisor == Val2.Divisor;
+ }
+
+ static DivRemMapKey getEmptyKey() {
+ return DivRemMapKey(false, nullptr, nullptr);
+ }
+
+ static DivRemMapKey getTombstoneKey() {
+ return DivRemMapKey(true, nullptr, nullptr);
+ }
+
+ static unsigned getHashValue(const DivRemMapKey &Val) {
+ return (unsigned)(reinterpret_cast<uintptr_t>(
+ static_cast<Value *>(Val.Dividend)) ^
+ reinterpret_cast<uintptr_t>(
+ static_cast<Value *>(Val.Divisor))) ^
+ (unsigned)Val.SignedOp;
+ }
+};
+
+/// This optimization identifies DIV instructions in a BB that can be
+/// profitably bypassed and carried out with a shorter, faster divide.
+///
+/// This optimization may add basic blocks immediately after BB; for obvious
+/// reasons, you shouldn't pass those blocks to bypassSlowDivision.
+bool bypassSlowDivision(
+ BasicBlock *BB, const DenseMap<unsigned int, unsigned int> &BypassWidth);
+
+} // end namespace llvm
+
+#endif // LLVM_TRANSFORMS_UTILS_BYPASSSLOWDIVISION_H
+
+#ifdef __GNUC__
+#pragma GCC diagnostic pop
+#endif
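A sketch of how a target-aware pass might request division bypassing, in the spirit of how CodeGenPrepare uses this utility; the 32-to-8 bit-width mapping and the bypassDivisionsIn helper are illustrative.

#include "llvm/ADT/DenseMap.h"
#include "llvm/IR/BasicBlock.h"
#include "llvm/IR/Function.h"
#include "llvm/Transforms/Utils/BypassSlowDivision.h"

using namespace llvm;

// Sketch: bypass 32-bit div/rem with 8-bit operations when the operands fit.
static bool bypassDivisionsIn(Function &F) {
  DenseMap<unsigned, unsigned> BypassWidths;
  BypassWidths[32] = 8;  // bit width of slow div -> bit width of fast div

  bool Changed = false;
  // Capture the next block up front: bypassSlowDivision may insert new blocks
  // right after BB, and those must not be fed back into it.
  for (BasicBlock *BB = &F.front(); BB;) {
    BasicBlock *Next = BB->getNextNode();
    Changed |= bypassSlowDivision(BB, BypassWidths);
    BB = Next;
  }
  return Changed;
}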
diff --git a/contrib/libs/llvm12/include/llvm/Transforms/Utils/CallGraphUpdater.h b/contrib/libs/llvm12/include/llvm/Transforms/Utils/CallGraphUpdater.h
new file mode 100644
index 0000000000..52b14df3ed
--- /dev/null
+++ b/contrib/libs/llvm12/include/llvm/Transforms/Utils/CallGraphUpdater.h
@@ -0,0 +1,120 @@
+#pragma once
+
+#ifdef __GNUC__
+#pragma GCC diagnostic push
+#pragma GCC diagnostic ignored "-Wunused-parameter"
+#endif
+
+//===- CallGraphUpdater.h - A (lazy) call graph update helper ---*- C++ -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+/// \file
+///
+/// This file provides interfaces used to manipulate a call graph, regardless
+/// of whether it is an "old style" CallGraph or a "new style" LazyCallGraph.
+///
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_TRANSFORMS_UTILS_CALLGRAPHUPDATER_H
+#define LLVM_TRANSFORMS_UTILS_CALLGRAPHUPDATER_H
+
+#include "llvm/Analysis/CGSCCPassManager.h"
+#include "llvm/Analysis/CallGraph.h"
+#include "llvm/Analysis/CallGraphSCCPass.h"
+#include "llvm/Analysis/LazyCallGraph.h"
+
+namespace llvm {
+
+/// Wrapper to unify "old style" CallGraph and "new style" LazyCallGraph. This
+/// simplifies the interface and the call sites, e.g., new and old pass manager
+/// passes can share the same code.
+class CallGraphUpdater {
+  /// Containers for functions we have replaced or want to delete when
+  /// `finalize` is called. This can happen explicitly or as part of the
+  /// destructor. Dead functions in comdat sections are tracked separately
+  /// because a function with discardable linkage in a COMDAT should only
+  /// be dropped if the entire COMDAT is dropped, see git ac07703842cf.
+ ///{
+ SmallPtrSet<Function *, 16> ReplacedFunctions;
+ SmallVector<Function *, 16> DeadFunctions;
+ SmallVector<Function *, 16> DeadFunctionsInComdats;
+ ///}
+
+ /// Old PM variables
+ ///{
+ CallGraph *CG = nullptr;
+ CallGraphSCC *CGSCC = nullptr;
+ ///}
+
+ /// New PM variables
+ ///{
+ LazyCallGraph *LCG = nullptr;
+ LazyCallGraph::SCC *SCC = nullptr;
+ CGSCCAnalysisManager *AM = nullptr;
+ CGSCCUpdateResult *UR = nullptr;
+ FunctionAnalysisManager *FAM = nullptr;
+ ///}
+
+public:
+ CallGraphUpdater() {}
+ ~CallGraphUpdater() { finalize(); }
+
+ /// Initializers for use outside of a CGSCC pass and inside a CGSCC pass in
+ /// either the old or the new pass manager (PM).
+ ///{
+ void initialize(CallGraph &CG, CallGraphSCC &SCC) {
+ this->CG = &CG;
+ this->CGSCC = &SCC;
+ }
+ void initialize(LazyCallGraph &LCG, LazyCallGraph::SCC &SCC,
+ CGSCCAnalysisManager &AM, CGSCCUpdateResult &UR) {
+ this->LCG = &LCG;
+ this->SCC = &SCC;
+ this->AM = &AM;
+ this->UR = &UR;
+ FAM =
+ &AM.getResult<FunctionAnalysisManagerCGSCCProxy>(SCC, LCG).getManager();
+ }
+ ///}
+
+ /// Finalizer that will trigger actions like function removal from the CG.
+ bool finalize();
+
+ /// Remove \p Fn from the call graph.
+ void removeFunction(Function &Fn);
+
+ /// After a CGSCC pass changes a function in ways that affect the call
+ /// graph, this method can be called to update it.
+ void reanalyzeFunction(Function &Fn);
+
+ /// If a new function was created by outlining, this method can be called
+ /// to update the call graph for the new function. Note that the old one
+ /// still needs to be re-analyzed or manually updated.
+ void registerOutlinedFunction(Function &OriginalFn, Function &NewFn);
+
+ /// Replace \p OldFn in the call graph (and SCC) with \p NewFn. The uses
+ /// outside the call graph and the function \p OldFn are not modified.
+ /// Note that \p OldFn is also removed from the call graph
+ /// (\see removeFunction).
+ void replaceFunctionWith(Function &OldFn, Function &NewFn);
+
+ /// Remove the call site \p CS from the call graph.
+ void removeCallSite(CallBase &CS);
+
+ /// Replace \p OldCS with the new call site \p NewCS.
+ /// \return True if the replacement was successful, otherwise False. In the
+ /// latter case the parent function of \p OldCS needs to be re-analyzed.
+ bool replaceCallSite(CallBase &OldCS, CallBase &NewCS);
+};
+
+} // end namespace llvm
+
+#endif // LLVM_TRANSFORMS_UTILS_CALLGRAPHUPDATER_H
+
+#ifdef __GNUC__
+#pragma GCC diagnostic pop
+#endif
diff --git a/contrib/libs/llvm12/include/llvm/Transforms/Utils/CallPromotionUtils.h b/contrib/libs/llvm12/include/llvm/Transforms/Utils/CallPromotionUtils.h
new file mode 100644
index 0000000000..a2d5d1108b
--- /dev/null
+++ b/contrib/libs/llvm12/include/llvm/Transforms/Utils/CallPromotionUtils.h
@@ -0,0 +1,89 @@
+#pragma once
+
+#ifdef __GNUC__
+#pragma GCC diagnostic push
+#pragma GCC diagnostic ignored "-Wunused-parameter"
+#endif
+
+//===- CallPromotionUtils.h - Utilities for call promotion ------*- C++ -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// This file declares utilities useful for promoting indirect call sites to
+// direct call sites.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_TRANSFORMS_UTILS_CALLPROMOTIONUTILS_H
+#define LLVM_TRANSFORMS_UTILS_CALLPROMOTIONUTILS_H
+
+namespace llvm {
+class CallBase;
+class CastInst;
+class Function;
+class MDNode;
+
+/// Return true if the given indirect call site can be made to call \p Callee.
+///
+/// This function ensures that the number and type of the call site's arguments
+/// and return value match those of the given function. If the types do not
+/// match exactly, they must at least be bitcast compatible. If \p FailureReason
+/// is non-null and the indirect call cannot be promoted, the failure reason
+/// will be stored in it.
+bool isLegalToPromote(const CallBase &CB, Function *Callee,
+ const char **FailureReason = nullptr);
+
+/// Promote the given indirect call site to unconditionally call \p Callee.
+///
+/// This function promotes the given call site, returning the direct call or
+/// invoke instruction. If the function type of the call site doesn't match that
+/// of the callee, bitcast instructions are inserted where appropriate. If \p
+/// RetBitCast is non-null, it will be used to store the return value bitcast,
+/// if created.
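+///
+/// A typical check-then-promote sequence (sketch only; CB is an assumed
+/// indirect call site and Callee the guessed target):
+/// \code
+///   const char *Reason = nullptr;
+///   if (isLegalToPromote(CB, Callee, &Reason))
+///     promoteCall(CB, Callee);
+/// \endcode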
+CallBase &promoteCall(CallBase &CB, Function *Callee,
+ CastInst **RetBitCast = nullptr);
+
+/// Promote the given indirect call site to conditionally call \p Callee.
+///
+/// This function creates an if-then-else structure at the location of the call
+/// site. The original call site is moved into the "else" block. A clone of the
+/// indirect call site is promoted, placed in the "then" block, and returned. If
+/// \p BranchWeights is non-null, it will be used to set !prof metadata on the
+/// new conditional branch.
+CallBase &promoteCallWithIfThenElse(CallBase &CB, Function *Callee,
+ MDNode *BranchWeights = nullptr);
+
+/// Try to promote (devirtualize) a virtual call on an Alloca. Return true on
+/// success.
+///
+/// Look for a pattern like:
+///
+/// %o = alloca %class.Impl
+/// %1 = getelementptr %class.Impl, %class.Impl* %o, i64 0, i32 0, i32 0
+/// store i32 (...)** bitcast (i8** getelementptr inbounds
+/// ({ [3 x i8*] }, { [3 x i8*] }* @_ZTV4Impl, i64 0, inrange i32 0, i64 2)
+/// to i32 (...)**), i32 (...)*** %1
+/// %2 = getelementptr inbounds %class.Impl, %class.Impl* %o, i64 0, i32 0
+/// %3 = bitcast %class.Interface* %2 to void (%class.Interface*)***
+/// %vtable.i = load void (%class.Interface*)**, void (%class.Interface*)*** %3
+/// %4 = load void (%class.Interface*)*, void (%class.Interface*)** %vtable.i
+/// call void %4(%class.Interface* nonnull %2)
+///
+/// @_ZTV4Impl = linkonce_odr dso_local unnamed_addr constant { [3 x i8*] }
+/// { [3 x i8*]
+/// [i8* null, i8* bitcast ({ i8*, i8*, i8* }* @_ZTI4Impl to i8*),
+/// i8* bitcast (void (%class.Impl*)* @_ZN4Impl3RunEv to i8*)] }
+///
+bool tryPromoteCall(CallBase &CB);
+
+} // end namespace llvm
+
+#endif // LLVM_TRANSFORMS_UTILS_CALLPROMOTIONUTILS_H
+
+#ifdef __GNUC__
+#pragma GCC diagnostic pop
+#endif
diff --git a/contrib/libs/llvm12/include/llvm/Transforms/Utils/CanonicalizeAliases.h b/contrib/libs/llvm12/include/llvm/Transforms/Utils/CanonicalizeAliases.h
new file mode 100644
index 0000000000..7cd6a1e827
--- /dev/null
+++ b/contrib/libs/llvm12/include/llvm/Transforms/Utils/CanonicalizeAliases.h
@@ -0,0 +1,42 @@
+#pragma once
+
+#ifdef __GNUC__
+#pragma GCC diagnostic push
+#pragma GCC diagnostic ignored "-Wunused-parameter"
+#endif
+
+//===-- CanonicalizeAliases.h - Alias Canonicalization Pass -----*- C++ -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// This file declares a pass that canonicalizes aliases.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_TRANSFORMS_UTILS_CANONICALIZE_ALIASES_H
+#define LLVM_TRANSFORMS_UTILS_CANONICALIZE_ALIASES_H
+
+#include "llvm/IR/Module.h"
+#include "llvm/IR/PassManager.h"
+
+namespace llvm {
+
+/// Simple pass that canonicalizes aliases.
+class CanonicalizeAliasesPass : public PassInfoMixin<CanonicalizeAliasesPass> {
+public:
+ CanonicalizeAliasesPass() = default;
+
+ PreservedAnalyses run(Module &M, ModuleAnalysisManager &AM);
+};
+
+} // end namespace llvm
+
+#endif // LLVM_TRANSFORMS_UTILS_CANONICALIZE_ALIASES_H
+
+#ifdef __GNUC__
+#pragma GCC diagnostic pop
+#endif
diff --git a/contrib/libs/llvm12/include/llvm/Transforms/Utils/CanonicalizeFreezeInLoops.h b/contrib/libs/llvm12/include/llvm/Transforms/Utils/CanonicalizeFreezeInLoops.h
new file mode 100644
index 0000000000..59e205a1cc
--- /dev/null
+++ b/contrib/libs/llvm12/include/llvm/Transforms/Utils/CanonicalizeFreezeInLoops.h
@@ -0,0 +1,44 @@
+#pragma once
+
+#ifdef __GNUC__
+#pragma GCC diagnostic push
+#pragma GCC diagnostic ignored "-Wunused-parameter"
+#endif
+
+//==- CanonicalizeFreezeInLoops.h - Canonicalize freezes in a loop -*- C++ -*-==//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// This file declares a pass that canonicalizes freeze instructions in a loop.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_TRANSFORMS_UTILS_CANONICALIZE_FREEZES_IN_LOOPS_H
+#define LLVM_TRANSFORMS_UTILS_CANONICALIZE_FREEZES_IN_LOOPS_H
+
+#include "llvm/Analysis/LoopAnalysisManager.h"
+#include "llvm/Analysis/LoopInfo.h"
+#include "llvm/IR/PassManager.h"
+
+namespace llvm {
+class LPMUpdater;
+
+/// A pass that canonicalizes freeze instructions in a loop.
+class CanonicalizeFreezeInLoopsPass
+ : public PassInfoMixin<CanonicalizeFreezeInLoopsPass> {
+public:
+ PreservedAnalyses run(Loop &L, LoopAnalysisManager &AM,
+ LoopStandardAnalysisResults &AR, LPMUpdater &U);
+};
+
+} // end namespace llvm
+
+#endif // LLVM_TRANSFORMS_UTILS_CANONICALIZE_FREEZES_IN_LOOPS_H
+
+#ifdef __GNUC__
+#pragma GCC diagnostic pop
+#endif
diff --git a/contrib/libs/llvm12/include/llvm/Transforms/Utils/Cloning.h b/contrib/libs/llvm12/include/llvm/Transforms/Utils/Cloning.h
new file mode 100644
index 0000000000..55e8d5d0b2
--- /dev/null
+++ b/contrib/libs/llvm12/include/llvm/Transforms/Utils/Cloning.h
@@ -0,0 +1,327 @@
+#pragma once
+
+#ifdef __GNUC__
+#pragma GCC diagnostic push
+#pragma GCC diagnostic ignored "-Wunused-parameter"
+#endif
+
+//===- Cloning.h - Clone various parts of LLVM programs ---------*- C++ -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// This file defines various functions that are used to clone chunks of LLVM
+// code for various purposes. This varies from copying whole modules into new
+// modules, to cloning functions with different arguments, to inlining
+// functions, to copying basic blocks to support loop unrolling or superblock
+// formation, etc.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_TRANSFORMS_UTILS_CLONING_H
+#define LLVM_TRANSFORMS_UTILS_CLONING_H
+
+#include "llvm/ADT/SmallVector.h"
+#include "llvm/ADT/Twine.h"
+#include "llvm/Analysis/AssumptionCache.h"
+#include "llvm/Analysis/InlineCost.h"
+#include "llvm/IR/ValueHandle.h"
+#include "llvm/Transforms/Utils/ValueMapper.h"
+#include <functional>
+#include <memory>
+#include <vector>
+
+namespace llvm {
+
+class AAResults;
+class AllocaInst;
+class BasicBlock;
+class BlockFrequencyInfo;
+class CallInst;
+class CallGraph;
+class DebugInfoFinder;
+class DominatorTree;
+class Function;
+class Instruction;
+class InvokeInst;
+class Loop;
+class LoopInfo;
+class Module;
+class ProfileSummaryInfo;
+class ReturnInst;
+class DomTreeUpdater;
+
+/// Return an exact copy of the specified module
+std::unique_ptr<Module> CloneModule(const Module &M);
+std::unique_ptr<Module> CloneModule(const Module &M, ValueToValueMapTy &VMap);
+
+/// Return a copy of the specified module. The ShouldCloneDefinition function
+/// controls whether a specific GlobalValue's definition is cloned. If the
+/// function returns false, the module copy will contain an external reference
+/// in place of the global definition.
+std::unique_ptr<Module>
+CloneModule(const Module &M, ValueToValueMapTy &VMap,
+ function_ref<bool(const GlobalValue *)> ShouldCloneDefinition);
+
+/// This struct can be used to capture information about code
+/// being cloned, while it is being cloned.
+struct ClonedCodeInfo {
+ /// This is set to true if the cloned code contains a normal call instruction.
+ bool ContainsCalls = false;
+
+ /// This is set to true if the cloned code contains a 'dynamic' alloca.
+ /// Dynamic allocas are allocas that are either not in the entry block or they
+ /// are in the entry block but are not a constant size.
+ bool ContainsDynamicAllocas = false;
+
+ /// All cloned call sites that have operand bundles attached are appended to
+ /// this vector. This vector may contain nulls or undefs if some of the
+ /// originally inserted callsites were DCE'ed after they were cloned.
+ std::vector<WeakTrackingVH> OperandBundleCallSites;
+
+ ClonedCodeInfo() = default;
+};
+
+/// Return a copy of the specified basic block, but without
+/// embedding the block into a particular function. The block returned is an
+/// exact copy of the specified basic block, without any remapping having been
+/// performed. Because of this, this is only suitable for applications where
+/// the basic block will be inserted into the same function that it was cloned
+/// from (loop unrolling would use this, for example).
+///
+/// Also, note that this function makes a direct copy of the basic block, and
+/// can thus produce illegal LLVM code. In particular, it will copy any PHI
+/// nodes from the original block, even though there are no predecessors for the
+/// newly cloned block (thus, phi nodes will have to be updated). Also, this
+/// block will branch to the old successors of the original block: these
+/// successors will have to have any PHI nodes updated to account for the new
+/// incoming edges.
+///
+/// The correlation between instructions in the source and result basic blocks
+/// is recorded in the VMap map.
+///
+/// If you have a particular suffix you'd like to use to add to any cloned
+/// names, specify it as the optional third parameter.
+///
+/// If you would like the basic block to be auto-inserted into the end of a
+/// function, you can specify it as the optional fourth parameter.
+///
+/// If you would like to collect additional information about the cloned
+/// function, you can specify a ClonedCodeInfo object with the optional fifth
+/// parameter.
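+///
+/// Illustrative sketch (BB and its parent function F are assumed to exist):
+/// \code
+///   ValueToValueMapTy VMap;
+///   BasicBlock *Clone = CloneBasicBlock(BB, VMap, ".clone", F);
+///   SmallVector<BasicBlock *, 1> NewBlocks = {Clone};
+///   remapInstructionsInBlocks(NewBlocks, VMap); // rewrite uses via VMap
+///   // PHI nodes and successor edges still need to be fixed up by the caller.
+/// \endcode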
+BasicBlock *CloneBasicBlock(const BasicBlock *BB, ValueToValueMapTy &VMap,
+ const Twine &NameSuffix = "", Function *F = nullptr,
+ ClonedCodeInfo *CodeInfo = nullptr,
+ DebugInfoFinder *DIFinder = nullptr);
+
+/// Return a copy of the specified function and add it to that
+/// function's module. Also, any references specified in the VMap are changed
+/// to refer to their mapped value instead of the original one. If any of the
+/// arguments to the function are in the VMap, the arguments are deleted from
+/// the resultant function. The VMap is updated to include mappings from all of
+/// the instructions and basic blocks in the function from their old to new
+/// values. The final argument captures information about the cloned code if
+/// non-null.
+///
+/// VMap contains no non-identity GlobalValue mappings and debug info metadata
+/// will not be cloned.
+///
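+/// Illustrative sketch (F is an assumed Function pointer):
+/// \code
+///   ValueToValueMapTy VMap;
+///   ClonedCodeInfo CodeInfo;
+///   Function *NewF = CloneFunction(F, VMap, &CodeInfo);
+///   // CodeInfo.ContainsCalls etc. now describe the cloned body.
+/// \endcode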
+Function *CloneFunction(Function *F, ValueToValueMapTy &VMap,
+ ClonedCodeInfo *CodeInfo = nullptr);
+
+/// Clone OldFunc into NewFunc, transforming the old arguments into references
+/// to VMap values. Note that if NewFunc already has basic blocks, the ones
+/// cloned into it will be added to the end of the function. This function
+/// fills in a list of return instructions, and can optionally remap types
+/// and/or append the specified suffix to all values cloned.
+///
+/// If ModuleLevelChanges is false, VMap contains no non-identity GlobalValue
+/// mappings.
+///
+void CloneFunctionInto(Function *NewFunc, const Function *OldFunc,
+ ValueToValueMapTy &VMap, bool ModuleLevelChanges,
+ SmallVectorImpl<ReturnInst*> &Returns,
+ const char *NameSuffix = "",
+ ClonedCodeInfo *CodeInfo = nullptr,
+ ValueMapTypeRemapper *TypeMapper = nullptr,
+ ValueMaterializer *Materializer = nullptr);
+
+void CloneAndPruneIntoFromInst(Function *NewFunc, const Function *OldFunc,
+ const Instruction *StartingInst,
+ ValueToValueMapTy &VMap, bool ModuleLevelChanges,
+ SmallVectorImpl<ReturnInst *> &Returns,
+ const char *NameSuffix = "",
+ ClonedCodeInfo *CodeInfo = nullptr);
+
+/// This works exactly like CloneFunctionInto,
+/// except that it does some simple constant prop and DCE on the fly. The
+/// effect of this is to copy significantly less code in cases where (for
+/// example) a function call with constant arguments is inlined, and those
+/// constant arguments cause a significant amount of code in the callee to be
+/// dead. Since this doesn't produce an exact copy of the input, it can't be
+/// used for things like CloneFunction or CloneModule.
+///
+/// If ModuleLevelChanges is false, VMap contains no non-identity GlobalValue
+/// mappings.
+///
+void CloneAndPruneFunctionInto(Function *NewFunc, const Function *OldFunc,
+ ValueToValueMapTy &VMap, bool ModuleLevelChanges,
+ SmallVectorImpl<ReturnInst*> &Returns,
+ const char *NameSuffix = "",
+ ClonedCodeInfo *CodeInfo = nullptr,
+ Instruction *TheCall = nullptr);
+
+/// This class captures the data input to the InlineFunction call, and records
+/// the auxiliary results produced by it.
+class InlineFunctionInfo {
+public:
+ explicit InlineFunctionInfo(
+ CallGraph *cg = nullptr,
+ function_ref<AssumptionCache &(Function &)> GetAssumptionCache = nullptr,
+ ProfileSummaryInfo *PSI = nullptr,
+ BlockFrequencyInfo *CallerBFI = nullptr,
+ BlockFrequencyInfo *CalleeBFI = nullptr)
+ : CG(cg), GetAssumptionCache(GetAssumptionCache), PSI(PSI),
+ CallerBFI(CallerBFI), CalleeBFI(CalleeBFI) {}
+
+ /// If non-null, InlineFunction will update the callgraph to reflect the
+ /// changes it makes.
+ CallGraph *CG;
+ function_ref<AssumptionCache &(Function &)> GetAssumptionCache;
+ ProfileSummaryInfo *PSI;
+ BlockFrequencyInfo *CallerBFI, *CalleeBFI;
+
+ /// InlineFunction fills this in with all static allocas that get copied into
+ /// the caller.
+ SmallVector<AllocaInst *, 4> StaticAllocas;
+
+ /// InlineFunction fills this in with callsites that were inlined from the
+ /// callee. This is only filled in if CG is non-null.
+ SmallVector<WeakTrackingVH, 8> InlinedCalls;
+
+ /// All of the new call sites inlined into the caller.
+ ///
+ /// 'InlineFunction' fills this in by scanning the inlined instructions, and
+ /// only if CG is null. If CG is non-null, instead the value handle
+ /// `InlinedCalls` above is used.
+ SmallVector<CallBase *, 8> InlinedCallSites;
+
+ void reset() {
+ StaticAllocas.clear();
+ InlinedCalls.clear();
+ InlinedCallSites.clear();
+ }
+};
+
+/// This function inlines the called function into the basic
+/// block of the caller. This returns an unsuccessful InlineResult if it is not
+/// possible to inline this call. The program is still in a well-defined state
+/// if this occurs, though.
+///
+/// Note that this only does one level of inlining. For example, if the
+/// instruction 'call B' is inlined, and 'B' calls 'C', then the call to 'C' now
+/// exists in the instruction stream. Similarly this will inline a recursive
+/// function by one level.
+///
+/// Note that while this routine is allowed to cleanup and optimize the
+/// *inlined* code to minimize the actual inserted code, it must not delete
+/// code in the caller as users of this routine may have pointers to
+/// instructions in the caller that need to remain stable.
+///
+/// If ForwardVarArgsTo is passed, inlining a function with varargs is allowed
+/// and all varargs at the callsite will be passed to any calls to
+/// ForwardVarArgsTo. The caller of InlineFunction has to make sure any varargs
+/// are only used by ForwardVarArgsTo.
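+///
+/// Illustrative sketch (CB is an assumed call site in the caller):
+/// \code
+///   InlineFunctionInfo IFI;
+///   InlineResult IR = InlineFunction(CB, IFI);
+///   if (!IR.isSuccess())
+///     errs() << "not inlined: " << IR.getFailureReason() << "\n";
+/// \endcode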
+InlineResult InlineFunction(CallBase &CB, InlineFunctionInfo &IFI,
+ AAResults *CalleeAAR = nullptr,
+ bool InsertLifetime = true,
+ Function *ForwardVarArgsTo = nullptr);
+
+/// Clones a loop \p OrigLoop. Returns the loop and the blocks in \p
+/// Blocks.
+///
+/// Updates LoopInfo and DominatorTree assuming the loop is dominated by block
+/// \p LoopDomBB. Insert the new blocks before block specified in \p Before.
+/// Note: Only innermost loops are supported.
+Loop *cloneLoopWithPreheader(BasicBlock *Before, BasicBlock *LoopDomBB,
+ Loop *OrigLoop, ValueToValueMapTy &VMap,
+ const Twine &NameSuffix, LoopInfo *LI,
+ DominatorTree *DT,
+ SmallVectorImpl<BasicBlock *> &Blocks);
+
+/// Remaps instructions in \p Blocks using the mapping in \p VMap.
+void remapInstructionsInBlocks(const SmallVectorImpl<BasicBlock *> &Blocks,
+ ValueToValueMapTy &VMap);
+
+/// Split edge between BB and PredBB and duplicate all non-Phi instructions
+/// from BB between its beginning and the StopAt instruction into the split
+/// block. Phi nodes are not duplicated, but their uses are handled correctly:
+/// we replace them with the uses of corresponding Phi inputs. ValueMapping
+/// is used to map the original instructions from BB to their newly-created
+/// copies. Returns the split block.
+BasicBlock *DuplicateInstructionsInSplitBetween(BasicBlock *BB,
+ BasicBlock *PredBB,
+ Instruction *StopAt,
+ ValueToValueMapTy &ValueMapping,
+ DomTreeUpdater &DTU);
+
+/// Updates profile information by adjusting the entry count by adding
+/// entryDelta, then scaling callsite information by the new count divided by
+/// the old count. VMap is used during inlining to also update the new clone.
+void updateProfileCallee(
+ Function *Callee, int64_t entryDelta,
+ const ValueMap<const Value *, WeakTrackingVH> *VMap = nullptr);
+
+/// Find the 'llvm.experimental.noalias.scope.decl' intrinsics in the specified
+/// basic blocks and extract their scope. These are candidates for duplication
+/// when cloning.
+void identifyNoAliasScopesToClone(
+ ArrayRef<BasicBlock *> BBs, SmallVectorImpl<MDNode *> &NoAliasDeclScopes);
+
+/// Find the 'llvm.experimental.noalias.scope.decl' intrinsics in the specified
+/// instruction range and extract their scope. These are candidates for
+/// duplication when cloning.
+void identifyNoAliasScopesToClone(
+ BasicBlock::iterator Start, BasicBlock::iterator End,
+ SmallVectorImpl<MDNode *> &NoAliasDeclScopes);
+
+/// Duplicate the specified list of noalias decl scopes.
+/// The 'Ext' string is added as an extension to the name.
+/// Afterwards, the ClonedScopes contains the mapping of the original scope
+/// MDNode onto the cloned scope.
+/// Be aware that the cloned scopes are still part of the original scope domain.
+void cloneNoAliasScopes(
+ ArrayRef<MDNode *> NoAliasDeclScopes,
+ DenseMap<MDNode *, MDNode *> &ClonedScopes,
+ StringRef Ext, LLVMContext &Context);
+
+/// Adapt the metadata for the specified instruction according to the
+/// provided mapping. This is normally used after cloning an instruction, when
+/// some noalias scopes needed to be cloned.
+void adaptNoAliasScopes(
+ llvm::Instruction *I, const DenseMap<MDNode *, MDNode *> &ClonedScopes,
+ LLVMContext &Context);
+
+/// Clone the specified noalias decl scopes. Then adapt all instructions in the
+/// NewBlocks basicblocks to the cloned versions.
+/// 'Ext' will be added to the duplicate scope names.
+void cloneAndAdaptNoAliasScopes(ArrayRef<MDNode *> NoAliasDeclScopes,
+ ArrayRef<BasicBlock *> NewBlocks,
+ LLVMContext &Context, StringRef Ext);
+
+/// Clone the specified noalias decl scopes. Then adapt all instructions in the
+/// [IStart, IEnd] (IEnd included !) range to the cloned versions. 'Ext' will be
+/// added to the duplicate scope names.
+void cloneAndAdaptNoAliasScopes(ArrayRef<MDNode *> NoAliasDeclScopes,
+ Instruction *IStart, Instruction *IEnd,
+ LLVMContext &Context, StringRef Ext);
+} // end namespace llvm
+
+#endif // LLVM_TRANSFORMS_UTILS_CLONING_H
+
+#ifdef __GNUC__
+#pragma GCC diagnostic pop
+#endif
diff --git a/contrib/libs/llvm12/include/llvm/Transforms/Utils/CodeExtractor.h b/contrib/libs/llvm12/include/llvm/Transforms/Utils/CodeExtractor.h
new file mode 100644
index 0000000000..7d4f6938cd
--- /dev/null
+++ b/contrib/libs/llvm12/include/llvm/Transforms/Utils/CodeExtractor.h
@@ -0,0 +1,246 @@
+#pragma once
+
+#ifdef __GNUC__
+#pragma GCC diagnostic push
+#pragma GCC diagnostic ignored "-Wunused-parameter"
+#endif
+
+//===- Transform/Utils/CodeExtractor.h - Code extraction util ---*- C++ -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// A utility to support extracting code from one function into its own
+// stand-alone function.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_TRANSFORMS_UTILS_CODEEXTRACTOR_H
+#define LLVM_TRANSFORMS_UTILS_CODEEXTRACTOR_H
+
+#include "llvm/ADT/ArrayRef.h"
+#include "llvm/ADT/DenseMap.h"
+#include "llvm/ADT/SetVector.h"
+#include "llvm/ADT/SmallPtrSet.h"
+#include <limits>
+
+namespace llvm {
+
+class AllocaInst;
+class BasicBlock;
+class BlockFrequency;
+class BlockFrequencyInfo;
+class BranchProbabilityInfo;
+class AssumptionCache;
+class CallInst;
+class DominatorTree;
+class Function;
+class Instruction;
+class Loop;
+class Module;
+class Type;
+class Value;
+
+/// A cache for the CodeExtractor analysis. The operation \ref
+/// CodeExtractor::extractCodeRegion is guaranteed not to invalidate this
+/// object. This object should conservatively be considered invalid if any
+/// other mutating operations on the IR occur.
+///
+/// Constructing this object is O(n) in the size of the function.
+class CodeExtractorAnalysisCache {
+ /// The allocas in the function.
+ SmallVector<AllocaInst *, 16> Allocas;
+
+ /// Base memory addresses of load/store instructions, grouped by block.
+ DenseMap<BasicBlock *, DenseSet<Value *>> BaseMemAddrs;
+
+ /// Blocks which contain instructions which may have unknown side-effects
+ /// on memory.
+ DenseSet<BasicBlock *> SideEffectingBlocks;
+
+ void findSideEffectInfoForBlock(BasicBlock &BB);
+
+public:
+ CodeExtractorAnalysisCache(Function &F);
+
+ /// Get the allocas in the function at the time the analysis was created.
+ /// Note that some of these allocas may no longer be present in the function,
+ /// due to \ref CodeExtractor::extractCodeRegion.
+ ArrayRef<AllocaInst *> getAllocas() const { return Allocas; }
+
+ /// Check whether \p BB contains an instruction thought to load from, store
+ /// to, or otherwise clobber the alloca \p Addr.
+ bool doesBlockContainClobberOfAddr(BasicBlock &BB, AllocaInst *Addr) const;
+};
+
+ /// Utility class for extracting code into a new function.
+ ///
+ /// This utility provides a simple interface for extracting some sequence of
+ /// code into its own function, replacing it with a call to that function. It
+ /// also provides various methods to query about the nature and result of
+ /// such a transformation.
+ ///
+ /// The rough algorithm used is:
+ /// 1) Find both the inputs and outputs for the extracted region.
+ /// 2) Pass the inputs as arguments, remapping them within the extracted
+ /// function to arguments.
+ /// 3) Add allocas for any scalar outputs, adding all of the outputs' allocas
+ /// as arguments, and inserting stores to the arguments for any scalars.
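+ ///
+ /// Illustrative sketch (Blocks is an assumed single-entry region of F and
+ /// DT its dominator tree):
+ /// \code
+ ///   CodeExtractor CE(Blocks, &DT);
+ ///   if (CE.isEligible()) {
+ ///     CodeExtractorAnalysisCache CEAC(F);
+ ///     Function *Outlined = CE.extractCodeRegion(CEAC);
+ ///   }
+ /// \endcode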
+ class CodeExtractor {
+ using ValueSet = SetVector<Value *>;
+
+ // Various bits of state computed on construction.
+ DominatorTree *const DT;
+ const bool AggregateArgs;
+ BlockFrequencyInfo *BFI;
+ BranchProbabilityInfo *BPI;
+ AssumptionCache *AC;
+
+ // If true, varargs functions can be extracted.
+ bool AllowVarArgs;
+
+ // Bits of intermediate state computed at various phases of extraction.
+ SetVector<BasicBlock *> Blocks;
+ unsigned NumExitBlocks = std::numeric_limits<unsigned>::max();
+ Type *RetTy;
+
+ // Suffix to use when creating extracted function (appended to the original
+ // function name + "."). If empty, the default is to use the entry block
+ // label, if non-empty, otherwise "extracted".
+ std::string Suffix;
+
+ public:
+ /// Create a code extractor for a sequence of blocks.
+ ///
+ /// Given a sequence of basic blocks where the first block in the sequence
+ /// dominates the rest, prepare a code extractor object for pulling this
+ /// sequence out into its new function. When a DominatorTree is also given,
+ /// extra checking and transformations are enabled. If AllowVarArgs is true,
+ /// vararg functions can be extracted. This is safe if all vararg handling
+ /// code, including vastart, is extracted. If AllowAlloca is true, extraction
+ /// of blocks containing alloca instructions is possible; however, the code
+ /// extractor won't validate whether extraction is legal.
+ CodeExtractor(ArrayRef<BasicBlock *> BBs, DominatorTree *DT = nullptr,
+ bool AggregateArgs = false, BlockFrequencyInfo *BFI = nullptr,
+ BranchProbabilityInfo *BPI = nullptr,
+ AssumptionCache *AC = nullptr,
+ bool AllowVarArgs = false, bool AllowAlloca = false,
+ std::string Suffix = "");
+
+ /// Create a code extractor for a loop body.
+ ///
+ /// Behaves just like the generic code sequence constructor, but uses the
+ /// block sequence of the loop.
+ CodeExtractor(DominatorTree &DT, Loop &L, bool AggregateArgs = false,
+ BlockFrequencyInfo *BFI = nullptr,
+ BranchProbabilityInfo *BPI = nullptr,
+ AssumptionCache *AC = nullptr,
+ std::string Suffix = "");
+
+ /// Perform the extraction, returning the new function.
+ ///
+ /// Returns zero when called on a CodeExtractor instance where isEligible
+ /// returns false.
+ Function *extractCodeRegion(const CodeExtractorAnalysisCache &CEAC);
+
+ /// Verify that the assumption cache isn't stale after a region is extracted.
+ /// Returns true when the verifier finds errors; AssumptionCache is passed as
+ /// a parameter to make this function stateless.
+ static bool verifyAssumptionCache(const Function &OldFunc,
+ const Function &NewFunc,
+ AssumptionCache *AC);
+
+ /// Test whether this code extractor is eligible.
+ ///
+ /// Based on the blocks used when constructing the code extractor,
+ /// determine whether it is eligible for extraction.
+ ///
+ /// Checks that varargs handling (with vastart and vaend) is only done in
+ /// the outlined blocks.
+ bool isEligible() const;
+
+ /// Compute the set of input values and output values for the code.
+ ///
+ /// These can be used either when performing the extraction or to evaluate
+ /// the expected size of a call to the extracted function. Note that this
+ /// work cannot be cached between the two as once we decide to extract
+ /// a code sequence, that sequence is modified, including changing these
+ /// sets, before extraction occurs. These modifications won't have any
+ /// significant impact on the cost however.
+ void findInputsOutputs(ValueSet &Inputs, ValueSet &Outputs,
+ const ValueSet &Allocas) const;
+
+ /// Check if lifetime marker nodes can be hoisted/sunk into the outline
+ /// region.
+ ///
+ /// Returns true if it is safe to do the code motion.
+ bool
+ isLegalToShrinkwrapLifetimeMarkers(const CodeExtractorAnalysisCache &CEAC,
+ Instruction *AllocaAddr) const;
+
+ /// Find the set of allocas whose live ranges are contained within the
+ /// outlined region.
+ ///
+ /// Allocas which have lifetime markers contained in the outlined region
+ /// should be pushed to the outlined function. The address bitcasts that
+ /// are used by the lifetime markers are also candidates for shrink-
+ /// wrapping. The instructions that need to be sunk are collected in
+ /// 'Allocas'.
+ void findAllocas(const CodeExtractorAnalysisCache &CEAC,
+ ValueSet &SinkCands, ValueSet &HoistCands,
+ BasicBlock *&ExitBlock) const;
+
+ /// Find or create a block within the outline region for placing hoisted
+ /// code.
+ ///
+ /// CommonExitBlock is a block outside the outline region. It is the common
+ /// successor of blocks inside the region. If there exists a single block
+ /// inside the region that is the predecessor of CommonExitBlock, that block
+ /// will be returned. Otherwise CommonExitBlock will be split and the
+ /// original block will be added to the outline region.
+ BasicBlock *findOrCreateBlockForHoisting(BasicBlock *CommonExitBlock);
+
+ private:
+ struct LifetimeMarkerInfo {
+ bool SinkLifeStart = false;
+ bool HoistLifeEnd = false;
+ Instruction *LifeStart = nullptr;
+ Instruction *LifeEnd = nullptr;
+ };
+
+ LifetimeMarkerInfo
+ getLifetimeMarkers(const CodeExtractorAnalysisCache &CEAC,
+ Instruction *Addr, BasicBlock *ExitBlock) const;
+
+ void severSplitPHINodesOfEntry(BasicBlock *&Header);
+ void severSplitPHINodesOfExits(const SmallPtrSetImpl<BasicBlock *> &Exits);
+ void splitReturnBlocks();
+
+ Function *constructFunction(const ValueSet &inputs,
+ const ValueSet &outputs,
+ BasicBlock *header,
+ BasicBlock *newRootNode, BasicBlock *newHeader,
+ Function *oldFunction, Module *M);
+
+ void moveCodeToFunction(Function *newFunction);
+
+ void calculateNewCallTerminatorWeights(
+ BasicBlock *CodeReplacer,
+ DenseMap<BasicBlock *, BlockFrequency> &ExitWeights,
+ BranchProbabilityInfo *BPI);
+
+ CallInst *emitCallAndSwitchStatement(Function *newFunction,
+ BasicBlock *newHeader,
+ ValueSet &inputs, ValueSet &outputs);
+ };
+
+} // end namespace llvm
+
+#endif // LLVM_TRANSFORMS_UTILS_CODEEXTRACTOR_H
+
+#ifdef __GNUC__
+#pragma GCC diagnostic pop
+#endif
diff --git a/contrib/libs/llvm12/include/llvm/Transforms/Utils/CodeMoverUtils.h b/contrib/libs/llvm12/include/llvm/Transforms/Utils/CodeMoverUtils.h
new file mode 100644
index 0000000000..81436fd675
--- /dev/null
+++ b/contrib/libs/llvm12/include/llvm/Transforms/Utils/CodeMoverUtils.h
@@ -0,0 +1,78 @@
+#pragma once
+
+#ifdef __GNUC__
+#pragma GCC diagnostic push
+#pragma GCC diagnostic ignored "-Wunused-parameter"
+#endif
+
+//===- Transform/Utils/CodeMoverUtils.h - CodeMover Utils -------*- C++ -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// This family of functions determines whether it is safe to move basic blocks
+// and instructions contained within a function.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_TRANSFORMS_UTILS_CODEMOVERUTILS_H
+#define LLVM_TRANSFORMS_UTILS_CODEMOVERUTILS_H
+
+namespace llvm {
+
+class BasicBlock;
+class DependenceInfo;
+class DominatorTree;
+class Instruction;
+class PostDominatorTree;
+
+/// Return true if \p I0 and \p I1 are control flow equivalent.
+/// Two instructions are control flow equivalent if their basic blocks are
+/// control flow equivalent.
+bool isControlFlowEquivalent(const Instruction &I0, const Instruction &I1,
+ const DominatorTree &DT,
+ const PostDominatorTree &PDT);
+
+/// Return true if \p BB0 and \p BB1 are control flow equivalent.
+/// Two basic blocks are control flow equivalent if, when one executes, the
+/// other is guaranteed to execute.
+bool isControlFlowEquivalent(const BasicBlock &BB0, const BasicBlock &BB1,
+ const DominatorTree &DT,
+ const PostDominatorTree &PDT);
+
+/// Return true if \p I can be safely moved before \p InsertPoint.
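+///
+/// Illustrative sketch (I, InsertPoint, DT, PDT and DI are assumed to come
+/// from the caller's analyses):
+/// \code
+///   if (isSafeToMoveBefore(I, InsertPoint, DT, &PDT, &DI))
+///     I.moveBefore(&InsertPoint);
+/// \endcode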
+bool isSafeToMoveBefore(Instruction &I, Instruction &InsertPoint,
+ DominatorTree &DT,
+ const PostDominatorTree *PDT = nullptr,
+ DependenceInfo *DI = nullptr);
+
+/// Return true if all instructions (except the terminator) in \p BB can be
+/// safely moved before \p InsertPoint.
+bool isSafeToMoveBefore(BasicBlock &BB, Instruction &InsertPoint,
+ DominatorTree &DT,
+ const PostDominatorTree *PDT = nullptr,
+ DependenceInfo *DI = nullptr);
+
+/// Move instructions, in an order-preserving manner, from \p FromBB to the
+/// beginning of \p ToBB when proven safe.
+void moveInstructionsToTheBeginning(BasicBlock &FromBB, BasicBlock &ToBB,
+ DominatorTree &DT,
+ const PostDominatorTree &PDT,
+ DependenceInfo &DI);
+
+/// Move instructions, in an order-preserving manner, from \p FromBB to the end
+/// of \p ToBB when proven safe.
+void moveInstructionsToTheEnd(BasicBlock &FromBB, BasicBlock &ToBB,
+ DominatorTree &DT, const PostDominatorTree &PDT,
+ DependenceInfo &DI);
+
+} // end namespace llvm
+
+#endif // LLVM_TRANSFORMS_UTILS_CODEMOVERUTILS_H
+
+#ifdef __GNUC__
+#pragma GCC diagnostic pop
+#endif
diff --git a/contrib/libs/llvm12/include/llvm/Transforms/Utils/CtorUtils.h b/contrib/libs/llvm12/include/llvm/Transforms/Utils/CtorUtils.h
new file mode 100644
index 0000000000..4ec952672f
--- /dev/null
+++ b/contrib/libs/llvm12/include/llvm/Transforms/Utils/CtorUtils.h
@@ -0,0 +1,42 @@
+#pragma once
+
+#ifdef __GNUC__
+#pragma GCC diagnostic push
+#pragma GCC diagnostic ignored "-Wunused-parameter"
+#endif
+
+//===- CtorUtils.h - Helpers for working with global_ctors ------*- C++ -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// This file defines functions that are used to process llvm.global_ctors.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_TRANSFORMS_UTILS_CTORUTILS_H
+#define LLVM_TRANSFORMS_UTILS_CTORUTILS_H
+
+#include "llvm/ADT/STLExtras.h"
+
+namespace llvm {
+
+class GlobalVariable;
+class Function;
+class Module;
+
+/// Call "ShouldRemove" for every entry in M's global_ctor list and remove the
+/// entries for which it returns true. Return true if anything changed.
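+///
+/// Illustrative sketch that drops ctors whose body is a single `ret` (the
+/// emptiness check is a made-up example policy):
+/// \code
+///   bool Changed = optimizeGlobalCtorsList(M, [](Function *F) {
+///     return F && F->size() == 1 && F->front().size() == 1;
+///   });
+/// \endcode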
+bool optimizeGlobalCtorsList(Module &M,
+ function_ref<bool(Function *)> ShouldRemove);
+
+} // End llvm namespace
+
+#endif // LLVM_TRANSFORMS_UTILS_CTORUTILS_H
+
+#ifdef __GNUC__
+#pragma GCC diagnostic pop
+#endif
diff --git a/contrib/libs/llvm12/include/llvm/Transforms/Utils/Debugify.h b/contrib/libs/llvm12/include/llvm/Transforms/Utils/Debugify.h
new file mode 100644
index 0000000000..7fc4011991
--- /dev/null
+++ b/contrib/libs/llvm12/include/llvm/Transforms/Utils/Debugify.h
@@ -0,0 +1,162 @@
+#pragma once
+
+#ifdef __GNUC__
+#pragma GCC diagnostic push
+#pragma GCC diagnostic ignored "-Wunused-parameter"
+#endif
+
+//===- Debugify.h - Attach synthetic debug info to everything -------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+///
+/// \file Interface to the `debugify` synthetic debug info testing utility.
+///
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_TRANSFORM_UTILS_DEBUGIFY_H
+#define LLVM_TRANSFORM_UTILS_DEBUGIFY_H
+
+#include "llvm/ADT/MapVector.h"
+#include "llvm/ADT/StringRef.h"
+#include "llvm/Bitcode/BitcodeWriterPass.h"
+#include "llvm/IR/IRPrintingPasses.h"
+#include "llvm/IR/LegacyPassManager.h"
+#include "llvm/IR/PassManager.h"
+
+namespace llvm {
+class DIBuilder;
+
+/// Add synthesized debug information to a module.
+///
+/// \param M The module to add debug information to.
+/// \param Functions A range of functions to add debug information to.
+/// \param Banner A prefix string to add to debug/error messages.
+/// \param ApplyToMF A callback that will add debug information to the
+/// MachineFunction for a Function. If nullptr, then the
+/// MachineFunction (if any) will not be modified.
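+///
+/// Illustrative sketch (M is an assumed Module; no MachineFunction callback):
+/// \code
+///   bool Changed = applyDebugifyMetadata(M, M.functions(),
+///                                        "debugify: ", nullptr);
+/// \endcode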
+bool applyDebugifyMetadata(
+ Module &M, iterator_range<Module::iterator> Functions, StringRef Banner,
+ std::function<bool(DIBuilder &, Function &)> ApplyToMF);
+
+/// Strip out all of the metadata and debug info inserted by debugify. If no
+/// llvm.debugify module-level named metadata is present, this is a no-op.
+/// Returns true if any change was made.
+bool stripDebugifyMetadata(Module &M);
+
+llvm::ModulePass *createDebugifyModulePass();
+llvm::FunctionPass *createDebugifyFunctionPass();
+
+struct NewPMDebugifyPass : public llvm::PassInfoMixin<NewPMDebugifyPass> {
+ llvm::PreservedAnalyses run(llvm::Module &M, llvm::ModuleAnalysisManager &AM);
+};
+
+/// Track how much `debugify` information has been lost.
+struct DebugifyStatistics {
+ /// Number of missing dbg.values.
+ unsigned NumDbgValuesMissing = 0;
+
+ /// Number of dbg.values expected.
+ unsigned NumDbgValuesExpected = 0;
+
+ /// Number of instructions with empty debug locations.
+ unsigned NumDbgLocsMissing = 0;
+
+ /// Number of instructions expected to have debug locations.
+ unsigned NumDbgLocsExpected = 0;
+
+ /// Get the ratio of missing/expected dbg.values.
+ float getMissingValueRatio() const {
+ return float(NumDbgValuesMissing) / float(NumDbgValuesExpected);
+ }
+
+ /// Get the ratio of missing/expected instructions with locations.
+ float getEmptyLocationRatio() const {
+ return float(NumDbgLocsMissing) / float(NumDbgLocsExpected);
+ }
+};
+
+/// Map pass names to a per-pass DebugifyStatistics instance.
+using DebugifyStatsMap = llvm::MapVector<llvm::StringRef, DebugifyStatistics>;
+
+void exportDebugifyStats(StringRef Path, const DebugifyStatsMap &Map);
+
+llvm::ModulePass *
+createCheckDebugifyModulePass(bool Strip = false,
+ llvm::StringRef NameOfWrappedPass = "",
+ DebugifyStatsMap *StatsMap = nullptr);
+
+llvm::FunctionPass *
+createCheckDebugifyFunctionPass(bool Strip = false,
+ llvm::StringRef NameOfWrappedPass = "",
+ DebugifyStatsMap *StatsMap = nullptr);
+
+struct NewPMCheckDebugifyPass
+ : public llvm::PassInfoMixin<NewPMCheckDebugifyPass> {
+ llvm::PreservedAnalyses run(llvm::Module &M, llvm::ModuleAnalysisManager &AM);
+};
+
+struct DebugifyEachInstrumentation {
+ DebugifyStatsMap StatsMap;
+
+ void registerCallbacks(PassInstrumentationCallbacks &PIC);
+};
+
+/// DebugifyCustomPassManager wraps each pass with the debugify passes if
+/// needed.
+/// NOTE: We support legacy custom pass manager only.
+/// TODO: Add New PM support for custom pass manager.
+class DebugifyCustomPassManager : public legacy::PassManager {
+ DebugifyStatsMap DIStatsMap;
+ bool EnableDebugifyEach = false;
+
+public:
+ using super = legacy::PassManager;
+
+ void add(Pass *P) override {
+ // Wrap each pass with (-check)-debugify passes if requested, making
+ // exceptions for passes which shouldn't see -debugify instrumentation.
+ bool WrapWithDebugify = EnableDebugifyEach && !P->getAsImmutablePass() &&
+ !isIRPrintingPass(P) && !isBitcodeWriterPass(P);
+ if (!WrapWithDebugify) {
+ super::add(P);
+ return;
+ }
+
+ // Apply -debugify/-check-debugify before/after each pass and collect
+ // debug info loss statistics.
+ PassKind Kind = P->getPassKind();
+ StringRef Name = P->getPassName();
+
+ // TODO: Implement Debugify for LoopPass.
+ switch (Kind) {
+ case PT_Function:
+ super::add(createDebugifyFunctionPass());
+ super::add(P);
+ super::add(createCheckDebugifyFunctionPass(true, Name, &DIStatsMap));
+ break;
+ case PT_Module:
+ super::add(createDebugifyModulePass());
+ super::add(P);
+ super::add(createCheckDebugifyModulePass(true, Name, &DIStatsMap));
+ break;
+ default:
+ super::add(P);
+ break;
+ }
+ }
+
+ void enableDebugifyEach() { EnableDebugifyEach = true; }
+
+ const DebugifyStatsMap &getDebugifyStatsMap() const { return DIStatsMap; }
+};
+} // namespace llvm
+
+#endif // LLVM_TRANSFORM_UTILS_DEBUGIFY_H
+
+#ifdef __GNUC__
+#pragma GCC diagnostic pop
+#endif
diff --git a/contrib/libs/llvm12/include/llvm/Transforms/Utils/EntryExitInstrumenter.h b/contrib/libs/llvm12/include/llvm/Transforms/Utils/EntryExitInstrumenter.h
new file mode 100644
index 0000000000..f21da73112
--- /dev/null
+++ b/contrib/libs/llvm12/include/llvm/Transforms/Utils/EntryExitInstrumenter.h
@@ -0,0 +1,46 @@
+#pragma once
+
+#ifdef __GNUC__
+#pragma GCC diagnostic push
+#pragma GCC diagnostic ignored "-Wunused-parameter"
+#endif
+
+//===- EntryExitInstrumenter.h - Function Entry/Exit Instrumentation ------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// EntryExitInstrumenter pass - Instrument function entry/exit with calls to
+// mcount(), @__cyg_profile_func_{enter,exit} and the like. There are two
+// variants, intended to run pre- and post-inlining, respectively.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_TRANSFORMS_UTILS_ENTRYEXITINSTRUMENTER_H
+#define LLVM_TRANSFORMS_UTILS_ENTRYEXITINSTRUMENTER_H
+
+#include "llvm/IR/PassManager.h"
+
+namespace llvm {
+
+class Function;
+
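+/// Illustrative sketch of adding this pass to a new-PM function pipeline
+/// (FPM is an assumed FunctionPassManager):
+/// \code
+///   FPM.addPass(EntryExitInstrumenterPass(/*PostInlining=*/false));
+/// \endcode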
+struct EntryExitInstrumenterPass
+ : public PassInfoMixin<EntryExitInstrumenterPass> {
+ EntryExitInstrumenterPass(bool PostInlining) : PostInlining(PostInlining) {}
+
+ PreservedAnalyses run(Function &F, FunctionAnalysisManager &AM);
+
+ bool PostInlining;
+};
+
+} // namespace llvm
+
+#endif // LLVM_TRANSFORMS_UTILS_ENTRYEXITINSTRUMENTER_H
+
+#ifdef __GNUC__
+#pragma GCC diagnostic pop
+#endif
diff --git a/contrib/libs/llvm12/include/llvm/Transforms/Utils/EscapeEnumerator.h b/contrib/libs/llvm12/include/llvm/Transforms/Utils/EscapeEnumerator.h
new file mode 100644
index 0000000000..7ae235cbd1
--- /dev/null
+++ b/contrib/libs/llvm12/include/llvm/Transforms/Utils/EscapeEnumerator.h
@@ -0,0 +1,59 @@
+#pragma once
+
+#ifdef __GNUC__
+#pragma GCC diagnostic push
+#pragma GCC diagnostic ignored "-Wunused-parameter"
+#endif
+
+//===-- EscapeEnumerator.h --------------------------------------*- C++ -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// Defines a helper class that enumerates all possible exits from a function,
+// including exception handling.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_TRANSFORMS_UTILS_ESCAPEENUMERATOR_H
+#define LLVM_TRANSFORMS_UTILS_ESCAPEENUMERATOR_H
+
+#include "llvm/IR/Function.h"
+#include "llvm/IR/IRBuilder.h"
+
+namespace llvm {
+
+/// EscapeEnumerator - This is a little algorithm to find all escape points
+/// from a function so that "finally"-style code can be inserted. In addition
+/// to finding the existing return and unwind instructions, it also (if
+/// necessary) transforms any call instructions into invokes and sends them to
+/// a landing pad.
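+///
+/// Illustrative sketch of the usual iteration pattern (F is assumed):
+/// \code
+///   EscapeEnumerator EE(F, "finally");
+///   while (IRBuilder<> *AtExit = EE.Next()) {
+///     // AtExit is positioned at a return or unwind; insert cleanup here.
+///   }
+/// \endcode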
+class EscapeEnumerator {
+ Function &F;
+ const char *CleanupBBName;
+
+ Function::iterator StateBB, StateE;
+ IRBuilder<> Builder;
+ bool Done;
+ bool HandleExceptions;
+
+public:
+ EscapeEnumerator(Function &F, const char *N = "cleanup",
+ bool HandleExceptions = true)
+ : F(F), CleanupBBName(N), StateBB(F.begin()), StateE(F.end()),
+ Builder(F.getContext()), Done(false),
+ HandleExceptions(HandleExceptions) {}
+
+ IRBuilder<> *Next();
+};
+
+} // end namespace llvm
+
+#endif // LLVM_TRANSFORMS_UTILS_ESCAPEENUMERATOR_H
+
+#ifdef __GNUC__
+#pragma GCC diagnostic pop
+#endif
diff --git a/contrib/libs/llvm12/include/llvm/Transforms/Utils/Evaluator.h b/contrib/libs/llvm12/include/llvm/Transforms/Utils/Evaluator.h
new file mode 100644
index 0000000000..2e3402554a
--- /dev/null
+++ b/contrib/libs/llvm12/include/llvm/Transforms/Utils/Evaluator.h
@@ -0,0 +1,143 @@
+#pragma once
+
+#ifdef __GNUC__
+#pragma GCC diagnostic push
+#pragma GCC diagnostic ignored "-Wunused-parameter"
+#endif
+
+//===- Evaluator.h - LLVM IR evaluator --------------------------*- C++ -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// Function evaluator for LLVM IR.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_TRANSFORMS_UTILS_EVALUATOR_H
+#define LLVM_TRANSFORMS_UTILS_EVALUATOR_H
+
+#include "llvm/ADT/DenseMap.h"
+#include "llvm/ADT/SmallPtrSet.h"
+#include "llvm/ADT/SmallVector.h"
+#include "llvm/IR/BasicBlock.h"
+#include "llvm/IR/GlobalVariable.h"
+#include "llvm/IR/Instructions.h"
+#include "llvm/IR/Value.h"
+#include "llvm/Support/Casting.h"
+#include <cassert>
+#include <deque>
+#include <memory>
+
+namespace llvm {
+
+class DataLayout;
+class Function;
+class TargetLibraryInfo;
+
+/// This class evaluates LLVM IR, producing the Constant representing each SSA
+/// instruction. Changes to global variables are stored in a mapping that can
+/// be iterated over after the evaluation is complete. Once an evaluation call
+/// fails, the evaluation object should not be reused.
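+///
+/// Illustrative sketch (F, DL and TLI are assumed to be the function, the
+/// module's DataLayout and a TargetLibraryInfo pointer, respectively):
+/// \code
+///   Evaluator Eval(DL, TLI);
+///   Constant *RetVal = nullptr;
+///   SmallVector<Constant *, 4> Args;
+///   if (Eval.EvaluateFunction(F, RetVal, Args)) {
+///     // Eval.getMutatedMemory() describes the stores that were performed.
+///   }
+/// \endcode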
+class Evaluator {
+public:
+ Evaluator(const DataLayout &DL, const TargetLibraryInfo *TLI)
+ : DL(DL), TLI(TLI) {
+ ValueStack.emplace_back();
+ }
+
+ ~Evaluator() {
+ for (auto &Tmp : AllocaTmps)
+ // If there are still users of the alloca, the program is doing something
+ // silly, e.g. storing the address of the alloca somewhere and using it
+ // later. Since this is undefined, we'll just make it be null.
+ if (!Tmp->use_empty())
+ Tmp->replaceAllUsesWith(Constant::getNullValue(Tmp->getType()));
+ }
+
+ /// Evaluate a call to function F, returning true if successful, false if we
+ /// can't evaluate it. ActualArgs contains the actual argument values for
+ /// the call.
+ bool EvaluateFunction(Function *F, Constant *&RetVal,
+ const SmallVectorImpl<Constant*> &ActualArgs);
+
+ /// Evaluate all instructions in block BB, returning true if successful, false
+ /// if we can't evaluate it. NextBB returns the next BB that control flows
+ /// into, or null upon return.
+ bool EvaluateBlock(BasicBlock::iterator CurInst, BasicBlock *&NextBB);
+
+ Constant *getVal(Value *V) {
+ if (Constant *CV = dyn_cast<Constant>(V)) return CV;
+ Constant *R = ValueStack.back().lookup(V);
+ assert(R && "Reference to an uncomputed value!");
+ return R;
+ }
+
+ void setVal(Value *V, Constant *C) {
+ ValueStack.back()[V] = C;
+ }
+
+ /// Casts the call result to the type of the bitcast call expression, if needed.
+ Constant *castCallResultIfNeeded(Value *CallExpr, Constant *RV);
+
+ const DenseMap<Constant*, Constant*> &getMutatedMemory() const {
+ return MutatedMemory;
+ }
+
+ const SmallPtrSetImpl<GlobalVariable*> &getInvariants() const {
+ return Invariants;
+ }
+
+private:
+ /// Given a call site, return the callee and the list of its formal arguments.
+ Function *getCalleeWithFormalArgs(CallBase &CB,
+ SmallVectorImpl<Constant *> &Formals);
+
+ /// Given a call site and a callee, return the list of the callee's formal
+ /// argument values, converting them when necessary.
+ bool getFormalParams(CallBase &CB, Function *F,
+ SmallVectorImpl<Constant *> &Formals);
+
+ Constant *ComputeLoadResult(Constant *P);
+
+ /// As we compute SSA register values, we store their contents here. The back
+ /// of the deque contains the current function and the stack contains the
+ /// values in the calling frames.
+ std::deque<DenseMap<Value*, Constant*>> ValueStack;
+
+ /// This is used to detect recursion. In pathological situations we could hit
+ /// exponential behavior, but at least there is nothing unbounded.
+ SmallVector<Function*, 4> CallStack;
+
+ /// For each store we execute, we update this map. Loads check this to get
+ /// the most up-to-date value. If evaluation is successful, this state is
+ /// committed to the process.
+ DenseMap<Constant*, Constant*> MutatedMemory;
+
+ /// To 'execute' an alloca, we create a temporary global variable to represent
+ /// its body. This vector is needed so we can delete the temporary globals
+ /// when we are done.
+ SmallVector<std::unique_ptr<GlobalVariable>, 32> AllocaTmps;
+
+ /// These global variables have been marked invariant by the static
+ /// constructor.
+ SmallPtrSet<GlobalVariable*, 8> Invariants;
+
+ /// These are constants we have checked and know to be simple enough to live
+ /// in a static initializer of a global.
+ SmallPtrSet<Constant*, 8> SimpleConstants;
+
+ const DataLayout &DL;
+ const TargetLibraryInfo *TLI;
+};
+
+} // end namespace llvm
+
+#endif // LLVM_TRANSFORMS_UTILS_EVALUATOR_H
+
+#ifdef __GNUC__
+#pragma GCC diagnostic pop
+#endif
diff --git a/contrib/libs/llvm12/include/llvm/Transforms/Utils/FixIrreducible.h b/contrib/libs/llvm12/include/llvm/Transforms/Utils/FixIrreducible.h
new file mode 100644
index 0000000000..6417ea5882
--- /dev/null
+++ b/contrib/libs/llvm12/include/llvm/Transforms/Utils/FixIrreducible.h
@@ -0,0 +1,31 @@
+#pragma once
+
+#ifdef __GNUC__
+#pragma GCC diagnostic push
+#pragma GCC diagnostic ignored "-Wunused-parameter"
+#endif
+
+//===- FixIrreducible.h - Convert irreducible control-flow into loops -----===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_TRANSFORMS_UTILS_FIXIRREDUCIBLE_H
+#define LLVM_TRANSFORMS_UTILS_FIXIRREDUCIBLE_H
+
+#include "llvm/IR/PassManager.h"
+
+namespace llvm {
+struct FixIrreduciblePass : PassInfoMixin<FixIrreduciblePass> {
+ PreservedAnalyses run(Function &F, FunctionAnalysisManager &AM);
+};
+} // namespace llvm
+
+#endif // LLVM_TRANSFORMS_UTILS_FIXIRREDUCIBLE_H
+
+#ifdef __GNUC__
+#pragma GCC diagnostic pop
+#endif
diff --git a/contrib/libs/llvm12/include/llvm/Transforms/Utils/FunctionComparator.h b/contrib/libs/llvm12/include/llvm/Transforms/Utils/FunctionComparator.h
new file mode 100644
index 0000000000..6d7a659ef2
--- /dev/null
+++ b/contrib/libs/llvm12/include/llvm/Transforms/Utils/FunctionComparator.h
@@ -0,0 +1,403 @@
+#pragma once
+
+#ifdef __GNUC__
+#pragma GCC diagnostic push
+#pragma GCC diagnostic ignored "-Wunused-parameter"
+#endif
+
+//===- FunctionComparator.h - Function Comparator ---------------*- C++ -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// This file defines the FunctionComparator and GlobalNumberState classes which
+// are used by the MergeFunctions pass for comparing functions.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_TRANSFORMS_UTILS_FUNCTIONCOMPARATOR_H
+#define LLVM_TRANSFORMS_UTILS_FUNCTIONCOMPARATOR_H
+
+#include "llvm/ADT/DenseMap.h"
+#include "llvm/ADT/StringRef.h"
+#include "llvm/IR/Attributes.h"
+#include "llvm/IR/Instructions.h"
+#include "llvm/IR/Operator.h"
+#include "llvm/IR/ValueMap.h"
+#include "llvm/Support/AtomicOrdering.h"
+#include "llvm/Support/Casting.h"
+#include <cstdint>
+#include <tuple>
+
+namespace llvm {
+
+class APFloat;
+class APInt;
+class BasicBlock;
+class Constant;
+class Function;
+class GlobalValue;
+class InlineAsm;
+class Instruction;
+class MDNode;
+class Type;
+class Value;
+
+/// GlobalNumberState assigns an integer to each global value in the program,
+/// which is used by the comparison routine to order references to globals. This
+/// state must be preserved throughout the pass, because Functions and other
+/// globals need to maintain their relative order. Globals are assigned a number
+/// when they are first visited. This order is deterministic, and so the
+/// assigned numbers are as well. When two functions are merged, neither number
+/// is updated. If the symbols are weak, this would be incorrect. If they are
+/// strong, then one will be replaced at all references to the other, and so
+/// direct callsites will now see one or the other symbol, and no update is
+/// necessary. Note that if we were guaranteed unique names, we could just
+/// compare those, but this would not work for stripped bitcodes or for those
+/// few symbols without a name.
+class GlobalNumberState {
+ struct Config : ValueMapConfig<GlobalValue *> {
+ enum { FollowRAUW = false };
+ };
+
+ // Each GlobalValue is mapped to an identifier. The Config ensures when RAUW
+ // occurs, the mapping does not change. Tracking changes is unnecessary, and
+ // also problematic for weak symbols (which may be overwritten).
+ using ValueNumberMap = ValueMap<GlobalValue *, uint64_t, Config>;
+ ValueNumberMap GlobalNumbers;
+
+ // The next unused serial number to assign to a global.
+ uint64_t NextNumber = 0;
+
+public:
+ GlobalNumberState() = default;
+
+ uint64_t getNumber(GlobalValue* Global) {
+ ValueNumberMap::iterator MapIter;
+ bool Inserted;
+ std::tie(MapIter, Inserted) = GlobalNumbers.insert({Global, NextNumber});
+ if (Inserted)
+ NextNumber++;
+ return MapIter->second;
+ }
+
+ void erase(GlobalValue *Global) {
+ GlobalNumbers.erase(Global);
+ }
+
+ void clear() {
+ GlobalNumbers.clear();
+ }
+};
+
+/// FunctionComparator - Compares two functions to determine whether or not
+/// they will generate machine code with the same behaviour. DataLayout is
+/// used if available. The comparator always fails conservatively (erring on the
+/// side of claiming that two functions are different).
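+///
+/// A minimal usage sketch (F and G are hypothetical Function pointers from the
+/// same Module; the hash is only a cheap pre-filter, so equal hashes still
+/// require a full compare()):
+/// \code
+///   GlobalNumberState GN;
+///   if (FunctionComparator::functionHash(*F) ==
+///       FunctionComparator::functionHash(*G)) {
+///     FunctionComparator FC(F, G, &GN);
+///     if (FC.compare() == 0) {
+///       // F and G should generate equivalent machine code.
+///     }
+///   }
+/// \endcode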
+class FunctionComparator {
+public:
+ FunctionComparator(const Function *F1, const Function *F2,
+ GlobalNumberState* GN)
+ : FnL(F1), FnR(F2), GlobalNumbers(GN) {}
+
+ /// Test whether the two functions have equivalent behaviour.
+ int compare();
+
+ /// Hash a function. Equivalent functions will have the same hash, and unequal
+ /// functions will have different hashes with high probability.
+ using FunctionHash = uint64_t;
+ static FunctionHash functionHash(Function &);
+
+protected:
+ /// Start the comparison.
+ void beginCompare() {
+ sn_mapL.clear();
+ sn_mapR.clear();
+ }
+
+ /// Compares the signature and other general attributes of the two functions.
+ int compareSignature() const;
+
+ /// Test whether two basic blocks have equivalent behaviour.
+ int cmpBasicBlocks(const BasicBlock *BBL, const BasicBlock *BBR) const;
+
+  /// Constants comparison.
+  /// It is analogous to a lexicographical comparison between hypothetical
+  /// numbers of the following format:
+ /// <bitcastability-trait><raw-bit-contents>
+ ///
+ /// 1. Bitcastability.
+  /// Check whether L's type could be losslessly bitcast to R's type.
+  /// At this stage, if a lossless bitcast is not possible, the method
+  /// returns -1 or 1, thus also defining which type is greater in the
+  /// context of bitcastability.
+ /// Stage 0: If types are equal in terms of cmpTypes, then we can go straight
+ /// to the contents comparison.
+ /// If types differ, remember types comparison result and check
+ /// whether we still can bitcast types.
+  /// Stage 1: Types that satisfy the isFirstClassType conditions are always
+  ///          greater than others.
+  /// Stage 2: Vector is greater than non-vector.
+ /// If both types are vectors, then vector with greater bitwidth is
+ /// greater.
+ /// If both types are vectors with the same bitwidth, then types
+ /// are bitcastable, and we can skip other stages, and go to contents
+ /// comparison.
+ /// Stage 3: Pointer types are greater than non-pointers. If both types are
+ /// pointers of the same address space - go to contents comparison.
+ /// Different address spaces: pointer with greater address space is
+ /// greater.
+  /// Stage 4: Types are neither vectors nor pointers, and they differ.
+  ///          We don't know how to bitcast them, so we don't, and instead
+  ///          return the types comparison result (which then determines the
+  ///          relationship among constants we don't know how to bitcast).
+ ///
+  /// Just for clarity, let's see how the set of constants could look
+  /// on a single-dimension axis:
+  ///
+  /// [NFCT], [FCT, "others"], [FCT, pointers], [FCT, vectors]
+  /// Where: NFCT - Not a FirstClassType
+  ///        FCT - FirstClassType
+ ///
+ /// 2. Compare raw contents.
+  /// This stage ignores types and only compares the bits of L and R.
+  /// Returns 0 if L and R have equivalent contents,
+  /// -1 or 1 if the values are different.
+  /// Pretty trivial:
+  /// 2.1. If the contents are numbers, compare the numbers.
+  ///    Ints with greater bitwidth are greater. Ints with the same bitwidth
+  ///    are compared by their contents.
+  /// 2.2. "And so on". To avoid discrepancies with these comments it is
+  /// probably best to read the implementation itself.
+  /// 3. Back to the overall picture. Let's look again at how the ordered set
+  /// of constants looks:
+ /// [NFCT], [FCT, "others"], [FCT, pointers], [FCT, vectors]
+ ///
+ /// Now look, what could be inside [FCT, "others"], for example:
+ /// [FCT, "others"] =
+ /// [
+ /// [double 0.1], [double 1.23],
+ /// [i32 1], [i32 2],
+ /// { double 1.0 }, ; StructTyID, NumElements = 1
+ /// { i32 1 }, ; StructTyID, NumElements = 1
+ /// { double 1, i32 1 }, ; StructTyID, NumElements = 2
+ /// { i32 1, double 1 } ; StructTyID, NumElements = 2
+ /// ]
+ ///
+  /// Let's explain the order. Floating-point numbers are less than integers,
+  /// simply because of the cmpTypes ordering: FloatTyID < IntegerTyID.
+  /// Floats (with the same fltSemantics) are sorted according to their value.
+  /// Then come the integers, which, like the floats, are easily ordered
+  /// among themselves.
+  /// Structures are grouped at the tail, again because of their
+  /// TypeID: StructTyID > IntegerTyID > FloatTyID.
+  /// Structures with a greater number of elements are greater. Structures
+  /// whose greater elements come first are greater.
+  /// The same logic applies to vectors, arrays and other complex types.
+ ///
+ /// Bitcastable constants.
+  /// Let's assume that some constant belongs to a group of
+  /// "so-called-equal" values with different types, and at the same time
+  /// belongs to another group of constants with equal types
+  /// and "really" equal values.
+ ///
+ /// Now, prove that this is impossible:
+ ///
+ /// If constant A with type TyA is bitcastable to B with type TyB, then:
+  /// 1. All constants with types equal to TyA are bitcastable to B. Since
+  ///    those must be vectors (if TyA is a vector), pointers
+  ///    (if TyA is a pointer), or otherwise have TyA equal to TyB, those types
+  ///    must be equal to TyB.
+  /// 2. All constants with types that are non-equal but bitcastable to TyA are
+  ///    bitcastable to B.
+  ///    Once again, just because we allow this only for vectors and pointers.
+  ///    This statement can be expanded as below:
+  /// 2.1. All vectors with bitwidth equal to vector A have bitwidth equal to
+  ///      vector B, and are thus bitcastable to B as well.
+  /// 2.2. All pointers of the same address space, no matter what they point to,
+  ///      are bitcastable. So if C is a pointer, it can be bitcast to A and B.
+  /// So any constant equal or bitcastable to A is equal or bitcastable to B.
+ /// QED.
+ ///
+  /// In other words, for pointers and vectors we ignore the top-level type and
+  /// look at their particular properties (bit-width for vectors, and
+  /// address space for pointers).
+ /// If these properties are equal - compare their contents.
+ int cmpConstants(const Constant *L, const Constant *R) const;
+
+  /// Compares two global values by number. Uses the GlobalNumberState to
+  /// identify the same globals across function calls.
+ int cmpGlobalValues(GlobalValue *L, GlobalValue *R) const;
+
+ /// Assign or look up previously assigned numbers for the two values, and
+ /// return whether the numbers are equal. Numbers are assigned in the order
+ /// visited.
+ /// Comparison order:
+  /// Stage 0: A value that is the function itself is always greater than
+  ///          others. If the left and right values are references to their
+  ///          respective functions, then they are equal.
+  /// Stage 1: Constants are greater than non-constants.
+  ///          If both left and right are constants, then the result of
+  ///          cmpConstants is used as the cmpValues result.
+  /// Stage 2: InlineAsm instances are greater than others. If both left and
+  ///          right are InlineAsm instances, the InlineAsm* pointers are cast
+  ///          to integers and compared as numbers.
+  /// Stage 3: For all other cases we compare the order in which we meet these
+  ///          values in their functions. If the right value was met first
+  ///          during scanning, then the left value is greater.
+  ///          In other words, we compare serial numbers; for more details
+  ///          see the comments for sn_mapL and sn_mapR.
+ int cmpValues(const Value *L, const Value *R) const;
+
+ /// Compare two Instructions for equivalence, similar to
+ /// Instruction::isSameOperationAs.
+ ///
+ /// Stages are listed in "most significant stage first" order:
+ /// On each stage below, we do comparison between some left and right
+ /// operation parts. If parts are non-equal, we assign parts comparison
+ /// result to the operation comparison result and exit from method.
+ /// Otherwise we proceed to the next stage.
+ /// Stages:
+ /// 1. Operations opcodes. Compared as numbers.
+ /// 2. Number of operands.
+ /// 3. Operation types. Compared with cmpType method.
+ /// 4. Compare operation subclass optional data as stream of bytes:
+ /// just convert it to integers and call cmpNumbers.
+  /// 5. Compare operation operand types with cmpType, in
+  /// most-significant-operand-first order.
+ /// 6. Last stage. Check operations for some specific attributes.
+ /// For example, for Load it would be:
+ /// 6.1.Load: volatile (as boolean flag)
+ /// 6.2.Load: alignment (as integer numbers)
+ /// 6.3.Load: ordering (as underlying enum class value)
+ /// 6.4.Load: synch-scope (as integer numbers)
+ /// 6.5.Load: range metadata (as integer ranges)
+ /// On this stage its better to see the code, since its not more than 10-15
+ /// strings for particular instruction, and could change sometimes.
+ ///
+ /// Sets \p needToCmpOperands to true if the operands of the instructions
+ /// still must be compared afterwards. In this case it's already guaranteed
+ /// that both instructions have the same number of operands.
+ int cmpOperations(const Instruction *L, const Instruction *R,
+ bool &needToCmpOperands) const;
+
+  /// cmpTypes - compares two types and
+  /// defines a total ordering over the set of types.
+ ///
+ /// Return values:
+ /// 0 if types are equal,
+ /// -1 if Left is less than Right,
+ /// +1 if Left is greater than Right.
+ ///
+ /// Description:
+  /// The comparison is broken into stages. As in a lexicographical comparison,
+  /// a stage coming first has higher priority.
+  /// At each stage of the explanation, keep the total ordering properties in
+  /// mind.
+ ///
+  /// 0. Before the comparison we coerce pointer types of address space 0 to
+  /// integer.
+  /// We also don't bother when the left and right types are the same, and
+  /// just return 0 in this case.
+ ///
+ /// 1. If types are of different kind (different type IDs).
+ /// Return result of type IDs comparison, treating them as numbers.
+ /// 2. If types are integers, check that they have the same width. If they
+ /// are vectors, check that they have the same count and subtype.
+ /// 3. Types have the same ID, so check whether they are one of:
+ /// * Void
+ /// * Float
+ /// * Double
+ /// * X86_FP80
+ /// * FP128
+ /// * PPC_FP128
+ /// * Label
+ /// * Metadata
+  ///    We can treat these types as equal whenever their IDs are the same.
+ /// 4. If Left and Right are pointers, return result of address space
+ /// comparison (numbers comparison). We can treat pointer types of same
+ /// address space as equal.
+  /// 5. If types are complex,
+  /// then both Left and Right are expanded and their element types are
+  /// checked in the same way. If we get Res != 0 at some stage, return it.
+  /// Otherwise return 0.
+  /// 6. All other cases are llvm_unreachable.
+ int cmpTypes(Type *TyL, Type *TyR) const;
+
+ int cmpNumbers(uint64_t L, uint64_t R) const;
+ int cmpAPInts(const APInt &L, const APInt &R) const;
+ int cmpAPFloats(const APFloat &L, const APFloat &R) const;
+ int cmpMem(StringRef L, StringRef R) const;
+
+ // The two functions undergoing comparison.
+ const Function *FnL, *FnR;
+
+private:
+ int cmpOrderings(AtomicOrdering L, AtomicOrdering R) const;
+ int cmpInlineAsm(const InlineAsm *L, const InlineAsm *R) const;
+ int cmpAttrs(const AttributeList L, const AttributeList R) const;
+ int cmpRangeMetadata(const MDNode *L, const MDNode *R) const;
+ int cmpOperandBundlesSchema(const CallBase &LCS, const CallBase &RCS) const;
+
+ /// Compare two GEPs for equivalent pointer arithmetic.
+ /// Parts to be compared for each comparison stage,
+ /// most significant stage first:
+ /// 1. Address space. As numbers.
+ /// 2. Constant offset, (using GEPOperator::accumulateConstantOffset method).
+ /// 3. Pointer operand type (using cmpType method).
+ /// 4. Number of operands.
+ /// 5. Compare operands, using cmpValues method.
+ int cmpGEPs(const GEPOperator *GEPL, const GEPOperator *GEPR) const;
+ int cmpGEPs(const GetElementPtrInst *GEPL,
+ const GetElementPtrInst *GEPR) const {
+ return cmpGEPs(cast<GEPOperator>(GEPL), cast<GEPOperator>(GEPR));
+ }
+
+  /// Assign serial numbers to values from the left function and values from
+  /// the right function.
+  /// Explanation:
+  /// While comparing functions we need to compare the values we meet on the
+  /// left and right sides.
+  /// It is easy to sort things out for external values: they simply have to be
+  /// the same value on the left and on the right.
+  /// But for local values (those introduced inside the function body)
+  /// we have to ensure they were introduced at exactly the same place
+  /// and play the same role.
+  /// So we assign a serial number to each value the first time we meet it;
+  /// values met at the same place end up with the same serial numbers.
+  /// With that in mind, it is worth explaining a few points about the values
+  /// assigned to BBs and about alternative implementations (see below).
+ ///
+ /// 1. Safety of BB reordering.
+  /// It's safe to change the order of BasicBlocks in a function.
+  /// The relationship with other functions and the serial numbering will not
+  /// change in this case.
+  /// As follows from FunctionComparator::compare(), we do a CFG walk: we start
+  /// from the entry and then take each terminator. So it doesn't matter how
+  /// the BBs are actually ordered in the function. And since cmpValues is
+  /// called during this walk, the numbering depends only on how the BBs are
+  /// located inside the CFG. So the answer is yes, we will get the same
+  /// numbering.
+ ///
+  /// 2. Impossibility of using the dominance properties of values.
+  /// If we compare two instruction operands, where the first is a use of local
+  /// variable AL from function FL and the second is a use of local variable AR
+  /// from FR, we could compare their origins and check whether they are
+  /// defined at the same place.
+  /// But we are still not able to compare the operands of PHI nodes, since
+  /// those could be operands from later BBs we haven't scanned yet.
+  /// So it is impossible to use dominance properties in general.
+ mutable DenseMap<const Value*, int> sn_mapL, sn_mapR;
+
+ // The global state we will use
+ GlobalNumberState* GlobalNumbers;
+};
+
+} // end namespace llvm
+
+#endif // LLVM_TRANSFORMS_UTILS_FUNCTIONCOMPARATOR_H
+
+#ifdef __GNUC__
+#pragma GCC diagnostic pop
+#endif
diff --git a/contrib/libs/llvm12/include/llvm/Transforms/Utils/FunctionImportUtils.h b/contrib/libs/llvm12/include/llvm/Transforms/Utils/FunctionImportUtils.h
new file mode 100644
index 0000000000..af29586048
--- /dev/null
+++ b/contrib/libs/llvm12/include/llvm/Transforms/Utils/FunctionImportUtils.h
@@ -0,0 +1,147 @@
+#pragma once
+
+#ifdef __GNUC__
+#pragma GCC diagnostic push
+#pragma GCC diagnostic ignored "-Wunused-parameter"
+#endif
+
+//===- FunctionImportUtils.h - Importing support utilities -----*- C++ -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// This file defines the FunctionImportGlobalProcessing class which is used
+// to perform the necessary global value handling for function importing.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_TRANSFORMS_UTILS_FUNCTIONIMPORTUTILS_H
+#define LLVM_TRANSFORMS_UTILS_FUNCTIONIMPORTUTILS_H
+
+#include "llvm/ADT/SetVector.h"
+#include "llvm/IR/ModuleSummaryIndex.h"
+
+namespace llvm {
+class Module;
+
+/// Class to handle necessary GlobalValue changes required by ThinLTO
+/// function importing, including linkage changes and any necessary renaming.
+class FunctionImportGlobalProcessing {
+ /// The Module which we are exporting or importing functions from.
+ Module &M;
+
+ /// Module summary index passed in for function importing/exporting handling.
+ const ModuleSummaryIndex &ImportIndex;
+
+ /// Globals to import from this module, all other functions will be
+ /// imported as declarations instead of definitions.
+ SetVector<GlobalValue *> *GlobalsToImport;
+
+ /// Set to true if the given ModuleSummaryIndex contains any functions
+ /// from this source module, in which case we must conservatively assume
+ /// that any of its functions may be imported into another module
+ /// as part of a different backend compilation process.
+ bool HasExportedFunctions = false;
+
+  /// Set to true (only applicable to ELF -fpic) if dso_local should be
+ /// dropped for a declaration.
+ ///
+ /// On ELF, the assembler is conservative and assumes a global default
+ /// visibility symbol can be interposable. No direct access relocation is
+ /// allowed, if the definition is not in the translation unit, even if the
+ /// definition is available in the linkage unit. Thus we need to clear
+ /// dso_local to disable direct access.
+ ///
+ /// This flag should not be set for -fno-pic or -fpie, which would
+ /// unnecessarily disable direct access.
+ bool ClearDSOLocalOnDeclarations;
+
+ /// Set of llvm.*used values, in order to validate that we don't try
+ /// to promote any non-renamable values.
+ SmallPtrSet<GlobalValue *, 8> Used;
+
+ /// Keep track of any COMDATs that require renaming (because COMDAT
+ /// leader was promoted and renamed). Maps from original COMDAT to one
+ /// with new name.
+ DenseMap<const Comdat *, Comdat *> RenamedComdats;
+
+ /// Check if we should promote the given local value to global scope.
+ bool shouldPromoteLocalToGlobal(const GlobalValue *SGV, ValueInfo VI);
+
+#ifndef NDEBUG
+ /// Check if the given value is a local that can't be renamed (promoted).
+ /// Only used in assertion checking, and disabled under NDEBUG since the Used
+ /// set will not be populated.
+ bool isNonRenamableLocal(const GlobalValue &GV) const;
+#endif
+
+ /// Helper methods to check if we are importing from or potentially
+ /// exporting from the current source module.
+ bool isPerformingImport() const { return GlobalsToImport != nullptr; }
+ bool isModuleExporting() const { return HasExportedFunctions; }
+
+ /// If we are importing from the source module, checks if we should
+ /// import SGV as a definition, otherwise import as a declaration.
+ bool doImportAsDefinition(const GlobalValue *SGV);
+
+ /// Get the name for a local SGV that should be promoted and renamed to global
+ /// scope in the linked destination module.
+ std::string getPromotedName(const GlobalValue *SGV);
+
+ /// Process globals so that they can be used in ThinLTO. This includes
+  /// promoting local variables so that they can be referenced externally by
+  /// ThinLTO-imported globals, and converting strong external globals to
+ /// available_externally.
+ void processGlobalsForThinLTO();
+ void processGlobalForThinLTO(GlobalValue &GV);
+
+ /// Get the new linkage for SGV that should be used in the linked destination
+ /// module. Specifically, for ThinLTO importing or exporting it may need
+ /// to be adjusted. When \p DoPromote is true then we must adjust the
+ /// linkage for a required promotion of a local to global scope.
+ GlobalValue::LinkageTypes getLinkage(const GlobalValue *SGV, bool DoPromote);
+
+public:
+ FunctionImportGlobalProcessing(Module &M, const ModuleSummaryIndex &Index,
+ SetVector<GlobalValue *> *GlobalsToImport,
+ bool ClearDSOLocalOnDeclarations)
+ : M(M), ImportIndex(Index), GlobalsToImport(GlobalsToImport),
+ ClearDSOLocalOnDeclarations(ClearDSOLocalOnDeclarations) {
+ // If we have a ModuleSummaryIndex but no function to import,
+ // then this is the primary module being compiled in a ThinLTO
+ // backend compilation, and we need to see if it has functions that
+ // may be exported to another backend compilation.
+ if (!GlobalsToImport)
+ HasExportedFunctions = ImportIndex.hasExportedFunctions(M);
+
+#ifndef NDEBUG
+ // First collect those in the llvm.used set.
+ collectUsedGlobalVariables(M, Used, /*CompilerUsed*/ false);
+ // Next collect those in the llvm.compiler.used set.
+ collectUsedGlobalVariables(M, Used, /*CompilerUsed*/ true);
+#endif
+ }
+
+ bool run();
+};
+
+/// Perform in-place global value handling on the given Module for
+/// exported local functions renamed and promoted for ThinLTO.
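+///
+/// A minimal sketch of the in-place case (M and Index are assumed to be
+/// provided by the ThinLTO backend driver; no import list is passed, so only
+/// exported locals get promoted and renamed):
+/// \code
+///   bool Changed = renameModuleForThinLTO(
+///       M, Index, /*ClearDSOLocalOnDeclarations=*/false);
+/// \endcode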
+bool renameModuleForThinLTO(
+ Module &M, const ModuleSummaryIndex &Index,
+ bool ClearDSOLocalOnDeclarations,
+ SetVector<GlobalValue *> *GlobalsToImport = nullptr);
+
+/// Compute synthetic function entry counts.
+void computeSyntheticCounts(ModuleSummaryIndex &Index);
+
+} // End llvm namespace
+
+#endif
+
+#ifdef __GNUC__
+#pragma GCC diagnostic pop
+#endif
diff --git a/contrib/libs/llvm12/include/llvm/Transforms/Utils/GlobalStatus.h b/contrib/libs/llvm12/include/llvm/Transforms/Utils/GlobalStatus.h
new file mode 100644
index 0000000000..cd1121d282
--- /dev/null
+++ b/contrib/libs/llvm12/include/llvm/Transforms/Utils/GlobalStatus.h
@@ -0,0 +1,95 @@
+#pragma once
+
+#ifdef __GNUC__
+#pragma GCC diagnostic push
+#pragma GCC diagnostic ignored "-Wunused-parameter"
+#endif
+
+//===- GlobalStatus.h - Compute status info for globals ---------*- C++ -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_TRANSFORMS_UTILS_GLOBALSTATUS_H
+#define LLVM_TRANSFORMS_UTILS_GLOBALSTATUS_H
+
+#include "llvm/Support/AtomicOrdering.h"
+
+namespace llvm {
+
+class Constant;
+class Function;
+class Value;
+
+/// It is safe to destroy a constant iff it is only used by constants that can
+/// themselves be destroyed. Note that constants cannot be cyclic, so this test
+/// is pretty easy to implement recursively.
+///
+bool isSafeToDestroyConstant(const Constant *C);
+
+/// As we analyze each global, keep track of some information about it. If we
+/// find out that the address of the global is taken, none of this info will be
+/// accurate.
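+///
+/// A minimal usage sketch (GV is a hypothetical global variable):
+/// \code
+///   GlobalStatus GS;
+///   if (!GlobalStatus::analyzeGlobal(GV, GS)) {
+///     if (!GS.IsLoaded) {
+///       // Never read: the global may be deletable.
+///     } else if (GS.StoredType == GlobalStatus::NotStored) {
+///       // Never written: the global can be marked constant.
+///     }
+///   }
+/// \endcode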
+struct GlobalStatus {
+ /// True if the global's address is used in a comparison.
+ bool IsCompared = false;
+
+ /// True if the global is ever loaded. If the global isn't ever loaded it
+ /// can be deleted.
+ bool IsLoaded = false;
+
+ /// Keep track of what stores to the global look like.
+ enum StoredType {
+ /// There is no store to this global. It can thus be marked constant.
+ NotStored,
+
+ /// This global is stored to, but the only thing stored is the constant it
+ /// was initialized with. This is only tracked for scalar globals.
+ InitializerStored,
+
+ /// This global is stored to, but only its initializer and one other value
+    /// are ever stored to it. If this global is StoredOnce, we track the value
+ /// stored to it in StoredOnceValue below. This is only tracked for scalar
+ /// globals.
+ StoredOnce,
+
+ /// This global is stored to by multiple values or something else that we
+ /// cannot track.
+ Stored
+ } StoredType = NotStored;
+
+ /// If only one value (besides the initializer constant) is ever stored to
+ /// this global, keep track of what value it is.
+ Value *StoredOnceValue = nullptr;
+
+ /// These start out null/false. When the first accessing function is noticed,
+ /// it is recorded. When a second different accessing function is noticed,
+ /// HasMultipleAccessingFunctions is set to true.
+ const Function *AccessingFunction = nullptr;
+ bool HasMultipleAccessingFunctions = false;
+
+ /// Set to true if this global has a user that is not an instruction (e.g. a
+ /// constant expr or GV initializer).
+ bool HasNonInstructionUser = false;
+
+ /// Set to the strongest atomic ordering requirement.
+ AtomicOrdering Ordering = AtomicOrdering::NotAtomic;
+
+ GlobalStatus();
+
+ /// Look at all uses of the global and fill in the GlobalStatus structure. If
+ /// the global has its address taken, return true to indicate we can't do
+ /// anything with it.
+ static bool analyzeGlobal(const Value *V, GlobalStatus &GS);
+};
+
+} // end namespace llvm
+
+#endif // LLVM_TRANSFORMS_UTILS_GLOBALSTATUS_H
+
+#ifdef __GNUC__
+#pragma GCC diagnostic pop
+#endif
diff --git a/contrib/libs/llvm12/include/llvm/Transforms/Utils/GuardUtils.h b/contrib/libs/llvm12/include/llvm/Transforms/Utils/GuardUtils.h
new file mode 100644
index 0000000000..ee37635dd5
--- /dev/null
+++ b/contrib/libs/llvm12/include/llvm/Transforms/Utils/GuardUtils.h
@@ -0,0 +1,55 @@
+#pragma once
+
+#ifdef __GNUC__
+#pragma GCC diagnostic push
+#pragma GCC diagnostic ignored "-Wunused-parameter"
+#endif
+
+//===-- GuardUtils.h - Utils for work with guards ---------------*- C++ -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+// Utils that are used to perform transformations related to guards and their
+// conditions.
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_TRANSFORMS_UTILS_GUARDUTILS_H
+#define LLVM_TRANSFORMS_UTILS_GUARDUTILS_H
+
+namespace llvm {
+
+class BranchInst;
+class CallInst;
+class Function;
+class Value;
+
+/// Splits control flow at the point of \p Guard, replacing it with an explicit
+/// branch on the condition of the guard's first argument. The taken branch
+/// then goes to the block that contains \p Guard's successors, and the
+/// non-taken branch goes to a newly-created deopt block that contains a sole
+/// call of the deoptimize function \p DeoptIntrinsic. If 'UseWC' is set,
+/// preserve the widenable nature of the guard by lowering to an equivalent
+/// form. If not set, lower to a form without widenable semantics.
+void makeGuardControlFlowExplicit(Function *DeoptIntrinsic, CallInst *Guard,
+ bool UseWC);
+
+/// Given a branch we know is widenable (defined per Analysis/GuardUtils.h),
+/// widen it such that condition 'NewCond' is also known to hold on the taken
+/// path. Branch remains widenable after transform.
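+///
+/// A minimal sketch (WBR is assumed to be a branch already proven widenable
+/// per Analysis/GuardUtils.h, and NewCond an i1 value available at that
+/// point):
+/// \code
+///   widenWidenableBranch(WBR, NewCond);
+///   // The taken path now implies both the original condition and NewCond.
+/// \endcode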
+void widenWidenableBranch(BranchInst *WidenableBR, Value *NewCond);
+
+/// Given a branch we know is widenable (defined per Analysis/GuardUtils.h),
+/// *set* its condition such that (only) 'Cond' is known to hold on the taken
+/// path and that the branch remains widenable after the transform.
+void setWidenableBranchCond(BranchInst *WidenableBR, Value *Cond);
+
+} // llvm
+
+#endif // LLVM_TRANSFORMS_UTILS_GUARDUTILS_H
+
+#ifdef __GNUC__
+#pragma GCC diagnostic pop
+#endif
diff --git a/contrib/libs/llvm12/include/llvm/Transforms/Utils/InjectTLIMappings.h b/contrib/libs/llvm12/include/llvm/Transforms/Utils/InjectTLIMappings.h
new file mode 100644
index 0000000000..a3e8cc6a6b
--- /dev/null
+++ b/contrib/libs/llvm12/include/llvm/Transforms/Utils/InjectTLIMappings.h
@@ -0,0 +1,48 @@
+#pragma once
+
+#ifdef __GNUC__
+#pragma GCC diagnostic push
+#pragma GCC diagnostic ignored "-Wunused-parameter"
+#endif
+
+//===- InjectTLIMappings.h - TLI to VFABI attribute injection ------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// Populates the VFABI attribute with the scalar-to-vector mappings
+// from the TargetLibraryInfo.
+//
+//===----------------------------------------------------------------------===//
+#ifndef LLVM_TRANSFORMS_UTILS_INJECTTLIMAPPINGS_H
+#define LLVM_TRANSFORMS_UTILS_INJECTTLIMAPPINGS_H
+
+#include "llvm/IR/PassManager.h"
+#include "llvm/InitializePasses.h"
+
+namespace llvm {
+class InjectTLIMappings : public PassInfoMixin<InjectTLIMappings> {
+public:
+ PreservedAnalyses run(Function &F, FunctionAnalysisManager &AM);
+};
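+
+// A minimal scheduling sketch for the new pass manager (assumes F is a
+// Function and FAM a FunctionAnalysisManager with the standard function
+// analyses, including TargetLibraryAnalysis, already registered):
+//
+//   FunctionPassManager FPM;
+//   FPM.addPass(InjectTLIMappings());
+//   FPM.run(F, FAM);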
+
+// Legacy pass
+class InjectTLIMappingsLegacy : public FunctionPass {
+public:
+ static char ID;
+ InjectTLIMappingsLegacy() : FunctionPass(ID) {
+ initializeInjectTLIMappingsLegacyPass(*PassRegistry::getPassRegistry());
+ }
+ void getAnalysisUsage(AnalysisUsage &AU) const override;
+ bool runOnFunction(Function &F) override;
+};
+
+} // End namespace llvm
+#endif // LLVM_TRANSFORMS_UTILS_INJECTTLIMAPPINGS_H
+
+#ifdef __GNUC__
+#pragma GCC diagnostic pop
+#endif
diff --git a/contrib/libs/llvm12/include/llvm/Transforms/Utils/InstructionNamer.h b/contrib/libs/llvm12/include/llvm/Transforms/Utils/InstructionNamer.h
new file mode 100644
index 0000000000..c9ede371d9
--- /dev/null
+++ b/contrib/libs/llvm12/include/llvm/Transforms/Utils/InstructionNamer.h
@@ -0,0 +1,31 @@
+#pragma once
+
+#ifdef __GNUC__
+#pragma GCC diagnostic push
+#pragma GCC diagnostic ignored "-Wunused-parameter"
+#endif
+
+//===- InstructionNamer.h - Give anonymous instructions names -------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_TRANSFORMS_UTILS_INSTRUCTIONNAMER_H
+#define LLVM_TRANSFORMS_UTILS_INSTRUCTIONNAMER_H
+
+#include "llvm/IR/PassManager.h"
+
+namespace llvm {
+struct InstructionNamerPass : PassInfoMixin<InstructionNamerPass> {
+ PreservedAnalyses run(Function &, FunctionAnalysisManager &);
+};
+} // namespace llvm
+
+#endif // LLVM_TRANSFORMS_UTILS_INSTRUCTIONNAMER_H
+
+#ifdef __GNUC__
+#pragma GCC diagnostic pop
+#endif
diff --git a/contrib/libs/llvm12/include/llvm/Transforms/Utils/IntegerDivision.h b/contrib/libs/llvm12/include/llvm/Transforms/Utils/IntegerDivision.h
new file mode 100644
index 0000000000..565286ecc9
--- /dev/null
+++ b/contrib/libs/llvm12/include/llvm/Transforms/Utils/IntegerDivision.h
@@ -0,0 +1,83 @@
+#pragma once
+
+#ifdef __GNUC__
+#pragma GCC diagnostic push
+#pragma GCC diagnostic ignored "-Wunused-parameter"
+#endif
+
+//===- llvm/Transforms/Utils/IntegerDivision.h ------------------*- C++ -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// This file contains an implementation of 32bit and 64bit scalar integer
+// division for targets that don't have native support. It's largely derived
+// from compiler-rt's implementations of __udivsi3 and __udivmoddi4,
+// but hand-tuned for targets that prefer less control flow.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_TRANSFORMS_UTILS_INTEGERDIVISION_H
+#define LLVM_TRANSFORMS_UTILS_INTEGERDIVISION_H
+
+namespace llvm {
+ class BinaryOperator;
+}
+
+namespace llvm {
+
+ /// Generate code to calculate the remainder of two integers, replacing Rem
+ /// with the generated code. This currently generates code using the udiv
+ /// expansion, but future work includes generating more specialized code,
+  /// e.g. when more information about the operands is known. Implements both
+  /// 32-bit and 64-bit scalar division.
+ ///
+ /// Replace Rem with generated code.
+ bool expandRemainder(BinaryOperator *Rem);
+
+ /// Generate code to divide two integers, replacing Div with the generated
+ /// code. This currently generates code similarly to compiler-rt's
+ /// implementations, but future work includes generating more specialized code
+  /// when more information about the operands is known. Implements both
+  /// 32-bit and 64-bit scalar division.
+ ///
+ /// Replace Div with generated code.
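+  ///
+  /// A minimal sketch (F is a hypothetical function whose scalar 32- or 64-bit
+  /// integer divisions should be expanded; the early-inc range is needed
+  /// because the expanded instruction is erased):
+  /// \code
+  ///   for (BasicBlock &BB : F)
+  ///     for (Instruction &I : make_early_inc_range(BB))
+  ///       if (auto *BO = dyn_cast<BinaryOperator>(&I))
+  ///         if (BO->getOpcode() == Instruction::UDiv ||
+  ///             BO->getOpcode() == Instruction::SDiv)
+  ///           expandDivision(BO);
+  /// \endcode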
+ bool expandDivision(BinaryOperator* Div);
+
+ /// Generate code to calculate the remainder of two integers, replacing Rem
+  /// with the generated code. Uses expandRemainder with a 32-bit Rem, which
+  /// makes it useful for targets with little or no support for less than
+  /// 32-bit arithmetic.
+ ///
+ /// Replace Rem with generated code.
+ bool expandRemainderUpTo32Bits(BinaryOperator *Rem);
+
+ /// Generate code to calculate the remainder of two integers, replacing Rem
+  /// with the generated code. Uses expandRemainder with a 64-bit Rem.
+ ///
+ /// Replace Rem with generated code.
+ bool expandRemainderUpTo64Bits(BinaryOperator *Rem);
+
+ /// Generate code to divide two integers, replacing Div with the generated
+  /// code. Uses expandDivision with a 32-bit Div, which makes it useful for
+  /// targets with little or no support for less than 32-bit arithmetic.
+  ///
+  /// Replace Div with generated code.
+ bool expandDivisionUpTo32Bits(BinaryOperator *Div);
+
+ /// Generate code to divide two integers, replacing Div with the generated
+  /// code. Uses expandDivision with a 64-bit Div.
+  ///
+  /// Replace Div with generated code.
+ bool expandDivisionUpTo64Bits(BinaryOperator *Div);
+
+} // End llvm namespace
+
+#endif
+
+#ifdef __GNUC__
+#pragma GCC diagnostic pop
+#endif
diff --git a/contrib/libs/llvm12/include/llvm/Transforms/Utils/LCSSA.h b/contrib/libs/llvm12/include/llvm/Transforms/Utils/LCSSA.h
new file mode 100644
index 0000000000..6e3ed90d04
--- /dev/null
+++ b/contrib/libs/llvm12/include/llvm/Transforms/Utils/LCSSA.h
@@ -0,0 +1,54 @@
+#pragma once
+
+#ifdef __GNUC__
+#pragma GCC diagnostic push
+#pragma GCC diagnostic ignored "-Wunused-parameter"
+#endif
+
+//===- LCSSA.h - Loop-closed SSA transform Pass -----------------*- C++ -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// This pass transforms loops by placing phi nodes at the end of the loops for
+// all values that are live across the loop boundary. For example, it turns
+// the left into the right code:
+//
+// for (...) for (...)
+// if (c) if (c)
+// X1 = ... X1 = ...
+// else else
+// X2 = ... X2 = ...
+// X3 = phi(X1, X2) X3 = phi(X1, X2)
+// ... = X3 + 4 X4 = phi(X3)
+// ... = X4 + 4
+//
+// This is still valid LLVM; the extra phi nodes are purely redundant, and will
+// be trivially eliminated by InstCombine. The major benefit of this
+// transformation is that it makes many other loop optimizations, such as
+// LoopUnswitching, simpler.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_TRANSFORMS_UTILS_LCSSA_H
+#define LLVM_TRANSFORMS_UTILS_LCSSA_H
+
+#include "llvm/IR/PassManager.h"
+
+namespace llvm {
+
+/// Converts loops into loop-closed SSA form.
+class LCSSAPass : public PassInfoMixin<LCSSAPass> {
+public:
+ PreservedAnalyses run(Function &F, FunctionAnalysisManager &AM);
+};
+} // end namespace llvm
+
+#endif // LLVM_TRANSFORMS_UTILS_LCSSA_H
+
+#ifdef __GNUC__
+#pragma GCC diagnostic pop
+#endif
diff --git a/contrib/libs/llvm12/include/llvm/Transforms/Utils/LibCallsShrinkWrap.h b/contrib/libs/llvm12/include/llvm/Transforms/Utils/LibCallsShrinkWrap.h
new file mode 100644
index 0000000000..12894d98f9
--- /dev/null
+++ b/contrib/libs/llvm12/include/llvm/Transforms/Utils/LibCallsShrinkWrap.h
@@ -0,0 +1,37 @@
+#pragma once
+
+#ifdef __GNUC__
+#pragma GCC diagnostic push
+#pragma GCC diagnostic ignored "-Wunused-parameter"
+#endif
+
+//===- LibCallsShrinkWrap.h - Shrink Wrap Library Calls -------------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_TRANSFORMS_UTILS_LIBCALLSSHRINKWRAP_H
+#define LLVM_TRANSFORMS_UTILS_LIBCALLSSHRINKWRAP_H
+
+#include "llvm/IR/PassManager.h"
+
+namespace llvm {
+
+class LibCallsShrinkWrapPass : public PassInfoMixin<LibCallsShrinkWrapPass> {
+public:
+ static StringRef name() { return "LibCallsShrinkWrapPass"; }
+
+ PreservedAnalyses run(Function &F, FunctionAnalysisManager &FAM);
+};
+} // end namespace llvm
+
+#endif // LLVM_TRANSFORMS_UTILS_LIBCALLSSHRINKWRAP_H
+
+#ifdef __GNUC__
+#pragma GCC diagnostic pop
+#endif
diff --git a/contrib/libs/llvm12/include/llvm/Transforms/Utils/Local.h b/contrib/libs/llvm12/include/llvm/Transforms/Utils/Local.h
new file mode 100644
index 0000000000..9c76913256
--- /dev/null
+++ b/contrib/libs/llvm12/include/llvm/Transforms/Utils/Local.h
@@ -0,0 +1,502 @@
+#pragma once
+
+#ifdef __GNUC__
+#pragma GCC diagnostic push
+#pragma GCC diagnostic ignored "-Wunused-parameter"
+#endif
+
+//===- Local.h - Functions to perform local transformations -----*- C++ -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// This family of functions perform various local transformations to the
+// program.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_TRANSFORMS_UTILS_LOCAL_H
+#define LLVM_TRANSFORMS_UTILS_LOCAL_H
+
+#include "llvm/ADT/ArrayRef.h"
+#include "llvm/ADT/STLExtras.h"
+#include "llvm/ADT/SmallVector.h"
+#include "llvm/ADT/TinyPtrVector.h"
+#include "llvm/Analysis/Utils/Local.h"
+#include "llvm/IR/Constant.h"
+#include "llvm/IR/Constants.h"
+#include "llvm/IR/DataLayout.h"
+#include "llvm/IR/Dominators.h"
+#include "llvm/IR/Operator.h"
+#include "llvm/IR/Type.h"
+#include "llvm/IR/User.h"
+#include "llvm/IR/Value.h"
+#include "llvm/IR/ValueHandle.h"
+#include "llvm/Support/Casting.h"
+#include "llvm/Support/CommandLine.h"
+#include "llvm/Transforms/Utils/SimplifyCFGOptions.h"
+#include <cstdint>
+#include <limits>
+
+namespace llvm {
+
+class AAResults;
+class AllocaInst;
+class AssumptionCache;
+class BasicBlock;
+class BranchInst;
+class CallBase;
+class CallInst;
+class DbgDeclareInst;
+class DbgVariableIntrinsic;
+class DbgValueInst;
+class DIBuilder;
+class DomTreeUpdater;
+class Function;
+class Instruction;
+class InvokeInst;
+class LoadInst;
+class MDNode;
+class MemorySSAUpdater;
+class PHINode;
+class StoreInst;
+class TargetLibraryInfo;
+class TargetTransformInfo;
+
+//===----------------------------------------------------------------------===//
+// Local constant propagation.
+//
+
+/// If a terminator instruction is predicated on a constant value, convert it
+/// into an unconditional branch to the constant destination.
+/// This is a nontrivial operation because the successors of this basic block
+/// must have their PHI nodes updated.
+/// Also calls RecursivelyDeleteTriviallyDeadInstructions() on any branch/switch
+/// conditions and indirectbr addresses this might make dead if
+/// DeleteDeadConditions is true.
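+///
+/// A minimal sketch (F is a hypothetical function; folding does not erase the
+/// block itself, so a plain range-for over the blocks is fine):
+/// \code
+///   bool Changed = false;
+///   for (BasicBlock &BB : F)
+///     Changed |= ConstantFoldTerminator(&BB, /*DeleteDeadConditions=*/true);
+/// \endcode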
+bool ConstantFoldTerminator(BasicBlock *BB, bool DeleteDeadConditions = false,
+ const TargetLibraryInfo *TLI = nullptr,
+ DomTreeUpdater *DTU = nullptr);
+
+//===----------------------------------------------------------------------===//
+// Local dead code elimination.
+//
+
+/// Return true if the result produced by the instruction is not used, and the
+/// instruction has no side effects.
+bool isInstructionTriviallyDead(Instruction *I,
+ const TargetLibraryInfo *TLI = nullptr);
+
+/// Return true if the result produced by the instruction would have no side
+/// effects if it was not used. This is equivalent to checking whether
+/// isInstructionTriviallyDead would be true if the use count was 0.
+bool wouldInstructionBeTriviallyDead(Instruction *I,
+ const TargetLibraryInfo *TLI = nullptr);
+
+/// If the specified value is a trivially dead instruction, delete it.
+/// If that makes any of its operands trivially dead, delete them too,
+/// recursively. Return true if any instructions were deleted.
+bool RecursivelyDeleteTriviallyDeadInstructions(
+ Value *V, const TargetLibraryInfo *TLI = nullptr,
+ MemorySSAUpdater *MSSAU = nullptr,
+ std::function<void(Value *)> AboutToDeleteCallback =
+ std::function<void(Value *)>());
+
+/// Delete all of the instructions in `DeadInsts`, and all other instructions
+/// that deleting these in turn causes to be trivially dead.
+///
+/// The initial instructions in the provided vector must all have empty use
+/// lists and satisfy `isInstructionTriviallyDead`.
+///
+/// `DeadInsts` will be used as scratch storage for this routine and will be
+/// empty afterward.
+void RecursivelyDeleteTriviallyDeadInstructions(
+ SmallVectorImpl<WeakTrackingVH> &DeadInsts,
+ const TargetLibraryInfo *TLI = nullptr, MemorySSAUpdater *MSSAU = nullptr,
+ std::function<void(Value *)> AboutToDeleteCallback =
+ std::function<void(Value *)>());
+
+/// Same functionality as RecursivelyDeleteTriviallyDeadInstructions, but allow
+/// instructions that are not trivially dead. These will be ignored.
+/// Returns true if any changes were made, i.e. any instructions trivially dead
+/// were found and deleted.
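+///
+/// A minimal sketch (the vector is assumed to be filled with instructions
+/// whose uses were just dropped during some rewrite; entries that are still
+/// live are simply skipped):
+/// \code
+///   SmallVector<WeakTrackingVH, 16> DeadInsts;
+///   // ... push_back candidate instructions while rewriting ...
+///   RecursivelyDeleteTriviallyDeadInstructionsPermissive(DeadInsts);
+/// \endcode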
+bool RecursivelyDeleteTriviallyDeadInstructionsPermissive(
+ SmallVectorImpl<WeakTrackingVH> &DeadInsts,
+ const TargetLibraryInfo *TLI = nullptr, MemorySSAUpdater *MSSAU = nullptr,
+ std::function<void(Value *)> AboutToDeleteCallback =
+ std::function<void(Value *)>());
+
+/// If the specified value is an effectively dead PHI node, due to being a
+/// def-use chain of single-use nodes that either forms a cycle or is terminated
+/// by a trivially dead instruction, delete it. If that makes any of its
+/// operands trivially dead, delete them too, recursively. Return true if a
+/// change was made.
+bool RecursivelyDeleteDeadPHINode(PHINode *PN,
+ const TargetLibraryInfo *TLI = nullptr,
+ MemorySSAUpdater *MSSAU = nullptr);
+
+/// Scan the specified basic block and try to simplify any instructions in it
+/// and recursively delete dead instructions.
+///
+/// This returns true if it changed the code, note that it can delete
+/// instructions in other blocks as well in this block.
+bool SimplifyInstructionsInBlock(BasicBlock *BB,
+ const TargetLibraryInfo *TLI = nullptr);
+
+/// Replace all the uses of an SSA value in @llvm.dbg intrinsics with
+/// undef. This is useful for signaling that a variable, e.g. has been
+/// found dead and hence it's unavailable at a given program point.
+/// Returns true if the dbg values have been changed.
+bool replaceDbgUsesWithUndef(Instruction *I);
+
+//===----------------------------------------------------------------------===//
+// Control Flow Graph Restructuring.
+//
+
+/// BB is a block with one predecessor and its predecessor is known to have one
+/// successor (BB!). Eliminate the edge between them, moving the instructions in
+/// the predecessor into BB. This deletes the predecessor block.
+void MergeBasicBlockIntoOnlyPred(BasicBlock *BB, DomTreeUpdater *DTU = nullptr);
+
+/// BB is known to contain an unconditional branch, and contains no instructions
+/// other than PHI nodes, potential debug intrinsics and the branch. If
+/// possible, eliminate BB by rewriting all the predecessors to branch to the
+/// successor block and return true. If we can't transform, return false.
+bool TryToSimplifyUncondBranchFromEmptyBlock(BasicBlock *BB,
+ DomTreeUpdater *DTU = nullptr);
+
+/// Check for and eliminate duplicate PHI nodes in this block. This doesn't try
+/// to be clever about PHI nodes which differ only in the order of the incoming
+/// values, but instcombine orders them so it usually won't matter.
+bool EliminateDuplicatePHINodes(BasicBlock *BB);
+
+/// This function is used to do simplification of a CFG. For example, it
+/// adjusts branches to branches to eliminate the extra hop, it eliminates
+/// unreachable basic blocks, and does other peephole optimization of the CFG.
+/// It returns true if a modification was made, possibly deleting the basic
+/// block that was pointed to. LoopHeaders is an optional input parameter
+/// providing the set of loop headers that SimplifyCFG should not eliminate.
+extern cl::opt<bool> RequireAndPreserveDomTree;
+bool simplifyCFG(BasicBlock *BB, const TargetTransformInfo &TTI,
+ DomTreeUpdater *DTU = nullptr,
+ const SimplifyCFGOptions &Options = {},
+ ArrayRef<WeakVH> LoopHeaders = {});
+
+/// This function is used to flatten a CFG. For example, it uses parallel-and
+/// and parallel-or mode to collapse if-conditions and merge if-regions with
+/// identical statements.
+bool FlattenCFG(BasicBlock *BB, AAResults *AA = nullptr);
+
+/// If this basic block is ONLY a setcc and a branch, and if a predecessor
+/// branches to us and one of our successors, fold the setcc into the
+/// predecessor and use logical operations to pick the right destination.
+bool FoldBranchToCommonDest(BranchInst *BI, llvm::DomTreeUpdater *DTU = nullptr,
+ MemorySSAUpdater *MSSAU = nullptr,
+ const TargetTransformInfo *TTI = nullptr,
+ unsigned BonusInstThreshold = 1);
+
+/// This function takes a virtual register computed by an Instruction and
+/// replaces it with a slot in the stack frame, allocated via alloca.
+/// This allows the CFG to be changed around without fear of invalidating the
+/// SSA information for the value. It returns the pointer to the alloca inserted
+/// to create a stack slot for X.
+AllocaInst *DemoteRegToStack(Instruction &X,
+ bool VolatileLoads = false,
+ Instruction *AllocaPoint = nullptr);
+
+/// This function takes a virtual register computed by a phi node and replaces
+/// it with a slot in the stack frame, allocated via alloca. The phi node is
+/// deleted and it returns the pointer to the alloca inserted.
+AllocaInst *DemotePHIToStack(PHINode *P, Instruction *AllocaPoint = nullptr);
+
+/// Try to ensure that the alignment of \p V is at least \p PrefAlign bytes. If
+/// the owning object can be modified and has an alignment less than \p
+/// PrefAlign, it will be increased and \p PrefAlign returned. If the alignment
+/// cannot be increased, the known alignment of the value is returned.
+///
+/// It is not always possible to modify the alignment of the underlying object,
+/// so if alignment is important, a more reliable approach is to simply align
+/// all global variables and allocation instructions to their preferred
+/// alignment from the beginning.
+Align getOrEnforceKnownAlignment(Value *V, MaybeAlign PrefAlign,
+ const DataLayout &DL,
+ const Instruction *CxtI = nullptr,
+ AssumptionCache *AC = nullptr,
+ const DominatorTree *DT = nullptr);
+
+/// Try to infer an alignment for the specified pointer.
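+///
+/// A minimal sketch (Ptr is a hypothetical pointer operand and DL the module's
+/// DataLayout):
+/// \code
+///   Align Known = getKnownAlignment(Ptr, DL);
+///   if (Known.value() >= 16) {
+///     // A 16-byte aligned access is safe here.
+///   }
+/// \endcode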
+inline Align getKnownAlignment(Value *V, const DataLayout &DL,
+ const Instruction *CxtI = nullptr,
+ AssumptionCache *AC = nullptr,
+ const DominatorTree *DT = nullptr) {
+ return getOrEnforceKnownAlignment(V, MaybeAlign(), DL, CxtI, AC, DT);
+}
+
+/// Create a call that matches the invoke \p II in terms of arguments,
+/// attributes, debug information, etc. The call is not placed in a block and it
+/// will not have a name. The invoke instruction is not removed, nor are the
+/// uses replaced by the new call.
+CallInst *createCallMatchingInvoke(InvokeInst *II);
+
+/// This function converts the specified invoke into a normal call.
+void changeToCall(InvokeInst *II, DomTreeUpdater *DTU = nullptr);
+
+///===---------------------------------------------------------------------===//
+/// Dbg Intrinsic utilities
+///
+
+/// Inserts a llvm.dbg.value intrinsic before a store to an alloca'd value
+/// that has an associated llvm.dbg.declare or llvm.dbg.addr intrinsic.
+void ConvertDebugDeclareToDebugValue(DbgVariableIntrinsic *DII,
+ StoreInst *SI, DIBuilder &Builder);
+
+/// Inserts a llvm.dbg.value intrinsic before a load of an alloca'd value
+/// that has an associated llvm.dbg.declare or llvm.dbg.addr intrinsic.
+void ConvertDebugDeclareToDebugValue(DbgVariableIntrinsic *DII,
+ LoadInst *LI, DIBuilder &Builder);
+
+/// Inserts a llvm.dbg.value intrinsic after a phi that has an associated
+/// llvm.dbg.declare or llvm.dbg.addr intrinsic.
+void ConvertDebugDeclareToDebugValue(DbgVariableIntrinsic *DII,
+ PHINode *LI, DIBuilder &Builder);
+
+/// Lowers llvm.dbg.declare intrinsics into appropriate set of
+/// llvm.dbg.value intrinsics.
+bool LowerDbgDeclare(Function &F);
+
+/// Propagate dbg.value intrinsics through the newly inserted PHIs.
+void insertDebugValuesForPHIs(BasicBlock *BB,
+ SmallVectorImpl<PHINode *> &InsertedPHIs);
+
+/// Finds all intrinsics declaring local variables as living in the memory that
+/// 'V' points to. This may include a mix of dbg.declare and
+/// dbg.addr intrinsics.
+TinyPtrVector<DbgVariableIntrinsic *> FindDbgAddrUses(Value *V);
+
+/// Like \c FindDbgAddrUses, but only returns dbg.declare intrinsics, not
+/// dbg.addr.
+TinyPtrVector<DbgDeclareInst *> FindDbgDeclareUses(Value *V);
+
+/// Finds the llvm.dbg.value intrinsics describing a value.
+void findDbgValues(SmallVectorImpl<DbgValueInst *> &DbgValues, Value *V);
+
+/// Finds the debug info intrinsics describing a value.
+void findDbgUsers(SmallVectorImpl<DbgVariableIntrinsic *> &DbgInsts, Value *V);
+
+/// Replaces llvm.dbg.declare instruction when the address it
+/// describes is replaced with a new value. If Deref is true, an
+/// additional DW_OP_deref is prepended to the expression. If Offset
+/// is non-zero, a constant displacement is added to the expression
+/// (between the optional Deref operations). Offset can be negative.
+bool replaceDbgDeclare(Value *Address, Value *NewAddress, DIBuilder &Builder,
+ uint8_t DIExprFlags, int Offset);
+
+/// Replaces multiple llvm.dbg.value instructions when the alloca it describes
+/// is replaced with a new value. If Offset is non-zero, a constant displacement
+/// is added to the expression (after the mandatory Deref). Offset can be
+/// negative. New llvm.dbg.value instructions are inserted at the locations of
+/// the instructions they replace.
+void replaceDbgValueForAlloca(AllocaInst *AI, Value *NewAllocaAddress,
+ DIBuilder &Builder, int Offset = 0);
+
+/// Assuming the instruction \p I is going to be deleted, attempt to salvage
+/// debug users of \p I by writing the effect of \p I in a DIExpression. If it
+/// cannot be salvaged, changes its debug uses to undef.
+void salvageDebugInfo(Instruction &I);
+
+
+/// Implementation of salvageDebugInfo, applying only to instructions in
+/// \p Insns, rather than all debug users from findDbgUsers( \p I).
+/// Returns true if any debug users were updated.
+/// Mark undef if salvaging cannot be completed.
+void salvageDebugInfoForDbgValues(Instruction &I,
+ ArrayRef<DbgVariableIntrinsic *> Insns);
+
+/// Given an instruction \p I and DIExpression \p DIExpr operating on it, write
+/// the effects of \p I into the returned DIExpression, or return nullptr if
+/// it cannot be salvaged. \p StackVal: whether DW_OP_stack_value should be
+/// appended to the expression.
+DIExpression *salvageDebugInfoImpl(Instruction &I, DIExpression *DIExpr,
+ bool StackVal);
+
+/// Point debug users of \p From to \p To or salvage them. Use this function
+/// only when replacing all uses of \p From with \p To, with a guarantee that
+/// \p From is going to be deleted.
+///
+/// Follow these rules to prevent use-before-def of \p To:
+/// . If \p To is a linked Instruction, set \p DomPoint to \p To.
+/// . If \p To is an unlinked Instruction, set \p DomPoint to the Instruction
+/// \p To will be inserted after.
+/// . If \p To is not an Instruction (e.g a Constant), the choice of
+/// \p DomPoint is arbitrary. Pick \p From for simplicity.
+///
+/// If a debug user cannot be preserved without reordering variable updates or
+/// introducing a use-before-def, it is either salvaged (\ref salvageDebugInfo)
+/// or deleted. Returns true if any debug users were updated.
+bool replaceAllDbgUsesWith(Instruction &From, Value &To, Instruction &DomPoint,
+ DominatorTree &DT);
+
+/// Remove all instructions from a basic block other than its terminator
+/// and any present EH pad instructions. Returns a pair where the first element
+/// is the number of instructions (excluding debug info intrinsics) that have
+/// been removed, and the second element is the number of debug info intrinsics
+/// that have been removed.
+std::pair<unsigned, unsigned>
+removeAllNonTerminatorAndEHPadInstructions(BasicBlock *BB);
+
+/// Insert an unreachable instruction before the specified
+/// instruction, making it and the rest of the code in the block dead.
+unsigned changeToUnreachable(Instruction *I, bool UseLLVMTrap,
+ bool PreserveLCSSA = false,
+ DomTreeUpdater *DTU = nullptr,
+ MemorySSAUpdater *MSSAU = nullptr);
+
+/// Convert the CallInst to InvokeInst with the specified unwind edge basic
+/// block. This also splits the basic block where CI is located, because
+/// InvokeInst is a terminator instruction. Returns the newly split basic
+/// block.
+BasicBlock *changeToInvokeAndSplitBasicBlock(CallInst *CI,
+ BasicBlock *UnwindEdge);
+
+/// Replace 'BB's terminator with one that does not have an unwind successor
+/// block. Rewrites `invoke` to `call`, etc. Updates any PHIs in unwind
+/// successor.
+///
+/// \param BB Block whose terminator will be replaced. Its terminator must
+/// have an unwind successor.
+void removeUnwindEdge(BasicBlock *BB, DomTreeUpdater *DTU = nullptr);
+
+/// Remove all blocks that can not be reached from the function's entry.
+///
+/// Returns true if any basic block was removed.
+bool removeUnreachableBlocks(Function &F, DomTreeUpdater *DTU = nullptr,
+ MemorySSAUpdater *MSSAU = nullptr);
+
+/// Combine the metadata of two instructions so that K can replace J. Some
+/// metadata kinds can only be kept if K does not move, meaning it dominated
+/// J in the original IR.
+///
+/// Metadata not listed as known via KnownIDs is removed
+void combineMetadata(Instruction *K, const Instruction *J,
+ ArrayRef<unsigned> KnownIDs, bool DoesKMove);
+
+/// Combine the metadata of two instructions so that K can replace J. This
+/// specifically handles the case of CSE-like transformations. Some
+/// metadata can only be kept if K dominates J. For this to be correct,
+/// K cannot be hoisted.
+///
+/// Unknown metadata is removed.
+void combineMetadataForCSE(Instruction *K, const Instruction *J,
+ bool DoesKMove);
+
+/// Copy the metadata from the source instruction to the destination (the
+/// replacement for the source instruction).
+void copyMetadataForLoad(LoadInst &Dest, const LoadInst &Source);
+
+/// Patch the replacement so that it is not more restrictive than the value
+/// being replaced. It assumes that the replacement does not get moved from
+/// its original position.
+void patchReplacementInstruction(Instruction *I, Value *Repl);
+
+// Replace each use of 'From' with 'To', if that use does not belong to basic
+// block where 'From' is defined. Returns the number of replacements made.
+unsigned replaceNonLocalUsesWith(Instruction *From, Value *To);
+
+/// Replace each use of 'From' with 'To' if that use is dominated by
+/// the given edge. Returns the number of replacements made.
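+///
+/// A minimal sketch (assumes we just learned that X equals constant C whenever
+/// the edge From->To is taken, e.g. from a conditional branch):
+/// \code
+///   unsigned NumReplaced =
+///       replaceDominatedUsesWith(X, C, DT, BasicBlockEdge(From, To));
+/// \endcode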
+unsigned replaceDominatedUsesWith(Value *From, Value *To, DominatorTree &DT,
+ const BasicBlockEdge &Edge);
+/// Replace each use of 'From' with 'To' if that use is dominated by
+/// the end of the given BasicBlock. Returns the number of replacements made.
+unsigned replaceDominatedUsesWith(Value *From, Value *To, DominatorTree &DT,
+ const BasicBlock *BB);
+
+/// Return true if this call calls a gc leaf function.
+///
+/// A leaf function is a function that does not safepoint the thread during its
+/// execution. During a call or invoke to such a function, the caller's stack
+/// does not have to be made parseable.
+///
+/// Most passes can and should ignore this information, and it is only used
+/// during lowering by the GC infrastructure.
+bool callsGCLeafFunction(const CallBase *Call, const TargetLibraryInfo &TLI);
+
+/// Copy a nonnull metadata node to a new load instruction.
+///
+/// This handles mapping it to range metadata if the new load is an integer
+/// load instead of a pointer load.
+void copyNonnullMetadata(const LoadInst &OldLI, MDNode *N, LoadInst &NewLI);
+
+/// Copy a range metadata node to a new load instruction.
+///
+/// This handles mapping it to nonnull metadata if the new load is a pointer
+/// load instead of an integer load and the range doesn't cover null.
+void copyRangeMetadata(const DataLayout &DL, const LoadInst &OldLI, MDNode *N,
+ LoadInst &NewLI);
+
+/// Remove the debug intrinsic instructions for the given instruction.
+void dropDebugUsers(Instruction &I);
+
+/// Hoist all of the instructions in \p BB to the dominant block \p DomBlock,
+/// by moving them to the insertion point \p InsertPt.
+///
+/// The moved instructions receive the insertion point debug location values
+/// (DILocations) and their debug intrinsic instructions are removed.
+void hoistAllInstructionsInto(BasicBlock *DomBlock, Instruction *InsertPt,
+ BasicBlock *BB);
+
+//===----------------------------------------------------------------------===//
+// Intrinsic pattern matching
+//
+
+/// Try to match a bswap or bitreverse idiom.
+///
+/// If an idiom is matched, an intrinsic call is inserted before \c I. Any added
+/// instructions are returned in \c InsertedInsts. They will all have been added
+/// to a basic block.
+///
+/// A bitreverse idiom normally requires around 2*BW nodes to be searched (where
+/// BW is the bitwidth of the integer type). A bswap idiom requires anywhere up
+/// to BW / 4 nodes to be searched, so is significantly faster.
+///
+/// This function returns true on a successful match or false otherwise.
+bool recognizeBSwapOrBitReverseIdiom(
+ Instruction *I, bool MatchBSwaps, bool MatchBitReversals,
+ SmallVectorImpl<Instruction *> &InsertedInsts);
+
+//===----------------------------------------------------------------------===//
+// Sanitizer utilities
+//
+
+/// Given a CallInst, check if it calls a string function known to CodeGen,
+/// and mark it with NoBuiltin if so. To be used by sanitizers that intend
+/// to intercept string functions and want to avoid converting them to
+/// target-specific instructions.
+void maybeMarkSanitizerLibraryCallNoBuiltin(CallInst *CI,
+ const TargetLibraryInfo *TLI);
+
+//===----------------------------------------------------------------------===//
+// Transform predicates
+//
+
+/// Given an instruction, is it legal to set operand OpIdx to a non-constant
+/// value?
+bool canReplaceOperandWithVariable(const Instruction *I, unsigned OpIdx);
+
+//===----------------------------------------------------------------------===//
+// Value helper functions
+//
+
+/// Invert the given true/false value, possibly reusing an existing copy.
+Value *invertCondition(Value *Condition);
+
+} // end namespace llvm
+
+#endif // LLVM_TRANSFORMS_UTILS_LOCAL_H
+
+#ifdef __GNUC__
+#pragma GCC diagnostic pop
+#endif
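A minimal usage sketch for two of the Local.h utilities declared above; it is illustrative only and not part of the patched header. The helper names (pruneDeadBlocks, invertBranchCondition) are assumptions, and the optional DomTreeUpdater/MemorySSAUpdater arguments are deliberately omitted.

#include "llvm/IR/Function.h"
#include "llvm/IR/Instructions.h"
#include "llvm/Transforms/Utils/Local.h"

using namespace llvm;

// Drop every block that cannot be reached from the entry block. No analysis
// updaters are passed, so only the IR itself is updated.
static bool pruneDeadBlocks(Function &F) {
  return removeUnreachableBlocks(F);
}

// Flip the condition of a conditional branch, reusing an existing inverted
// value when invertCondition can find one.
static void invertBranchCondition(BranchInst &BI) {
  if (BI.isConditional())
    BI.setCondition(invertCondition(BI.getCondition()));
}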
diff --git a/contrib/libs/llvm12/include/llvm/Transforms/Utils/LoopPeel.h b/contrib/libs/llvm12/include/llvm/Transforms/Utils/LoopPeel.h
new file mode 100644
index 0000000000..df15ce4fca
--- /dev/null
+++ b/contrib/libs/llvm12/include/llvm/Transforms/Utils/LoopPeel.h
@@ -0,0 +1,51 @@
+#pragma once
+
+#ifdef __GNUC__
+#pragma GCC diagnostic push
+#pragma GCC diagnostic ignored "-Wunused-parameter"
+#endif
+
+//===- llvm/Transforms/Utils/LoopPeel.h ----- Peeling utilities -*- C++ -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// This file defines some loop peeling utilities. It does not define any
+// actual pass or policy.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_TRANSFORMS_UTILS_LOOPPEEL_H
+#define LLVM_TRANSFORMS_UTILS_LOOPPEEL_H
+
+#include "llvm/Analysis/TargetTransformInfo.h"
+
+namespace llvm {
+
+bool canPeel(Loop *L);
+
+bool peelLoop(Loop *L, unsigned PeelCount, LoopInfo *LI, ScalarEvolution *SE,
+ DominatorTree *DT, AssumptionCache *AC, bool PreserveLCSSA);
+
+TargetTransformInfo::PeelingPreferences
+gatherPeelingPreferences(Loop *L, ScalarEvolution &SE,
+ const TargetTransformInfo &TTI,
+ Optional<bool> UserAllowPeeling,
+ Optional<bool> UserAllowProfileBasedPeeling,
+ bool UnrollingSpecficValues = false);
+
+void computePeelCount(Loop *L, unsigned LoopSize,
+ TargetTransformInfo::PeelingPreferences &PP,
+ unsigned &TripCount, ScalarEvolution &SE,
+ unsigned Threshold = UINT_MAX);
+
+} // end namespace llvm
+
+#endif // LLVM_TRANSFORMS_UTILS_LOOPPEEL_H
+
+#ifdef __GNUC__
+#pragma GCC diagnostic pop
+#endif
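A small sketch of how the peeling utilities declared above might be driven; it is not part of the patched header. The helper name and the assumption that the caller (e.g. a loop pass) already has the listed analyses are illustrative.

#include "llvm/Analysis/AssumptionCache.h"
#include "llvm/Analysis/LoopInfo.h"
#include "llvm/Analysis/ScalarEvolution.h"
#include "llvm/IR/Dominators.h"
#include "llvm/Transforms/Utils/LoopPeel.h"

using namespace llvm;

// Peel the first two iterations of L if the utility considers peeling legal.
static bool peelTwoIterations(Loop *L, LoopInfo &LI, ScalarEvolution &SE,
                              DominatorTree &DT, AssumptionCache &AC) {
  if (!canPeel(L))
    return false;
  return peelLoop(L, /*PeelCount=*/2, &LI, &SE, &DT, &AC,
                  /*PreserveLCSSA=*/true);
}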
diff --git a/contrib/libs/llvm12/include/llvm/Transforms/Utils/LoopRotationUtils.h b/contrib/libs/llvm12/include/llvm/Transforms/Utils/LoopRotationUtils.h
new file mode 100644
index 0000000000..acfbf03d02
--- /dev/null
+++ b/contrib/libs/llvm12/include/llvm/Transforms/Utils/LoopRotationUtils.h
@@ -0,0 +1,52 @@
+#pragma once
+
+#ifdef __GNUC__
+#pragma GCC diagnostic push
+#pragma GCC diagnostic ignored "-Wunused-parameter"
+#endif
+
+//===- LoopRotationUtils.h - Utilities to perform loop rotation -*- C++ -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// This file provides utilities to convert a loop into a loop with bottom test.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_TRANSFORMS_UTILS_LOOPROTATIONUTILS_H
+#define LLVM_TRANSFORMS_UTILS_LOOPROTATIONUTILS_H
+
+namespace llvm {
+
+class AssumptionCache;
+class DominatorTree;
+class Loop;
+class LoopInfo;
+class MemorySSAUpdater;
+class ScalarEvolution;
+struct SimplifyQuery;
+class TargetTransformInfo;
+
+/// Convert a loop into a loop with bottom test. It may
+/// perform loop latch simplification as well if the flag RotationOnly
+/// is false. The flag Threshold represents the size threshold of the loop
+/// header. If the loop header's size exceeds the threshold, the loop rotation
+/// will give up. The flag IsUtilMode controls the heuristic used in the
+/// LoopRotation. If it is true, the profitability heuristic will be ignored.
+bool LoopRotation(Loop *L, LoopInfo *LI, const TargetTransformInfo *TTI,
+ AssumptionCache *AC, DominatorTree *DT, ScalarEvolution *SE,
+ MemorySSAUpdater *MSSAU, const SimplifyQuery &SQ,
+ bool RotationOnly, unsigned Threshold, bool IsUtilMode,
+ bool PrepareForLTO = false);
+
+} // namespace llvm
+
+#endif
+
+#ifdef __GNUC__
+#pragma GCC diagnostic pop
+#endif
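An illustrative sketch (not part of the patched header) showing one way to call LoopRotation with the parameters exposed above. The wrapper name, the choice of UINT_MAX as an effectively unlimited header-size threshold, and the assumption that the analyses are supplied by the caller are all assumptions of this sketch.

#include <climits>
#include "llvm/Analysis/AssumptionCache.h"
#include "llvm/Analysis/InstructionSimplify.h"
#include "llvm/Analysis/LoopInfo.h"
#include "llvm/Analysis/ScalarEvolution.h"
#include "llvm/Analysis/TargetTransformInfo.h"
#include "llvm/IR/Dominators.h"
#include "llvm/Transforms/Utils/LoopRotationUtils.h"

using namespace llvm;

// Rotate L into bottom-tested form unconditionally: IsUtilMode disables the
// profitability heuristic and MemorySSA is not updated (nullptr).
static bool rotateLoopForCanonicalForm(Loop *L, LoopInfo &LI,
                                       const TargetTransformInfo &TTI,
                                       AssumptionCache &AC, DominatorTree &DT,
                                       ScalarEvolution &SE,
                                       const SimplifyQuery &SQ) {
  return LoopRotation(L, &LI, &TTI, &AC, &DT, &SE, /*MSSAU=*/nullptr, SQ,
                      /*RotationOnly=*/true, /*Threshold=*/UINT_MAX,
                      /*IsUtilMode=*/true);
}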
diff --git a/contrib/libs/llvm12/include/llvm/Transforms/Utils/LoopSimplify.h b/contrib/libs/llvm12/include/llvm/Transforms/Utils/LoopSimplify.h
new file mode 100644
index 0000000000..05545b7b82
--- /dev/null
+++ b/contrib/libs/llvm12/include/llvm/Transforms/Utils/LoopSimplify.h
@@ -0,0 +1,81 @@
+#pragma once
+
+#ifdef __GNUC__
+#pragma GCC diagnostic push
+#pragma GCC diagnostic ignored "-Wunused-parameter"
+#endif
+
+//===- LoopSimplify.h - Loop Canonicalization Pass --------------*- C++ -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// This pass performs several transformations to transform natural loops into a
+// simpler form, which makes subsequent analyses and transformations simpler and
+// more effective.
+//
+// Loop pre-header insertion guarantees that there is a single, non-critical
+// entry edge from outside of the loop to the loop header. This simplifies a
+// number of analyses and transformations, such as LICM.
+//
+// Loop exit-block insertion guarantees that all exit blocks from the loop
+// (blocks which are outside of the loop that have predecessors inside of the
+// loop) only have predecessors from inside of the loop (and are thus dominated
+// by the loop header). This simplifies transformations such as store-sinking
+// that are built into LICM.
+//
+// This pass also guarantees that loops will have exactly one backedge.
+//
+// Indirectbr instructions introduce several complications. If the loop
+// contains or is entered by an indirectbr instruction, it may not be possible
+// to transform the loop and make these guarantees. Client code should check
+// that these conditions are true before relying on them.
+//
+// Note that the simplifycfg pass will clean up blocks which are split out but
+// end up being unnecessary, so usage of this pass should not pessimize
+// generated code.
+//
+// This pass obviously modifies the CFG, but updates loop information and
+// dominator information.
+//
+//===----------------------------------------------------------------------===//
+#ifndef LLVM_TRANSFORMS_UTILS_LOOPSIMPLIFY_H
+#define LLVM_TRANSFORMS_UTILS_LOOPSIMPLIFY_H
+
+#include "llvm/IR/PassManager.h"
+
+namespace llvm {
+
+class AssumptionCache;
+class DominatorTree;
+class Loop;
+class LoopInfo;
+class MemorySSAUpdater;
+class ScalarEvolution;
+
+/// This pass is responsible for loop canonicalization.
+class LoopSimplifyPass : public PassInfoMixin<LoopSimplifyPass> {
+public:
+ PreservedAnalyses run(Function &F, FunctionAnalysisManager &AM);
+};
+
+/// Simplify each loop in a loop nest recursively.
+///
+/// This takes a potentially un-simplified loop L (and its children) and turns
+/// it into a simplified loop nest with preheaders and single backedges. It will
+/// update \c DominatorTree, \c LoopInfo, \c ScalarEvolution and \c MemorySSA
+/// analyses if they're non-null, and LCSSA if \c PreserveLCSSA is true.
+bool simplifyLoop(Loop *L, DominatorTree *DT, LoopInfo *LI, ScalarEvolution *SE,
+ AssumptionCache *AC, MemorySSAUpdater *MSSAU,
+ bool PreserveLCSSA);
+
+} // end namespace llvm
+
+#endif // LLVM_TRANSFORMS_UTILS_LOOPSIMPLIFY_H
+
+#ifdef __GNUC__
+#pragma GCC diagnostic pop
+#endif
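A brief sketch (not part of the patched header) of driving simplifyLoop over a whole function. The helper name is an assumption; ScalarEvolution and MemorySSA updates are skipped and LCSSA is not preserved in this illustration.

#include "llvm/Analysis/AssumptionCache.h"
#include "llvm/Analysis/LoopInfo.h"
#include "llvm/IR/Dominators.h"
#include "llvm/Transforms/Utils/LoopSimplify.h"

using namespace llvm;

// Bring every top-level loop nest into loop-simplify form (preheader,
// dedicated exits, single backedge).
static bool simplifyAllLoops(DominatorTree &DT, LoopInfo &LI,
                             AssumptionCache &AC) {
  bool Changed = false;
  for (Loop *L : LI)
    Changed |= simplifyLoop(L, &DT, &LI, /*SE=*/nullptr, &AC,
                            /*MSSAU=*/nullptr, /*PreserveLCSSA=*/false);
  return Changed;
}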
diff --git a/contrib/libs/llvm12/include/llvm/Transforms/Utils/LoopUtils.h b/contrib/libs/llvm12/include/llvm/Transforms/Utils/LoopUtils.h
new file mode 100644
index 0000000000..dc655420b5
--- /dev/null
+++ b/contrib/libs/llvm12/include/llvm/Transforms/Utils/LoopUtils.h
@@ -0,0 +1,498 @@
+#pragma once
+
+#ifdef __GNUC__
+#pragma GCC diagnostic push
+#pragma GCC diagnostic ignored "-Wunused-parameter"
+#endif
+
+//===- llvm/Transforms/Utils/LoopUtils.h - Loop utilities -------*- C++ -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// This file defines some loop transformation utilities.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_TRANSFORMS_UTILS_LOOPUTILS_H
+#define LLVM_TRANSFORMS_UTILS_LOOPUTILS_H
+
+#include "llvm/ADT/StringRef.h"
+#include "llvm/Analysis/IVDescriptors.h"
+#include "llvm/Analysis/TargetTransformInfo.h"
+#include "llvm/Transforms/Utils/ValueMapper.h"
+
+namespace llvm {
+
+template <typename T> class DomTreeNodeBase;
+using DomTreeNode = DomTreeNodeBase<BasicBlock>;
+class AAResults;
+class AliasSet;
+class AliasSetTracker;
+class BasicBlock;
+class BlockFrequencyInfo;
+class ICFLoopSafetyInfo;
+class IRBuilderBase;
+class Loop;
+class LoopInfo;
+class MemoryAccess;
+class MemorySSA;
+class MemorySSAUpdater;
+class OptimizationRemarkEmitter;
+class PredIteratorCache;
+class ScalarEvolution;
+class SCEV;
+class SCEVExpander;
+class TargetLibraryInfo;
+class LPPassManager;
+class Instruction;
+struct RuntimeCheckingPtrGroup;
+typedef std::pair<const RuntimeCheckingPtrGroup *,
+ const RuntimeCheckingPtrGroup *>
+ RuntimePointerCheck;
+
+template <typename T> class Optional;
+template <typename T, unsigned N> class SmallSetVector;
+template <typename T, unsigned N> class SmallVector;
+template <typename T> class SmallVectorImpl;
+template <typename T, unsigned N> class SmallPriorityWorklist;
+
+BasicBlock *InsertPreheaderForLoop(Loop *L, DominatorTree *DT, LoopInfo *LI,
+ MemorySSAUpdater *MSSAU, bool PreserveLCSSA);
+
+/// Ensure that all exit blocks of the loop are dedicated exits.
+///
+/// For any loop exit block with non-loop predecessors, we split the loop
+/// predecessors to use a dedicated loop exit block. We update the dominator
+/// tree and loop info if provided, and will preserve LCSSA if requested.
+bool formDedicatedExitBlocks(Loop *L, DominatorTree *DT, LoopInfo *LI,
+ MemorySSAUpdater *MSSAU, bool PreserveLCSSA);
+
+/// Ensures LCSSA form for every instruction from the Worklist in the scope of
+/// the innermost containing loop.
+///
+/// For each given instruction that has uses outside of the loop, an LCSSA PHI
+/// node is inserted and the uses outside the loop are rewritten to use this
+/// node.
+///
+/// LoopInfo and DominatorTree are required and, since the routine makes no
+/// changes to CFG, preserved.
+///
+/// Returns true if any modifications are made.
+///
+/// This function may introduce unused PHI nodes. If \p PHIsToRemove is not
+/// nullptr, those are added to it (before removing, the caller has to check if
+/// they still do not have any uses). Otherwise the PHIs are directly removed.
+bool formLCSSAForInstructions(
+ SmallVectorImpl<Instruction *> &Worklist, const DominatorTree &DT,
+ const LoopInfo &LI, ScalarEvolution *SE, IRBuilderBase &Builder,
+ SmallVectorImpl<PHINode *> *PHIsToRemove = nullptr);
+
+/// Put loop into LCSSA form.
+///
+/// Looks at all instructions in the loop which have uses outside of the
+/// current loop. For each, an LCSSA PHI node is inserted and the uses outside
+/// the loop are rewritten to use this node. Sub-loops must be in LCSSA form
+/// already.
+///
+/// LoopInfo and DominatorTree are required and preserved.
+///
+/// If ScalarEvolution is passed in, it will be preserved.
+///
+/// Returns true if any modifications are made to the loop.
+bool formLCSSA(Loop &L, const DominatorTree &DT, const LoopInfo *LI,
+ ScalarEvolution *SE);
+
+/// Put a loop nest into LCSSA form.
+///
+/// This recursively forms LCSSA for a loop nest.
+///
+/// LoopInfo and DominatorTree are required and preserved.
+///
+/// If ScalarEvolution is passed in, it will be preserved.
+///
+/// Returns true if any modifications are made to the loop.
+bool formLCSSARecursively(Loop &L, const DominatorTree &DT, const LoopInfo *LI,
+ ScalarEvolution *SE);
+
+/// Flags controlling how much is checked when sinking or hoisting
+/// instructions. The number of memory accesses in the loop (and whether there
+/// are too many) is determined in the constructors when using MemorySSA.
+class SinkAndHoistLICMFlags {
+public:
+ // Explicitly set limits.
+ SinkAndHoistLICMFlags(unsigned LicmMssaOptCap,
+ unsigned LicmMssaNoAccForPromotionCap, bool IsSink,
+ Loop *L = nullptr, MemorySSA *MSSA = nullptr);
+ // Use default limits.
+ SinkAndHoistLICMFlags(bool IsSink, Loop *L = nullptr,
+ MemorySSA *MSSA = nullptr);
+
+ void setIsSink(bool B) { IsSink = B; }
+ bool getIsSink() { return IsSink; }
+ bool tooManyMemoryAccesses() { return NoOfMemAccTooLarge; }
+ bool tooManyClobberingCalls() { return LicmMssaOptCounter >= LicmMssaOptCap; }
+ void incrementClobberingCalls() { ++LicmMssaOptCounter; }
+
+protected:
+ bool NoOfMemAccTooLarge = false;
+ unsigned LicmMssaOptCounter = 0;
+ unsigned LicmMssaOptCap;
+ unsigned LicmMssaNoAccForPromotionCap;
+ bool IsSink;
+};
+
+/// Walk the specified region of the CFG (defined by all blocks
+/// dominated by the specified block, and that are in the current loop) in
+/// reverse depth first order w.r.t the DominatorTree. This allows us to visit
+/// uses before definitions, allowing us to sink a loop body in one pass without
+/// iteration. Takes DomTreeNode, AAResults, LoopInfo, DominatorTree,
+/// BlockFrequencyInfo, TargetLibraryInfo, Loop, AliasSet information for all
+/// instructions of the loop and loop safety information as
+/// arguments. Diagnostics are emitted via \p ORE. Returns true on change.
+bool sinkRegion(DomTreeNode *, AAResults *, LoopInfo *, DominatorTree *,
+ BlockFrequencyInfo *, TargetLibraryInfo *,
+ TargetTransformInfo *, Loop *, AliasSetTracker *,
+ MemorySSAUpdater *, ICFLoopSafetyInfo *,
+ SinkAndHoistLICMFlags &, OptimizationRemarkEmitter *);
+
+/// Walk the specified region of the CFG (defined by all blocks
+/// dominated by the specified block, and that are in the current loop) in depth
+/// first order w.r.t the DominatorTree. This allows us to visit definitions
+/// before uses, allowing us to hoist a loop body in one pass without iteration.
+/// Takes DomTreeNode, AAResults, LoopInfo, DominatorTree,
+/// BlockFrequencyInfo, TargetLibraryInfo, Loop, AliasSet information for all
+/// instructions of the loop and loop safety information as arguments.
+/// Diagnostics are emitted via \p ORE. Returns true on change.
+bool hoistRegion(DomTreeNode *, AAResults *, LoopInfo *, DominatorTree *,
+ BlockFrequencyInfo *, TargetLibraryInfo *, Loop *,
+ AliasSetTracker *, MemorySSAUpdater *, ScalarEvolution *,
+ ICFLoopSafetyInfo *, SinkAndHoistLICMFlags &,
+ OptimizationRemarkEmitter *);
+
+/// This function deletes dead loops. The caller of this function needs to
+/// guarantee that the loop is in fact dead.
+/// The function requires several prerequisites to be present:
+/// - The loop needs to be in LCSSA form
+/// - The loop needs to have a Preheader
+/// - A unique dedicated exit block must exist
+///
+/// This also updates the relevant analysis information in \p DT, \p SE, \p LI
+/// and \p MSSA if pointers to those are provided.
+/// It also updates the loop PM if an updater struct is provided.
+
+void deleteDeadLoop(Loop *L, DominatorTree *DT, ScalarEvolution *SE,
+ LoopInfo *LI, MemorySSA *MSSA = nullptr);
+
+/// Remove the backedge of the specified loop. Handles loop nests and general
+/// loop structures subject to the precondition that the loop has no parent
+/// loop and has a single latch block. Preserves all listed analyses.
+void breakLoopBackedge(Loop *L, DominatorTree &DT, ScalarEvolution &SE,
+ LoopInfo &LI, MemorySSA *MSSA);
+
+/// Try to promote memory values to scalars by sinking stores out of
+/// the loop and moving loads to before the loop. We do this by looping over
+/// the stores in the loop, looking for stores to Must pointers which are
+/// loop invariant. It takes a set of must-alias values, Loop exit blocks
+/// vector, loop exit blocks insertion point vector, PredIteratorCache,
+/// LoopInfo, DominatorTree, Loop, AliasSet information for all instructions
+/// of the loop and loop safety information as arguments.
+/// Diagnostics are emitted via \p ORE. Returns true on change.
+bool promoteLoopAccessesToScalars(
+ const SmallSetVector<Value *, 8> &, SmallVectorImpl<BasicBlock *> &,
+ SmallVectorImpl<Instruction *> &, SmallVectorImpl<MemoryAccess *> &,
+ PredIteratorCache &, LoopInfo *, DominatorTree *, const TargetLibraryInfo *,
+ Loop *, AliasSetTracker *, MemorySSAUpdater *, ICFLoopSafetyInfo *,
+ OptimizationRemarkEmitter *);
+
+/// Does a BFS from a given node to all of its children inside a given loop.
+/// The returned vector of nodes includes the starting point.
+SmallVector<DomTreeNode *, 16> collectChildrenInLoop(DomTreeNode *N,
+ const Loop *CurLoop);
+
+/// Returns the instructions that use values defined in the loop.
+SmallVector<Instruction *, 8> findDefsUsedOutsideOfLoop(Loop *L);
+
+/// Find string metadata for a loop.
+///
+/// If it has a value (e.g. {"llvm.distribute", 1}), return the value as an
+/// operand, or null otherwise. If the string metadata is not found, return
+/// Optional's not-a-value.
+Optional<const MDOperand *> findStringMetadataForLoop(const Loop *TheLoop,
+ StringRef Name);
+
+/// Find named metadata for a loop with an integer value.
+llvm::Optional<int> getOptionalIntLoopAttribute(Loop *TheLoop, StringRef Name);
+
+/// Find a combination of metadata ("llvm.loop.vectorize.width" and
+/// "llvm.loop.vectorize.scalable.enable") for a loop and use it to construct
+/// an ElementCount. If the metadata "llvm.loop.vectorize.width" cannot be
+/// found then None is returned.
+Optional<ElementCount>
+getOptionalElementCountLoopAttribute(Loop *TheLoop);
+
+/// Create a new loop identifier for a loop created from a loop transformation.
+///
+/// @param OrigLoopID The loop ID of the loop before the transformation.
+/// @param FollowupAttrs List of attribute names that contain attributes to be
+/// added to the new loop ID.
+/// @param InheritOptionsAttrsPrefix Selects which attributes should be inherited
+/// from the original loop. The following values
+/// are considered:
+/// nullptr : Inherit all attributes from @p OrigLoopID.
+/// "" : Do not inherit any attribute from @p OrigLoopID; only use
+/// those specified by a followup attribute.
+/// "<prefix>": Inherit all attributes except those which start with
+/// <prefix>; commonly used to remove metadata for the
+/// applied transformation.
+/// @param AlwaysNew If true, do not try to reuse OrigLoopID and never return
+/// None.
+///
+/// @return The loop ID for the after-transformation loop. The following values
+/// can be returned:
+/// None : No followup attribute was found; it is up to the
+/// transformation to choose attributes that make sense.
+/// @p OrigLoopID: The original identifier can be reused.
+/// nullptr : The new loop has no attributes.
+/// MDNode* : A new unique loop identifier.
+Optional<MDNode *>
+makeFollowupLoopID(MDNode *OrigLoopID, ArrayRef<StringRef> FollowupAttrs,
+ const char *InheritOptionsAttrsPrefix = "",
+ bool AlwaysNew = false);
+
+/// Look for the loop attribute that disables all transformation heuristics.
+bool hasDisableAllTransformsHint(const Loop *L);
+
+/// Look for the loop attribute that disables the LICM transformation heuristics.
+bool hasDisableLICMTransformsHint(const Loop *L);
+
+/// Look for the loop attribute that requires progress within the loop.
+bool hasMustProgress(const Loop *L);
+
+/// The mode sets how eager a transformation should be applied.
+enum TransformationMode {
+ /// The pass can use heuristics to determine whether a transformation should
+ /// be applied.
+ TM_Unspecified,
+
+ /// The transformation should be applied without considering a cost model.
+ TM_Enable,
+
+ /// The transformation should not be applied.
+ TM_Disable,
+
+ /// Force is a flag and should not be used alone.
+ TM_Force = 0x04,
+
+ /// The transformation was directed by the user, e.g. by a #pragma in
+ /// the source code. If the transformation could not be applied, a
+ /// warning should be emitted.
+ TM_ForcedByUser = TM_Enable | TM_Force,
+
+ /// The transformation must not be applied. For instance, `#pragma clang loop
+ /// unroll(disable)` explicitly forbids any unrolling to take place. Unlike
+ /// general loop metadata, it must not be dropped. Most passes should not
+ /// behave differently under TM_Disable and TM_SuppressedByUser.
+ TM_SuppressedByUser = TM_Disable | TM_Force
+};
+
+/// @{
+/// Get the mode for LLVM's supported loop transformations.
+TransformationMode hasUnrollTransformation(Loop *L);
+TransformationMode hasUnrollAndJamTransformation(Loop *L);
+TransformationMode hasVectorizeTransformation(Loop *L);
+TransformationMode hasDistributeTransformation(Loop *L);
+TransformationMode hasLICMVersioningTransformation(Loop *L);
+/// @}
+
+/// Set the input string into loop metadata, keeping other values intact.
+/// If the string is already in the loop metadata, update its value if it
+/// differs.
+void addStringMetadataToLoop(Loop *TheLoop, const char *MDString,
+ unsigned V = 0);
+
+/// Returns true if Name is applied to TheLoop and enabled.
+bool getBooleanLoopAttribute(const Loop *TheLoop, StringRef Name);
+
+/// Returns a loop's estimated trip count based on branch weight metadata.
+/// In addition, if \p EstimatedLoopInvocationWeight is not null, it is
+/// initialized with the weight of the loop's latch leading to the exit.
+/// Returns 0 when the count is estimated to be 0, or None when a meaningful
+/// estimate cannot be made.
+Optional<unsigned>
+getLoopEstimatedTripCount(Loop *L,
+ unsigned *EstimatedLoopInvocationWeight = nullptr);
+
+/// Set a loop's branch weight metadata to reflect that the loop has \p
+/// EstimatedTripCount iterations and \p EstimatedLoopInvocationWeight exits
+/// through its latch. Returns true if the metadata is successfully updated,
+/// false otherwise. Note that the loop must have a latch block which controls
+/// the loop exit in order to succeed.
+bool setLoopEstimatedTripCount(Loop *L, unsigned EstimatedTripCount,
+ unsigned EstimatedLoopInvocationWeight);
+
+/// Check whether the inner loop (L) backedge count is known to be invariant on
+/// all iterations of its outer loop. If the loop has no parent, this is
+/// trivially true.
+bool hasIterationCountInvariantInParent(Loop *L, ScalarEvolution &SE);
+
+/// Helper to consistently add the set of standard passes to a loop pass's \c
+/// AnalysisUsage.
+///
+/// All loop passes should call this as part of implementing their \c
+/// getAnalysisUsage.
+void getLoopAnalysisUsage(AnalysisUsage &AU);
+
+/// Returns true if it is legal to hoist or sink this instruction disregarding
+/// the possible introduction of faults. Reasoning about potential faulting
+/// instructions is the responsibility of the caller since it is challenging to
+/// do efficiently from within this routine.
+/// \p TargetExecutesOncePerLoop is true only when it is guaranteed that the
+/// target executes at most once per execution of the loop body. This is used
+/// to assess the legality of duplicating atomic loads. Generally, this is
+/// true when moving out of a loop and not true when moving into a loop.
+/// If \p ORE is set use it to emit optimization remarks.
+bool canSinkOrHoistInst(Instruction &I, AAResults *AA, DominatorTree *DT,
+ Loop *CurLoop, AliasSetTracker *CurAST,
+ MemorySSAUpdater *MSSAU, bool TargetExecutesOncePerLoop,
+ SinkAndHoistLICMFlags *LICMFlags = nullptr,
+ OptimizationRemarkEmitter *ORE = nullptr);
+
+/// Returns a Min/Max operation corresponding to MinMaxRecurrenceKind.
+Value *createMinMaxOp(IRBuilderBase &Builder, RecurKind RK, Value *Left,
+ Value *Right);
+
+/// Generates an ordered vector reduction using extracts to reduce the value.
+Value *getOrderedReduction(IRBuilderBase &Builder, Value *Acc, Value *Src,
+ unsigned Op, RecurKind MinMaxKind = RecurKind::None,
+ ArrayRef<Value *> RedOps = None);
+
+/// Generates a vector reduction using shufflevectors to reduce the value.
+/// Fast-math-flags are propagated using the IRBuilder's setting.
+Value *getShuffleReduction(IRBuilderBase &Builder, Value *Src, unsigned Op,
+ RecurKind MinMaxKind = RecurKind::None,
+ ArrayRef<Value *> RedOps = None);
+
+/// Create a target reduction of the given vector. The reduction operation
+/// is described by the \p Opcode parameter. min/max reductions require
+/// additional information supplied in \p RdxKind.
+/// The target is queried to determine if intrinsics or shuffle sequences are
+/// required to implement the reduction.
+/// Fast-math-flags are propagated using the IRBuilder's setting.
+Value *createSimpleTargetReduction(IRBuilderBase &B,
+ const TargetTransformInfo *TTI, Value *Src,
+ RecurKind RdxKind,
+ ArrayRef<Value *> RedOps = None);
+
+/// Create a generic target reduction using a recurrence descriptor \p Desc
+/// The target is queried to determine if intrinsics or shuffle sequences are
+/// required to implement the reduction.
+/// Fast-math-flags are propagated using the RecurrenceDescriptor.
+Value *createTargetReduction(IRBuilderBase &B, const TargetTransformInfo *TTI,
+ RecurrenceDescriptor &Desc, Value *Src);
+
+/// Get the intersection (logical and) of all of the potential IR flags
+/// of each scalar operation (VL) that will be converted into a vector (I).
+/// If OpValue is non-null, we only consider operations similar to OpValue
+/// when intersecting.
+/// Flag set: NSW, NUW, exact, and all of fast-math.
+void propagateIRFlags(Value *I, ArrayRef<Value *> VL, Value *OpValue = nullptr);
+
+/// Returns true if we can prove that \p S is defined and always negative in
+/// loop \p L.
+bool isKnownNegativeInLoop(const SCEV *S, const Loop *L, ScalarEvolution &SE);
+
+/// Returns true if we can prove that \p S is defined and always non-negative in
+/// loop \p L.
+bool isKnownNonNegativeInLoop(const SCEV *S, const Loop *L,
+ ScalarEvolution &SE);
+
+/// Returns true if \p S is defined and never is equal to signed/unsigned max.
+bool cannotBeMaxInLoop(const SCEV *S, const Loop *L, ScalarEvolution &SE,
+ bool Signed);
+
+/// Returns true if \p S is defined and never is equal to signed/unsigned min.
+bool cannotBeMinInLoop(const SCEV *S, const Loop *L, ScalarEvolution &SE,
+ bool Signed);
+
+enum ReplaceExitVal { NeverRepl, OnlyCheapRepl, NoHardUse, AlwaysRepl };
+
+/// If the final value of any expressions that are recurrent in the loop can
+/// be computed, substitute the exit values from the loop into any instructions
+/// outside of the loop that use the final values of the current expressions.
+/// Return the number of loop exit values that have been replaced, and the
+/// corresponding phi node will be added to DeadInsts.
+int rewriteLoopExitValues(Loop *L, LoopInfo *LI, TargetLibraryInfo *TLI,
+ ScalarEvolution *SE, const TargetTransformInfo *TTI,
+ SCEVExpander &Rewriter, DominatorTree *DT,
+ ReplaceExitVal ReplaceExitValue,
+ SmallVector<WeakTrackingVH, 16> &DeadInsts);
+
+/// Set weights for \p UnrolledLoop and \p RemainderLoop based on weights for
+/// \p OrigLoop and the following distribution of \p OrigLoop iterations among
+/// \p UnrolledLoop and \p RemainderLoop. \p UnrolledLoop receives weights that
+/// reflect TC/UF iterations, and \p RemainderLoop receives weights that
+/// reflect the remaining TC%UF iterations.
+///
+/// Note that \p OrigLoop may be equal to either \p UnrolledLoop or \p
+/// RemainderLoop in which case weights for \p OrigLoop are updated accordingly.
+/// Note also behavior is undefined if \p UnrolledLoop and \p RemainderLoop are
+/// equal. \p UF must be greater than zero.
+/// If \p OrigLoop has no associated profile info, nothing happens.
+///
+/// This utility may be useful for optimizations such as unrolling and
+/// vectorization, for which this is a typical transformation.
+void setProfileInfoAfterUnrolling(Loop *OrigLoop, Loop *UnrolledLoop,
+ Loop *RemainderLoop, uint64_t UF);
+
+/// Utility that implements appending of loops onto a worklist given a range.
+/// We want to process loops in postorder, but the worklist is a LIFO data
+/// structure, so we append to it in *reverse* postorder.
+/// For trees, a preorder traversal is a viable reverse postorder, so we
+/// actually append using a preorder walk algorithm.
+template <typename RangeT>
+void appendLoopsToWorklist(RangeT &&, SmallPriorityWorklist<Loop *, 4> &);
+/// Utility that implements appending of loops onto a worklist given a range.
+/// It has the same behavior as appendLoopsToWorklist, but assumes the range of
+/// loops has already been reversed, so it processes loops in the given order.
+template <typename RangeT>
+void appendReversedLoopsToWorklist(RangeT &&,
+ SmallPriorityWorklist<Loop *, 4> &);
+
+/// Utility that implements appending of loops onto a worklist given LoopInfo.
+/// Calls the templated utility taking a Range of loops, handing it the Loops
+/// in LoopInfo, iterated in reverse. This is because the loops are stored in
+/// RPO w.r.t. the control flow graph in LoopInfo. For the purpose of unrolling,
+/// loop deletion, and LICM, we largely want to work forward across the CFG so
+/// that we visit defs before uses and can propagate simplifications from one
+/// loop nest into the next. Calls appendReversedLoopsToWorklist with the
+/// already reversed loops in LI.
+/// FIXME: Consider changing the order in LoopInfo.
+void appendLoopsToWorklist(LoopInfo &, SmallPriorityWorklist<Loop *, 4> &);
+
+/// Recursively clone the specified loop and all of its children,
+/// mapping the blocks with the specified map.
+Loop *cloneLoop(Loop *L, Loop *PL, ValueToValueMapTy &VM,
+ LoopInfo *LI, LPPassManager *LPM);
+
+/// Add code that checks at runtime if the accessed arrays in \p PointerChecks
+/// overlap.
+///
+/// Returns a pair of instructions where the first element is the first
+/// instruction generated (possibly the start of a sequence of instructions)
+/// and the second is the final comparator value, or NULL if no check is
+/// needed.
+std::pair<Instruction *, Instruction *>
+addRuntimeChecks(Instruction *Loc, Loop *TheLoop,
+ const SmallVectorImpl<RuntimePointerCheck> &PointerChecks,
+ ScalarEvolution *SE);
+
+} // end namespace llvm
+
+#endif // LLVM_TRANSFORMS_UTILS_LOOPUTILS_H
+
+#ifdef __GNUC__
+#pragma GCC diagnostic pop
+#endif
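A small sketch (not part of the patched header) combining two of the LoopUtils helpers declared above: querying the profile-derived trip count and recording a decision as loop metadata. The metadata key "llvm.loop.example.small_trip_count" and the threshold of 4 are made-up illustrations, not attributes LLVM itself recognizes.

#include "llvm/ADT/Optional.h"
#include "llvm/Transforms/Utils/LoopUtils.h"

using namespace llvm;

// If branch-weight metadata suggests the loop runs only a handful of times,
// tag the loop so a later pass could see that decision.
static void markSmallTripCountLoop(Loop &L) {
  if (Optional<unsigned> TC = getLoopEstimatedTripCount(&L))
    if (*TC != 0 && *TC <= 4)
      addStringMetadataToLoop(&L, "llvm.loop.example.small_trip_count", *TC);
}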
diff --git a/contrib/libs/llvm12/include/llvm/Transforms/Utils/LoopVersioning.h b/contrib/libs/llvm12/include/llvm/Transforms/Utils/LoopVersioning.h
new file mode 100644
index 0000000000..dd7394a3f0
--- /dev/null
+++ b/contrib/libs/llvm12/include/llvm/Transforms/Utils/LoopVersioning.h
@@ -0,0 +1,166 @@
+#pragma once
+
+#ifdef __GNUC__
+#pragma GCC diagnostic push
+#pragma GCC diagnostic ignored "-Wunused-parameter"
+#endif
+
+//===- LoopVersioning.h - Utility to version a loop -------------*- C++ -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// This file defines a utility class to perform loop versioning. The versioned
+// loop speculates that otherwise may-aliasing memory accesses don't overlap and
+// emits checks to prove this.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_TRANSFORMS_UTILS_LOOPVERSIONING_H
+#define LLVM_TRANSFORMS_UTILS_LOOPVERSIONING_H
+
+#include "llvm/Analysis/ScalarEvolution.h"
+#include "llvm/IR/PassManager.h"
+#include "llvm/Transforms/Utils/LoopUtils.h"
+#include "llvm/Transforms/Utils/ValueMapper.h"
+
+namespace llvm {
+
+class Loop;
+class LoopAccessInfo;
+class LoopInfo;
+struct RuntimeCheckingPtrGroup;
+typedef std::pair<const RuntimeCheckingPtrGroup *,
+ const RuntimeCheckingPtrGroup *>
+ RuntimePointerCheck;
+
+template <typename T> class ArrayRef;
+
+/// This class emits a version of the loop where run-time checks ensure
+/// that may-alias pointers can't overlap.
+///
+/// It currently only supports single-exit loops and assumes that the loop
+/// already has a preheader.
+class LoopVersioning {
+public:
+ /// Expects LoopAccessInfo, Loop, LoopInfo, DominatorTree as input.
+  /// It versions the loop using the runtime checks provided by the user in
+  /// \p Checks (for the default behaviour, pass in the checks computed by
+  /// LAI).
+ LoopVersioning(const LoopAccessInfo &LAI,
+ ArrayRef<RuntimePointerCheck> Checks, Loop *L, LoopInfo *LI,
+ DominatorTree *DT, ScalarEvolution *SE);
+
+ /// Performs the CFG manipulation part of versioning the loop including
+ /// the DominatorTree and LoopInfo updates.
+ ///
+ /// The loop that was used to construct the class will be the "versioned" loop
+ /// i.e. the loop that will receive control if all the memchecks pass.
+ ///
+ /// This allows the loop transform pass to operate on the same loop regardless
+ /// of whether versioning was necessary or not:
+ ///
+ /// for each loop L:
+ /// analyze L
+ /// if versioning is necessary version L
+ /// transform L
+ void versionLoop() { versionLoop(findDefsUsedOutsideOfLoop(VersionedLoop)); }
+
+  /// Same as above, but if the client has already precomputed the set of
+  /// values used outside the loop, this API allows passing that set in.
+ void versionLoop(const SmallVectorImpl<Instruction *> &DefsUsedOutside);
+
+ /// Returns the versioned loop. Control flows here if pointers in the
+ /// loop don't alias (i.e. all memchecks passed). (This loop is actually the
+  /// same as the original loop that this object was constructed with.)
+ Loop *getVersionedLoop() { return VersionedLoop; }
+
+ /// Returns the fall-back loop. Control flows here if pointers in the
+ /// loop may alias (i.e. one of the memchecks failed).
+ Loop *getNonVersionedLoop() { return NonVersionedLoop; }
+
+ /// Annotate memory instructions in the versioned loop with no-alias
+ /// metadata based on the memchecks issued.
+ ///
+  /// This is just a wrapper that calls prepareNoAliasMetadata and
+ /// annotateInstWithNoAlias on the instructions of the versioned loop.
+ void annotateLoopWithNoAlias();
+
+ /// Set up the aliasing scopes based on the memchecks. This needs to
+ /// be called before the first call to annotateInstWithNoAlias.
+ void prepareNoAliasMetadata();
+
+ /// Add the noalias annotations to \p VersionedInst.
+ ///
+ /// \p OrigInst is the instruction corresponding to \p VersionedInst in the
+ /// original loop. Initialize the aliasing scopes with
+ /// prepareNoAliasMetadata once before this can be called.
+ void annotateInstWithNoAlias(Instruction *VersionedInst,
+ const Instruction *OrigInst);
+
+private:
+ /// Adds the necessary PHI nodes for the versioned loops based on the
+ /// loop-defined values used outside of the loop.
+ ///
+ /// This needs to be called after versionLoop if there are defs in the loop
+ /// that are used outside the loop.
+ void addPHINodes(const SmallVectorImpl<Instruction *> &DefsUsedOutside);
+
+ /// Add the noalias annotations to \p I. Initialize the aliasing
+ /// scopes with prepareNoAliasMetadata once before this can be called.
+ void annotateInstWithNoAlias(Instruction *I) {
+ annotateInstWithNoAlias(I, I);
+ }
+
+ /// The original loop. This becomes the "versioned" one. I.e.,
+ /// control flows here if pointers in the loop don't alias.
+ Loop *VersionedLoop;
+ /// The fall-back loop. I.e. control flows here if pointers in the
+ /// loop may alias (memchecks failed).
+ Loop *NonVersionedLoop;
+
+ /// This maps the instructions from VersionedLoop to their counterpart
+ /// in NonVersionedLoop.
+ ValueToValueMapTy VMap;
+
+ /// The set of alias checks that we are versioning for.
+ SmallVector<RuntimePointerCheck, 4> AliasChecks;
+
+ /// The set of SCEV checks that we are versioning for.
+ const SCEVUnionPredicate &Preds;
+
+ /// Maps a pointer to the pointer checking group that the pointer
+ /// belongs to.
+ DenseMap<const Value *, const RuntimeCheckingPtrGroup *> PtrToGroup;
+
+ /// The alias scope corresponding to a pointer checking group.
+ DenseMap<const RuntimeCheckingPtrGroup *, MDNode *> GroupToScope;
+
+ /// The list of alias scopes that a pointer checking group can't alias.
+ DenseMap<const RuntimeCheckingPtrGroup *, MDNode *>
+ GroupToNonAliasingScopeList;
+
+ /// Analyses used.
+ const LoopAccessInfo &LAI;
+ LoopInfo *LI;
+ DominatorTree *DT;
+ ScalarEvolution *SE;
+};
+
+/// Expose LoopVersioning as a pass. Currently this is only used for
+/// unit-testing. It adds all memchecks necessary to remove all may-aliasing
+/// array accesses from the loop.
+class LoopVersioningPass : public PassInfoMixin<LoopVersioningPass> {
+public:
+ PreservedAnalyses run(Function &F, FunctionAnalysisManager &FAM);
+};
+}
+
+#endif
+
+#ifdef __GNUC__
+#pragma GCC diagnostic pop
+#endif
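An illustrative sketch (not part of the patched header) of versioning a loop with the checks computed by LoopAccessAnalysis. It assumes the LoopAccessInfo accessor getRuntimePointerChecking()->getChecks() is available to the caller; the helper name is also an assumption.

#include "llvm/Analysis/LoopAccessAnalysis.h"
#include "llvm/Transforms/Utils/LoopVersioning.h"

using namespace llvm;

// Version L with the runtime pointer checks already computed by LAA, then
// mark the fast (non-aliasing) copy with noalias metadata.
static void versionLoopWithLAIChecks(Loop *L, const LoopAccessInfo &LAI,
                                     LoopInfo &LI, DominatorTree &DT,
                                     ScalarEvolution &SE) {
  const auto &Checks = LAI.getRuntimePointerChecking()->getChecks();
  LoopVersioning LVer(LAI, Checks, L, &LI, &DT, &SE);
  LVer.versionLoop();
  LVer.annotateLoopWithNoAlias();
}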
diff --git a/contrib/libs/llvm12/include/llvm/Transforms/Utils/LowerInvoke.h b/contrib/libs/llvm12/include/llvm/Transforms/Utils/LowerInvoke.h
new file mode 100644
index 0000000000..f88d5b6558
--- /dev/null
+++ b/contrib/libs/llvm12/include/llvm/Transforms/Utils/LowerInvoke.h
@@ -0,0 +1,40 @@
+#pragma once
+
+#ifdef __GNUC__
+#pragma GCC diagnostic push
+#pragma GCC diagnostic ignored "-Wunused-parameter"
+#endif
+
+//===- LowerInvoke.h - Eliminate Invoke instructions ----------------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// This transformation is designed for use by code generators which do not yet
+// support stack unwinding. This pass converts 'invoke' instructions to 'call'
+// instructions, so that any exception-handling 'landingpad' blocks become dead
+// code (which can be removed by running the '-simplifycfg' pass afterwards).
+//
+//===----------------------------------------------------------------------===//
+#ifndef LLVM_TRANSFORMS_UTILS_LOWERINVOKE_H
+#define LLVM_TRANSFORMS_UTILS_LOWERINVOKE_H
+
+#include "llvm/IR/PassManager.h"
+
+namespace llvm {
+
+class LowerInvokePass : public PassInfoMixin<LowerInvokePass> {
+public:
+ PreservedAnalyses run(Function &F, FunctionAnalysisManager &AM);
+};
+
+}
+
+#endif // LLVM_TRANSFORMS_UTILS_LOWERINVOKE_H
+
+#ifdef __GNUC__
+#pragma GCC diagnostic pop
+#endif
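A minimal sketch (not part of the patched header) of running the pass through the new pass manager, followed by SimplifyCFG to clean up the landing pads that become dead, as the header comment suggests. It assumes the FunctionAnalysisManager already has the standard analyses registered.

#include "llvm/IR/PassManager.h"
#include "llvm/Transforms/Scalar/SimplifyCFG.h"
#include "llvm/Transforms/Utils/LowerInvoke.h"

using namespace llvm;

// Lower all invokes in F to plain calls, then delete the now-dead EH blocks.
static void lowerInvokesAndCleanUp(Function &F, FunctionAnalysisManager &FAM) {
  FunctionPassManager FPM;
  FPM.addPass(LowerInvokePass());
  FPM.addPass(SimplifyCFGPass());
  FPM.run(F, FAM);
}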
diff --git a/contrib/libs/llvm12/include/llvm/Transforms/Utils/LowerMemIntrinsics.h b/contrib/libs/llvm12/include/llvm/Transforms/Utils/LowerMemIntrinsics.h
new file mode 100644
index 0000000000..ff286c0590
--- /dev/null
+++ b/contrib/libs/llvm12/include/llvm/Transforms/Utils/LowerMemIntrinsics.h
@@ -0,0 +1,66 @@
+#pragma once
+
+#ifdef __GNUC__
+#pragma GCC diagnostic push
+#pragma GCC diagnostic ignored "-Wunused-parameter"
+#endif
+
+//===- llvm/Transforms/Utils/LowerMemIntrinsics.h ---------------*- C++ -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// Lower memset, memcpy, memmove intrinsics to loops (e.g. for targets without
+// library support).
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_TRANSFORMS_UTILS_LOWERMEMINTRINSICS_H
+#define LLVM_TRANSFORMS_UTILS_LOWERMEMINTRINSICS_H
+
+namespace llvm {
+
+class ConstantInt;
+class Instruction;
+class MemCpyInst;
+class MemMoveInst;
+class MemSetInst;
+class TargetTransformInfo;
+class Value;
+struct Align;
+
+/// Emit a loop implementing the semantics of llvm.memcpy where the size is not
+/// a compile-time constant. The loop will be inserted at \p InsertBefore.
+void createMemCpyLoopUnknownSize(Instruction *InsertBefore, Value *SrcAddr,
+ Value *DstAddr, Value *CopyLen,
+ Align SrcAlign, Align DestAlign,
+ bool SrcIsVolatile, bool DstIsVolatile,
+ const TargetTransformInfo &TTI);
+
+/// Emit a loop implementing the semantics of an llvm.memcpy whose size is a
+/// compile-time constant. The loop is inserted at \p InsertBefore.
+void createMemCpyLoopKnownSize(Instruction *InsertBefore, Value *SrcAddr,
+ Value *DstAddr, ConstantInt *CopyLen,
+ Align SrcAlign, Align DestAlign,
+ bool SrcIsVolatile, bool DstIsVolatile,
+ const TargetTransformInfo &TTI);
+
+/// Expand \p MemCpy as a loop. \p MemCpy is not deleted.
+void expandMemCpyAsLoop(MemCpyInst *MemCpy, const TargetTransformInfo &TTI);
+
+/// Expand \p MemMove as a loop. \p MemMove is not deleted.
+void expandMemMoveAsLoop(MemMoveInst *MemMove);
+
+/// Expand \p MemSet as a loop. \p MemSet is not deleted.
+void expandMemSetAsLoop(MemSetInst *MemSet);
+
+} // End llvm namespace
+
+#endif
+
+#ifdef __GNUC__
+#pragma GCC diagnostic pop
+#endif
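A short sketch (not part of the patched header) of expanding every llvm.memcpy in a function with the utility declared above. The helper name and worklist structure are assumptions of this illustration.

#include "llvm/ADT/SmallVector.h"
#include "llvm/Analysis/TargetTransformInfo.h"
#include "llvm/IR/Function.h"
#include "llvm/IR/InstIterator.h"
#include "llvm/IR/IntrinsicInst.h"
#include "llvm/Transforms/Utils/LowerMemIntrinsics.h"

using namespace llvm;

// Collect the intrinsics first because expansion mutates the instruction
// list, and erase them afterwards since expandMemCpyAsLoop does not.
static void lowerAllMemCpyIntrinsics(Function &F,
                                     const TargetTransformInfo &TTI) {
  SmallVector<MemCpyInst *, 8> Worklist;
  for (Instruction &I : instructions(F))
    if (auto *MC = dyn_cast<MemCpyInst>(&I))
      Worklist.push_back(MC);
  for (MemCpyInst *MC : Worklist) {
    expandMemCpyAsLoop(MC, TTI);
    MC->eraseFromParent();
  }
}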
diff --git a/contrib/libs/llvm12/include/llvm/Transforms/Utils/LowerSwitch.h b/contrib/libs/llvm12/include/llvm/Transforms/Utils/LowerSwitch.h
new file mode 100644
index 0000000000..1d177dd618
--- /dev/null
+++ b/contrib/libs/llvm12/include/llvm/Transforms/Utils/LowerSwitch.h
@@ -0,0 +1,37 @@
+#pragma once
+
+#ifdef __GNUC__
+#pragma GCC diagnostic push
+#pragma GCC diagnostic ignored "-Wunused-parameter"
+#endif
+
+//===- LowerSwitch.h - Eliminate Switch instructions ----------------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// The LowerSwitch transformation rewrites switch instructions with a sequence
+// of branches, which allows targets to get away with not implementing the
+// switch instruction until it is convenient.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_TRANSFORMS_UTILS_LOWERSWITCH_H
+#define LLVM_TRANSFORMS_UTILS_LOWERSWITCH_H
+
+#include "llvm/IR/PassManager.h"
+
+namespace llvm {
+struct LowerSwitchPass : public PassInfoMixin<LowerSwitchPass> {
+ PreservedAnalyses run(Function &F, FunctionAnalysisManager &AM);
+};
+} // namespace llvm
+
+#endif // LLVM_TRANSFORMS_UTILS_LOWERSWITCH_H
+
+#ifdef __GNUC__
+#pragma GCC diagnostic pop
+#endif
diff --git a/contrib/libs/llvm12/include/llvm/Transforms/Utils/MatrixUtils.h b/contrib/libs/llvm12/include/llvm/Transforms/Utils/MatrixUtils.h
new file mode 100644
index 0000000000..948e51e22b
--- /dev/null
+++ b/contrib/libs/llvm12/include/llvm/Transforms/Utils/MatrixUtils.h
@@ -0,0 +1,105 @@
+#pragma once
+
+#ifdef __GNUC__
+#pragma GCC diagnostic push
+#pragma GCC diagnostic ignored "-Wunused-parameter"
+#endif
+
+//===- MatrixUtils.h - Utilities to lower matrix intrinsics -----*- C++ -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// Utilities for generating tiled loops for matrix operations.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_TRANSFORMS_UTILS_MATRIXUTILS_H
+#define LLVM_TRANSFORMS_UTILS_MATRIXUTILS_H
+
+#include "llvm/ADT/StringRef.h"
+
+namespace llvm {
+class DomTreeUpdater;
+class BasicBlock;
+class Value;
+class Loop;
+class LoopInfo;
+class IRBuilderBase;
+
+/// A helper struct to create IR loop nests for tiling of the following
+/// form:
+/// for CurrentColumn = 0..NumColumns
+/// for CurrentRow = 0..NumRows
+/// for CurrentInner = 0..NumInner
+struct TileInfo {
+ /// Number of rows of the matrix.
+ unsigned NumRows;
+
+ /// Number of columns of the matrix.
+ unsigned NumColumns;
+
+ /// Number of columns of the first matrix of a multiply /
+ /// number of rows of the second matrix of a multiply.
+ unsigned NumInner;
+
+ /// Number of rows/columns in a tile.
+ unsigned TileSize = -1;
+
+ /// Start row of the current tile to compute.
+ Value *CurrentRow;
+
+ /// Start column of the current tile to compute.
+ Value *CurrentCol;
+
+ /// Current tile offset during the tile computation.
+ Value *CurrentK;
+
+ /// Header of the outermost loop iterating from 0..NumColumns.
+ BasicBlock *ColumnLoopHeader = nullptr;
+
+ /// Header of the second loop iterating from 0..NumRows.
+ BasicBlock *RowLoopHeader = nullptr;
+ /// Latch of the second loop iterating from 0..NumRows.
+ BasicBlock *RowLoopLatch = nullptr;
+ /// Header of the innermost loop iterating from 0..NumInner.
+ BasicBlock *InnerLoopHeader = nullptr;
+ /// Latch of the innermost loop iterating from 0..NumInner.
+ BasicBlock *InnerLoopLatch = nullptr;
+
+ TileInfo(unsigned NumRows, unsigned NumColumns, unsigned NumInner,
+ unsigned TileSize)
+ : NumRows(NumRows), NumColumns(NumColumns), NumInner(NumInner),
+ TileSize(TileSize) {}
+
+  /// Creates an IR loop nest for tiling of the form below. Returns the block
+ /// for the inner loop body and sets {Column,Row,Inner}LoopHeader/Latch
+ /// fields.
+ ///
+ /// for CurrentColumn = 0..NumColumns
+ /// for CurrentRow = 0..NumRows
+ /// for CurrentInner = 0..NumInner
+ BasicBlock *CreateTiledLoops(BasicBlock *Start, BasicBlock *End,
+ IRBuilderBase &B, DomTreeUpdater &DTU,
+ LoopInfo &LI);
+
+private:
+ /// Creates a new loop with header, body and latch blocks that iterates from
+ /// [0, Bound). Updates \p Preheader to branch to the new header and uses \p
+  /// Exit as the exit block. Adds the new loop blocks to \p L and applies
+  /// dominator tree updates to \p DTU.
+ static BasicBlock *CreateLoop(BasicBlock *Preheader, BasicBlock *Exit,
+ Value *Bound, Value *Step, StringRef Name,
+ IRBuilderBase &B, DomTreeUpdater &DTU, Loop *L,
+ LoopInfo &LI);
+};
+} // namespace llvm
+
+#endif
+
+#ifdef __GNUC__
+#pragma GCC diagnostic pop
+#endif
diff --git a/contrib/libs/llvm12/include/llvm/Transforms/Utils/Mem2Reg.h b/contrib/libs/llvm12/include/llvm/Transforms/Utils/Mem2Reg.h
new file mode 100644
index 0000000000..a26554dd77
--- /dev/null
+++ b/contrib/libs/llvm12/include/llvm/Transforms/Utils/Mem2Reg.h
@@ -0,0 +1,41 @@
+#pragma once
+
+#ifdef __GNUC__
+#pragma GCC diagnostic push
+#pragma GCC diagnostic ignored "-Wunused-parameter"
+#endif
+
+//===- Mem2Reg.h - The -mem2reg pass, a wrapper around the Utils lib ------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// This pass is a simple pass wrapper around the PromoteMemToReg function call
+// exposed by the Utils library.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_TRANSFORMS_UTILS_MEM2REG_H
+#define LLVM_TRANSFORMS_UTILS_MEM2REG_H
+
+#include "llvm/IR/PassManager.h"
+
+namespace llvm {
+
+class Function;
+
+class PromotePass : public PassInfoMixin<PromotePass> {
+public:
+ PreservedAnalyses run(Function &F, FunctionAnalysisManager &AM);
+};
+
+} // end namespace llvm
+
+#endif // LLVM_TRANSFORMS_UTILS_MEM2REG_H
+
+#ifdef __GNUC__
+#pragma GCC diagnostic pop
+#endif
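A self-contained sketch (not part of the patched header) of running PromotePass on a single function with the new pass manager, building the analysis managers from scratch via PassBuilder. The helper name is an assumption.

#include "llvm/IR/Function.h"
#include "llvm/IR/PassManager.h"
#include "llvm/Passes/PassBuilder.h"
#include "llvm/Transforms/Utils/Mem2Reg.h"

using namespace llvm;

// Promote allocas in F to SSA registers.
static void promoteAllocasToRegisters(Function &F) {
  PassBuilder PB;
  LoopAnalysisManager LAM;
  FunctionAnalysisManager FAM;
  CGSCCAnalysisManager CGAM;
  ModuleAnalysisManager MAM;
  PB.registerModuleAnalyses(MAM);
  PB.registerCGSCCAnalyses(CGAM);
  PB.registerFunctionAnalyses(FAM);
  PB.registerLoopAnalyses(LAM);
  PB.crossRegisterProxies(LAM, FAM, CGAM, MAM);

  FunctionPassManager FPM;
  FPM.addPass(PromotePass());
  FPM.run(F, FAM);
}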
diff --git a/contrib/libs/llvm12/include/llvm/Transforms/Utils/MetaRenamer.h b/contrib/libs/llvm12/include/llvm/Transforms/Utils/MetaRenamer.h
new file mode 100644
index 0000000000..f7aab0a1c5
--- /dev/null
+++ b/contrib/libs/llvm12/include/llvm/Transforms/Utils/MetaRenamer.h
@@ -0,0 +1,37 @@
+#pragma once
+
+#ifdef __GNUC__
+#pragma GCC diagnostic push
+#pragma GCC diagnostic ignored "-Wunused-parameter"
+#endif
+
+//===- MetaRenamer.h - Rename everything with metasyntactic names ---------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// This pass renames everything with metasyntactic names. The intent is to use
+// this pass after bugpoint reduction to conceal the nature of the original
+// program.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_TRANSFORMS_UTILS_METARENAMER_H
+#define LLVM_TRANSFORMS_UTILS_METARENAMER_H
+
+#include "llvm/IR/PassManager.h"
+
+namespace llvm {
+struct MetaRenamerPass : PassInfoMixin<MetaRenamerPass> {
+ PreservedAnalyses run(Module &, ModuleAnalysisManager &);
+};
+} // namespace llvm
+
+#endif // LLVM_TRANSFORMS_UTILS_METARENAMER_H
+
+#ifdef __GNUC__
+#pragma GCC diagnostic pop
+#endif
diff --git a/contrib/libs/llvm12/include/llvm/Transforms/Utils/ModuleUtils.h b/contrib/libs/llvm12/include/llvm/Transforms/Utils/ModuleUtils.h
new file mode 100644
index 0000000000..72917489a5
--- /dev/null
+++ b/contrib/libs/llvm12/include/llvm/Transforms/Utils/ModuleUtils.h
@@ -0,0 +1,134 @@
+#pragma once
+
+#ifdef __GNUC__
+#pragma GCC diagnostic push
+#pragma GCC diagnostic ignored "-Wunused-parameter"
+#endif
+
+//===-- ModuleUtils.h - Functions to manipulate Modules ---------*- C++ -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// This family of functions performs manipulations on Modules.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_TRANSFORMS_UTILS_MODULEUTILS_H
+#define LLVM_TRANSFORMS_UTILS_MODULEUTILS_H
+
+#include "llvm/ADT/SmallVector.h"
+#include "llvm/ADT/StringRef.h"
+#include <utility> // for std::pair
+
+namespace llvm {
+
+template <typename T> class ArrayRef;
+class Module;
+class Function;
+class FunctionCallee;
+class GlobalValue;
+class Constant;
+class Value;
+class Type;
+
+/// Append F to the list of global ctors of module M with the given Priority.
+/// This wraps the function in the appropriate structure and stores it
+/// alongside other global constructors. For details see
+/// http://llvm.org/docs/LangRef.html#intg_global_ctors
+void appendToGlobalCtors(Module &M, Function *F, int Priority,
+ Constant *Data = nullptr);
+
+/// Same as appendToGlobalCtors(), but for global dtors.
+void appendToGlobalDtors(Module &M, Function *F, int Priority,
+ Constant *Data = nullptr);
+
+FunctionCallee declareSanitizerInitFunction(Module &M, StringRef InitName,
+ ArrayRef<Type *> InitArgTypes);
+
+/// Creates a sanitizer constructor function.
+/// \return Returns a pointer to the constructor.
+Function *createSanitizerCtor(Module &M, StringRef CtorName);
+
+/// Creates a sanitizer constructor function and calls the sanitizer's init
+/// function from it.
+/// \return Returns a pair of pointers to the constructor and init functions,
+/// respectively.
+std::pair<Function *, FunctionCallee> createSanitizerCtorAndInitFunctions(
+ Module &M, StringRef CtorName, StringRef InitName,
+ ArrayRef<Type *> InitArgTypes, ArrayRef<Value *> InitArgs,
+ StringRef VersionCheckName = StringRef());
+
+/// Creates a sanitizer constructor function lazily. If a constructor and init
+/// function already exist, this function returns them. Otherwise it calls \c
+/// createSanitizerCtorAndInitFunctions. The FunctionsCreatedCallback is invoked
+/// in that case, passing the new Ctor and Init function.
+///
+/// \return Returns pair of pointers to constructor, and init functions
+/// respectively.
+std::pair<Function *, FunctionCallee> getOrCreateSanitizerCtorAndInitFunctions(
+ Module &M, StringRef CtorName, StringRef InitName,
+ ArrayRef<Type *> InitArgTypes, ArrayRef<Value *> InitArgs,
+ function_ref<void(Function *, FunctionCallee)> FunctionsCreatedCallback,
+ StringRef VersionCheckName = StringRef());
+
+// Creates and returns a sanitizer init function with no arguments if it does
+// not exist, and adds it to the global constructors list. Otherwise it returns
+// the existing function.
+Function *getOrCreateInitFunction(Module &M, StringRef Name);
+
+/// Rename all the anon globals in the module using a hash computed from
+/// the list of public globals in the module.
+bool nameUnamedGlobals(Module &M);
+
+/// Adds global values to the llvm.used list.
+void appendToUsed(Module &M, ArrayRef<GlobalValue *> Values);
+
+/// Adds global values to the llvm.compiler.used list.
+void appendToCompilerUsed(Module &M, ArrayRef<GlobalValue *> Values);
+
+/// Filter out potentially dead comdat functions where other entries keep the
+/// entire comdat group alive.
+///
+/// This is designed for cases where functions appear to become dead but remain
+/// alive due to other live entries in their comdat group.
+///
+/// The \p DeadComdatFunctions container should only have pointers to
+/// `Function`s which are members of a comdat group and are believed to be
+/// dead.
+///
+/// After this routine finishes, the only remaining `Function`s in \p
+/// DeadComdatFunctions are those where every member of the comdat is listed
+/// and thus removing them is safe (provided *all* are removed).
+void filterDeadComdatFunctions(
+ Module &M, SmallVectorImpl<Function *> &DeadComdatFunctions);
+
+/// Produce a unique identifier for this module by taking the MD5 sum of
+/// the names of the module's strong external symbols that are not comdat
+/// members.
+///
+/// This identifier is normally guaranteed to be unique, or the program would
+/// fail to link due to multiply defined symbols.
+///
+/// If the module has no strong external symbols (such a module may still have a
+/// semantic effect if it performs global initialization), we cannot produce a
+/// unique identifier for this module, so we return the empty string.
+std::string getUniqueModuleId(Module *M);
+
+class CallInst;
+namespace VFABI {
+/// Overwrite the Vector Function ABI variants attribute with the names
+/// provided in \p VariantMappings.
+void setVectorVariantNames(CallInst *CI,
+ const SmallVector<std::string, 8> &VariantMappings);
+} // End VFABI namespace
+} // End llvm namespace
+
+#endif // LLVM_TRANSFORMS_UTILS_MODULEUTILS_H
+
+#ifdef __GNUC__
+#pragma GCC diagnostic pop
+#endif
diff --git a/contrib/libs/llvm12/include/llvm/Transforms/Utils/NameAnonGlobals.h b/contrib/libs/llvm12/include/llvm/Transforms/Utils/NameAnonGlobals.h
new file mode 100644
index 0000000000..53b721f0cf
--- /dev/null
+++ b/contrib/libs/llvm12/include/llvm/Transforms/Utils/NameAnonGlobals.h
@@ -0,0 +1,43 @@
+#pragma once
+
+#ifdef __GNUC__
+#pragma GCC diagnostic push
+#pragma GCC diagnostic ignored "-Wunused-parameter"
+#endif
+
+//===-- NameAnonGlobals.h - Anonymous Global Naming Pass --------*- C++ -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// This file implements naming anonymous globals to make sure they can be
+// referred to by ThinLTO.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_TRANSFORMS_UTILS_NAMEANONGLOBALS_H
+#define LLVM_TRANSFORMS_UTILS_NAMEANONGLOBALS_H
+
+#include "llvm/IR/Module.h"
+#include "llvm/IR/PassManager.h"
+
+namespace llvm {
+
+/// Simple pass that provides a name to every anonymous global.
+class NameAnonGlobalPass : public PassInfoMixin<NameAnonGlobalPass> {
+public:
+ NameAnonGlobalPass() = default;
+
+ PreservedAnalyses run(Module &M, ModuleAnalysisManager &AM);
+};
+
+} // end namespace llvm
+
+#endif // LLVM_TRANSFORMS_UTILS_NAMEANONGLOBALS_H
+
+#ifdef __GNUC__
+#pragma GCC diagnostic pop
+#endif
diff --git a/contrib/libs/llvm12/include/llvm/Transforms/Utils/PredicateInfo.h b/contrib/libs/llvm12/include/llvm/Transforms/Utils/PredicateInfo.h
new file mode 100644
index 0000000000..ced45bb3bd
--- /dev/null
+++ b/contrib/libs/llvm12/include/llvm/Transforms/Utils/PredicateInfo.h
@@ -0,0 +1,252 @@
+#pragma once
+
+#ifdef __GNUC__
+#pragma GCC diagnostic push
+#pragma GCC diagnostic ignored "-Wunused-parameter"
+#endif
+
+//===- PredicateInfo.h - Build PredicateInfo ----------------------*-C++-*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+///
+/// \file
+/// This file implements the PredicateInfo analysis, which creates an Extended
+/// SSA form for operations used in branch comparisons and llvm.assume
+/// comparisons.
+///
+/// Copies of these operations are inserted into the true/false edge (and after
+/// assumes), and information attached to the copies. All uses of the original
+/// operation in blocks dominated by the true/false edge (and assume), are
+/// replaced with uses of the copies. This enables passes to easily and sparsely
+/// propagate condition based info into the operations that may be affected.
+///
+/// Example:
+/// %cmp = icmp eq i32 %x, 50
+/// br i1 %cmp, label %true, label %false
+/// true:
+/// ret i32 %x
+/// false:
+/// ret i32 1
+///
+/// will become
+///
+/// %cmp = icmp eq i32 %x, 50
+/// br i1 %cmp, label %true, label %false
+/// true:
+/// %x.0 = call \@llvm.ssa_copy.i32(i32 %x)
+/// ret i32 %x.0
+/// false:
+/// ret i32 1
+///
+/// Using getPredicateInfoFor on x.0 will give you the comparison it is
+/// dominated by (the icmp), and that you are located in the true edge of that
+/// comparison, which tells you x.0 is 50.
+///
+/// In order to reduce the number of copies inserted, predicateinfo is only
+/// inserted where it would actually be live. This means if there are no uses of
+/// an operation dominated by the branch edges, or by an assume, the associated
+/// predicate info is never inserted.
+///
+///
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_TRANSFORMS_UTILS_PREDICATEINFO_H
+#define LLVM_TRANSFORMS_UTILS_PREDICATEINFO_H
+
+#include "llvm/ADT/DenseMap.h"
+#include "llvm/ADT/SmallSet.h"
+#include "llvm/ADT/ilist.h"
+#include "llvm/ADT/ilist_node.h"
+#include "llvm/IR/Instructions.h"
+#include "llvm/IR/PassManager.h"
+#include "llvm/IR/Value.h"
+#include "llvm/IR/ValueHandle.h"
+#include "llvm/Pass.h"
+
+namespace llvm {
+
+class AssumptionCache;
+class DominatorTree;
+class Function;
+class IntrinsicInst;
+class raw_ostream;
+
+enum PredicateType { PT_Branch, PT_Assume, PT_Switch };
+
+/// Constraint for a predicate of the form "cmp Pred Op, OtherOp", where Op
+/// is the value the constraint applies to (the ssa.copy result).
+struct PredicateConstraint {
+ CmpInst::Predicate Predicate;
+ Value *OtherOp;
+};
+
+// Base class for all predicate information we provide.
+// All of our predicate information has at least a comparison.
+class PredicateBase : public ilist_node<PredicateBase> {
+public:
+ PredicateType Type;
+ // The original operand before we renamed it.
+ // This can be used by passes, when destroying predicateinfo, to know
+ // whether they can just drop the intrinsic, or have to merge metadata.
+ Value *OriginalOp;
+ // The renamed operand in the condition used for this predicate. For nested
+ // predicates, this is different from OriginalOp, which refers to the
+ // initial operand.
+ Value *RenamedOp;
+ // The condition associated with this predicate.
+ Value *Condition;
+
+ PredicateBase(const PredicateBase &) = delete;
+ PredicateBase &operator=(const PredicateBase &) = delete;
+ PredicateBase() = delete;
+ virtual ~PredicateBase() = default;
+ static bool classof(const PredicateBase *PB) {
+ return PB->Type == PT_Assume || PB->Type == PT_Branch ||
+ PB->Type == PT_Switch;
+ }
+
+ /// Fetch condition in the form of PredicateConstraint, if possible.
+ Optional<PredicateConstraint> getConstraint() const;
+
+protected:
+ PredicateBase(PredicateType PT, Value *Op, Value *Condition)
+ : Type(PT), OriginalOp(Op), Condition(Condition) {}
+};
+
+// Provides predicate information for assumes. Since assumes are always true,
+// we simply provide the assume instruction, so you can tell your position
+// relative to it.
+class PredicateAssume : public PredicateBase {
+public:
+ IntrinsicInst *AssumeInst;
+ PredicateAssume(Value *Op, IntrinsicInst *AssumeInst, Value *Condition)
+ : PredicateBase(PT_Assume, Op, Condition), AssumeInst(AssumeInst) {}
+ PredicateAssume() = delete;
+ static bool classof(const PredicateBase *PB) {
+ return PB->Type == PT_Assume;
+ }
+};
+
+// Mixin class for edge predicates. The FROM block is the block where the
+// predicate originates, and the TO block is the block where the predicate is
+// valid.
+class PredicateWithEdge : public PredicateBase {
+public:
+ BasicBlock *From;
+ BasicBlock *To;
+ PredicateWithEdge() = delete;
+ static bool classof(const PredicateBase *PB) {
+ return PB->Type == PT_Branch || PB->Type == PT_Switch;
+ }
+
+protected:
+ PredicateWithEdge(PredicateType PType, Value *Op, BasicBlock *From,
+ BasicBlock *To, Value *Cond)
+ : PredicateBase(PType, Op, Cond), From(From), To(To) {}
+};
+
+// Provides predicate information for branches.
+class PredicateBranch : public PredicateWithEdge {
+public:
+ // If true, SplitBB is the true successor; otherwise it's the false successor.
+ bool TrueEdge;
+ PredicateBranch(Value *Op, BasicBlock *BranchBB, BasicBlock *SplitBB,
+ Value *Condition, bool TakenEdge)
+ : PredicateWithEdge(PT_Branch, Op, BranchBB, SplitBB, Condition),
+ TrueEdge(TakenEdge) {}
+ PredicateBranch() = delete;
+ static bool classof(const PredicateBase *PB) {
+ return PB->Type == PT_Branch;
+ }
+};
+
+class PredicateSwitch : public PredicateWithEdge {
+public:
+ Value *CaseValue;
+ // This is the switch instruction.
+ SwitchInst *Switch;
+ PredicateSwitch(Value *Op, BasicBlock *SwitchBB, BasicBlock *TargetBB,
+ Value *CaseValue, SwitchInst *SI)
+ : PredicateWithEdge(PT_Switch, Op, SwitchBB, TargetBB,
+ SI->getCondition()),
+ CaseValue(CaseValue), Switch(SI) {}
+ PredicateSwitch() = delete;
+ static bool classof(const PredicateBase *PB) {
+ return PB->Type == PT_Switch;
+ }
+};
+
+/// Encapsulates PredicateInfo, including all data associated with memory
+/// accesses.
+class PredicateInfo {
+public:
+ PredicateInfo(Function &, DominatorTree &, AssumptionCache &);
+ ~PredicateInfo();
+
+ void verifyPredicateInfo() const;
+
+ void dump() const;
+ void print(raw_ostream &) const;
+
+ const PredicateBase *getPredicateInfoFor(const Value *V) const {
+ return PredicateMap.lookup(V);
+ }
+
+protected:
+ // Used by PredicateInfo annotater, dumpers, and wrapper pass.
+ friend class PredicateInfoAnnotatedWriter;
+ friend class PredicateInfoPrinterLegacyPass;
+ friend class PredicateInfoBuilder;
+
+private:
+ Function &F;
+
+ // This owns all the predicate infos in the function, placed or not.
+ iplist<PredicateBase> AllInfos;
+
+ // This maps from copy operands to Predicate Info. Note that it does not own
+ // the Predicate Info; the infos are owned by the AllInfos list.
+ DenseMap<const Value *, const PredicateBase *> PredicateMap;
+ // The set of ssa_copy declarations we created with our custom mangling.
+ SmallSet<AssertingVH<Function>, 20> CreatedDeclarations;
+};
+
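+// A minimal consumer sketch (illustrative comment, not from upstream; V is
+// assumed to be an ssa.copy result produced by this analysis and PI an
+// already-built PredicateInfo):
+//
+//   if (const PredicateBase *PB = PI.getPredicateInfoFor(V))
+//     if (Optional<PredicateConstraint> C = PB->getConstraint())
+//       ; // C->Predicate relates the copy to C->OtherOp on this path.
+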
+// This pass does eager building and then printing of PredicateInfo. It is
+// used by the tests to be able to build, dump, and verify PredicateInfo.
+class PredicateInfoPrinterLegacyPass : public FunctionPass {
+public:
+ PredicateInfoPrinterLegacyPass();
+
+ static char ID;
+ bool runOnFunction(Function &) override;
+ void getAnalysisUsage(AnalysisUsage &AU) const override;
+};
+
+/// Printer pass for \c PredicateInfo.
+class PredicateInfoPrinterPass
+ : public PassInfoMixin<PredicateInfoPrinterPass> {
+ raw_ostream &OS;
+
+public:
+ explicit PredicateInfoPrinterPass(raw_ostream &OS) : OS(OS) {}
+ PreservedAnalyses run(Function &F, FunctionAnalysisManager &AM);
+};
+
+/// Verifier pass for \c PredicateInfo.
+struct PredicateInfoVerifierPass : PassInfoMixin<PredicateInfoVerifierPass> {
+ PreservedAnalyses run(Function &F, FunctionAnalysisManager &AM);
+};
+
+} // end namespace llvm
+
+#endif // LLVM_TRANSFORMS_UTILS_PREDICATEINFO_H
+
+#ifdef __GNUC__
+#pragma GCC diagnostic pop
+#endif
diff --git a/contrib/libs/llvm12/include/llvm/Transforms/Utils/PromoteMemToReg.h b/contrib/libs/llvm12/include/llvm/Transforms/Utils/PromoteMemToReg.h
new file mode 100644
index 0000000000..1aef4da123
--- /dev/null
+++ b/contrib/libs/llvm12/include/llvm/Transforms/Utils/PromoteMemToReg.h
@@ -0,0 +1,55 @@
+#pragma once
+
+#ifdef __GNUC__
+#pragma GCC diagnostic push
+#pragma GCC diagnostic ignored "-Wunused-parameter"
+#endif
+
+//===- PromoteMemToReg.h - Promote Allocas to Scalars -----------*- C++ -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// This file exposes an interface to promote alloca instructions to SSA
+// registers, by using the SSA construction algorithm.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_TRANSFORMS_UTILS_PROMOTEMEMTOREG_H
+#define LLVM_TRANSFORMS_UTILS_PROMOTEMEMTOREG_H
+
+namespace llvm {
+
+template <typename T> class ArrayRef;
+class AllocaInst;
+class DominatorTree;
+class AssumptionCache;
+
+/// Return true if this alloca is legal for promotion.
+///
+/// This is true if there are only loads, stores, and lifetime markers
+/// (transitively) using this alloca. This also enforces that there is only
+/// ever one layer of bitcasts or GEPs between the alloca and the lifetime
+/// markers.
+bool isAllocaPromotable(const AllocaInst *AI);
+
+/// Promote the specified list of alloca instructions into scalar
+/// registers, inserting PHI nodes as appropriate.
+///
+/// This function makes use of DominanceFrontier information. This function
+/// does not modify the CFG of the function at all. All allocas must be from
+/// the same function.
+///
+void PromoteMemToReg(ArrayRef<AllocaInst *> Allocas, DominatorTree &DT,
+ AssumptionCache *AC = nullptr);
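+
+// A minimal usage sketch (illustrative, not part of the upstream header; F,
+// DT and AC are assumed to be provided by the calling pass):
+//
+//   SmallVector<AllocaInst *, 8> Allocas;
+//   for (Instruction &I : F.getEntryBlock())
+//     if (auto *AI = dyn_cast<AllocaInst>(&I))
+//       if (isAllocaPromotable(AI))
+//         Allocas.push_back(AI);
+//   if (!Allocas.empty())
+//     PromoteMemToReg(Allocas, DT, &AC);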
+
+} // End llvm namespace
+
+#endif
+
+#ifdef __GNUC__
+#pragma GCC diagnostic pop
+#endif
diff --git a/contrib/libs/llvm12/include/llvm/Transforms/Utils/SSAUpdater.h b/contrib/libs/llvm12/include/llvm/Transforms/Utils/SSAUpdater.h
new file mode 100644
index 0000000000..dbe889fb75
--- /dev/null
+++ b/contrib/libs/llvm12/include/llvm/Transforms/Utils/SSAUpdater.h
@@ -0,0 +1,187 @@
+#pragma once
+
+#ifdef __GNUC__
+#pragma GCC diagnostic push
+#pragma GCC diagnostic ignored "-Wunused-parameter"
+#endif
+
+//===- SSAUpdater.h - Unstructured SSA Update Tool --------------*- C++ -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// This file declares the SSAUpdater class.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_TRANSFORMS_UTILS_SSAUPDATER_H
+#define LLVM_TRANSFORMS_UTILS_SSAUPDATER_H
+
+#include "llvm/ADT/ArrayRef.h"
+#include "llvm/ADT/StringRef.h"
+#include <string>
+
+namespace llvm {
+
+class BasicBlock;
+class Instruction;
+class LoadInst;
+class PHINode;
+template <typename T> class SmallVectorImpl;
+template <typename T> class SSAUpdaterTraits;
+class Type;
+class Use;
+class Value;
+
+/// Helper class for SSA formation on a set of values defined in
+/// multiple blocks.
+///
+/// This is used when code duplication or another unstructured
+/// transformation wants to rewrite a set of uses of one value with uses of a
+/// set of values.
+class SSAUpdater {
+ friend class SSAUpdaterTraits<SSAUpdater>;
+
+private:
+ /// This keeps track of which value to use on a per-block basis. When we
+ /// insert PHI nodes, we keep track of them here.
+ void *AV = nullptr;
+
+ /// ProtoType holds the type of the values being rewritten.
+ Type *ProtoType = nullptr;
+
+ /// PHI nodes are given a name based on ProtoName.
+ std::string ProtoName;
+
+ /// If this is non-null, the SSAUpdater adds all PHI nodes that it creates to
+ /// the vector.
+ SmallVectorImpl<PHINode *> *InsertedPHIs;
+
+public:
+ /// If InsertedPHIs is specified, it will be filled
+ /// in with all PHI Nodes created by rewriting.
+ explicit SSAUpdater(SmallVectorImpl<PHINode *> *InsertedPHIs = nullptr);
+ SSAUpdater(const SSAUpdater &) = delete;
+ SSAUpdater &operator=(const SSAUpdater &) = delete;
+ ~SSAUpdater();
+
+ /// Reset this object to get ready for a new set of SSA updates with
+ /// type 'Ty'.
+ ///
+ /// PHI nodes get a name based on 'Name'.
+ void Initialize(Type *Ty, StringRef Name);
+
+ /// Indicate that a rewritten value is available in the specified block
+ /// with the specified value.
+ void AddAvailableValue(BasicBlock *BB, Value *V);
+
+ /// Return true if the SSAUpdater already has a value for the specified
+ /// block.
+ bool HasValueForBlock(BasicBlock *BB) const;
+
+ /// Return the value for the specified block if the SSAUpdater has one,
+ /// otherwise return nullptr.
+ Value *FindValueForBlock(BasicBlock *BB) const;
+
+ /// Construct SSA form, materializing a value that is live at the end
+ /// of the specified block.
+ Value *GetValueAtEndOfBlock(BasicBlock *BB);
+
+ /// Construct SSA form, materializing a value that is live in the
+ /// middle of the specified block.
+ ///
+ /// \c GetValueInMiddleOfBlock is the same as \c GetValueAtEndOfBlock except
+ /// in one important case: if there is a definition of the rewritten value
+ /// after the 'use' in BB. Consider code like this:
+ ///
+ /// \code
+ /// X1 = ...
+ /// SomeBB:
+ /// use(X)
+ /// X2 = ...
+ /// br Cond, SomeBB, OutBB
+ /// \endcode
+ ///
+ /// In this case, there are two values (X1 and X2) added to the AvailableVals
+ /// set by the client of the rewriter, and those values are both live out of
+ /// their respective blocks. However, the use of X happens in the *middle* of
+ /// a block. Because of this, we need to insert a new PHI node in SomeBB to
+ /// merge the appropriate values, and this value isn't live out of the block.
+ Value *GetValueInMiddleOfBlock(BasicBlock *BB);
+
+ /// Rewrite a use of the symbolic value.
+ ///
+ /// This handles PHI nodes, which use their value in the corresponding
+ /// predecessor. Note that this will not work if the use is supposed to be
+ /// rewritten to a value defined in the same block as the use, but above it.
+ /// Any 'AddAvailableValue's added for the use's block will be considered to
+ /// be below it.
+ void RewriteUse(Use &U);
+
+ /// Rewrite a use like \c RewriteUse but handling in-block definitions.
+ ///
+ /// This version of the method can rewrite uses in the same block as
+ /// a definition, because it assumes that all uses of a value are below any
+ /// inserted values.
+ void RewriteUseAfterInsertions(Use &U);
+
+private:
+ Value *GetValueAtEndOfBlockInternal(BasicBlock *BB);
+};
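+
+// A minimal rewrite sketch (illustrative, not from upstream; BB1/BB2, the
+// available values V1/V2 and the use U are assumptions supplied by the
+// caller):
+//
+//   SSAUpdater Updater;
+//   Updater.Initialize(V1->getType(), "rewritten");
+//   Updater.AddAvailableValue(BB1, V1);
+//   Updater.AddAvailableValue(BB2, V2);
+//   Updater.RewriteUse(U);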
+
+/// Helper class for promoting a collection of loads and stores into SSA
+/// Form using the SSAUpdater.
+///
+/// This handles complexities that SSAUpdater doesn't, such as multiple loads
+/// and stores in one block.
+///
+/// Clients of this class are expected to subclass this and implement the
+/// virtual methods.
+class LoadAndStorePromoter {
+protected:
+ SSAUpdater &SSA;
+
+public:
+ LoadAndStorePromoter(ArrayRef<const Instruction *> Insts,
+ SSAUpdater &S, StringRef Name = StringRef());
+ virtual ~LoadAndStorePromoter() = default;
+
+ /// This does the promotion.
+ ///
+ /// Insts is a list of loads and stores to promote, and Name is the basename
+ /// for the PHIs to insert. After this is complete, the loads and stores are
+ /// removed from the code.
+ void run(const SmallVectorImpl<Instruction *> &Insts);
+
+ /// Return true if the specified instruction is in the Inst list.
+ ///
+ /// The Insts list is the one passed into the constructor. Clients should
+ /// implement this with a more efficient version if possible.
+ virtual bool isInstInList(Instruction *I,
+ const SmallVectorImpl<Instruction *> &Insts) const;
+
+ /// This hook is invoked after all the stores are found and inserted as
+ /// available values.
+ virtual void doExtraRewritesBeforeFinalDeletion() {}
+
+ /// Clients can choose to implement this to get notified right before
+ /// a load is RAUW'd with another value.
+ virtual void replaceLoadWithValue(LoadInst *LI, Value *V) const {}
+
+ /// Called before each instruction is deleted.
+ virtual void instructionDeleted(Instruction *I) const {}
+
+ /// Called to update debug info associated with the instruction.
+ virtual void updateDebugInfo(Instruction *I) const {}
+};
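+
+// A minimal subclass sketch (illustrative, not from upstream; all overrides
+// shown here are optional hooks):
+//
+//   struct SimplePromoter : LoadAndStorePromoter {
+//     SimplePromoter(ArrayRef<const Instruction *> Insts, SSAUpdater &S)
+//         : LoadAndStorePromoter(Insts, S, "promoted") {}
+//     void instructionDeleted(Instruction *I) const override {
+//       // e.g. drop I from any side tables the client keeps.
+//     }
+//   };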
+
+} // end namespace llvm
+
+#endif // LLVM_TRANSFORMS_UTILS_SSAUPDATER_H
+
+#ifdef __GNUC__
+#pragma GCC diagnostic pop
+#endif
diff --git a/contrib/libs/llvm12/include/llvm/Transforms/Utils/SSAUpdaterBulk.h b/contrib/libs/llvm12/include/llvm/Transforms/Utils/SSAUpdaterBulk.h
new file mode 100644
index 0000000000..eb85251f1a
--- /dev/null
+++ b/contrib/libs/llvm12/include/llvm/Transforms/Utils/SSAUpdaterBulk.h
@@ -0,0 +1,101 @@
+#pragma once
+
+#ifdef __GNUC__
+#pragma GCC diagnostic push
+#pragma GCC diagnostic ignored "-Wunused-parameter"
+#endif
+
+//===- SSAUpdaterBulk.h - Unstructured SSA Update Tool ----------*- C++ -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// This file declares the SSAUpdaterBulk class.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_TRANSFORMS_UTILS_SSAUPDATERBULK_H
+#define LLVM_TRANSFORMS_UTILS_SSAUPDATERBULK_H
+
+#include "llvm/ADT/DenseMap.h"
+#include "llvm/ADT/StringRef.h"
+#include "llvm/IR/PredIteratorCache.h"
+
+namespace llvm {
+
+class BasicBlock;
+class PHINode;
+template <typename T> class SmallVectorImpl;
+class Type;
+class Use;
+class Value;
+class DominatorTree;
+
+/// Helper class for SSA formation on a set of values defined in multiple
+/// blocks.
+///
+/// This is used when code duplication or another unstructured transformation
+/// wants to rewrite a set of uses of one value with uses of a set of values.
+/// The update is done only when RewriteAllUses is called; all other methods
+/// are used for book-keeping. That helps to share some common computations
+/// between updates of different uses (which is not the case when the
+/// traditional SSAUpdater is used).
+class SSAUpdaterBulk {
+ struct RewriteInfo {
+ DenseMap<BasicBlock *, Value *> Defines;
+ SmallVector<Use *, 4> Uses;
+ StringRef Name;
+ Type *Ty;
+ RewriteInfo() {}
+ RewriteInfo(StringRef &N, Type *T) : Name(N), Ty(T) {}
+ };
+ SmallVector<RewriteInfo, 4> Rewrites;
+
+ PredIteratorCache PredCache;
+
+ Value *computeValueAt(BasicBlock *BB, RewriteInfo &R, DominatorTree *DT);
+
+public:
+ explicit SSAUpdaterBulk() {}
+ SSAUpdaterBulk(const SSAUpdaterBulk &) = delete;
+ SSAUpdaterBulk &operator=(const SSAUpdaterBulk &) = delete;
+ ~SSAUpdaterBulk() {}
+
+ /// Add a new variable to the SSA rewriter. This needs to be called before
+ /// AddAvailableValue or AddUse calls. The return value is the variable ID,
+ /// which needs to be passed to AddAvailableValue and AddUse.
+ unsigned AddVariable(StringRef Name, Type *Ty);
+
+ /// Indicate that a rewritten value is available in the specified block with
+ /// the specified value.
+ void AddAvailableValue(unsigned Var, BasicBlock *BB, Value *V);
+
+ /// Record a use of the symbolic value. This use will be updated with a
+ /// rewritten value when RewriteAllUses is called.
+ void AddUse(unsigned Var, Use *U);
+
+ /// Return true if the SSAUpdater already has a value for the specified
+ /// variable in the specified block.
+ bool HasValueForBlock(unsigned Var, BasicBlock *BB);
+
+ /// Perform all the necessary updates, including new PHI-nodes insertion and
+ /// the requested uses update.
+ ///
+ /// The function requires dominator tree DT, which is used for computing
+ /// locations for new phi-nodes insertions. If a nonnull pointer to a vector
+ /// InsertedPHIs is passed, all the new phi-nodes will be added to this
+ /// vector.
+ void RewriteAllUses(DominatorTree *DT,
+ SmallVectorImpl<PHINode *> *InsertedPHIs = nullptr);
+};
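+
+// A minimal usage sketch (illustrative, not from upstream; BB1/BB2, V1/V2,
+// the use U and the dominator tree DT are assumed to exist in the caller):
+//
+//   SSAUpdaterBulk Bulk;
+//   unsigned Var = Bulk.AddVariable("x", V1->getType());
+//   Bulk.AddAvailableValue(Var, BB1, V1);
+//   Bulk.AddAvailableValue(Var, BB2, V2);
+//   Bulk.AddUse(Var, &U);
+//   Bulk.RewriteAllUses(&DT);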
+
+} // end namespace llvm
+
+#endif // LLVM_TRANSFORMS_UTILS_SSAUPDATERBULK_H
+
+#ifdef __GNUC__
+#pragma GCC diagnostic pop
+#endif
diff --git a/contrib/libs/llvm12/include/llvm/Transforms/Utils/SSAUpdaterImpl.h b/contrib/libs/llvm12/include/llvm/Transforms/Utils/SSAUpdaterImpl.h
new file mode 100644
index 0000000000..473e0c2ec2
--- /dev/null
+++ b/contrib/libs/llvm12/include/llvm/Transforms/Utils/SSAUpdaterImpl.h
@@ -0,0 +1,478 @@
+#pragma once
+
+#ifdef __GNUC__
+#pragma GCC diagnostic push
+#pragma GCC diagnostic ignored "-Wunused-parameter"
+#endif
+
+//===- SSAUpdaterImpl.h - SSA Updater Implementation ------------*- C++ -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// This file provides a template that implements the core algorithm for the
+// SSAUpdater and MachineSSAUpdater.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_TRANSFORMS_UTILS_SSAUPDATERIMPL_H
+#define LLVM_TRANSFORMS_UTILS_SSAUPDATERIMPL_H
+
+#include "llvm/ADT/DenseMap.h"
+#include "llvm/ADT/SmallVector.h"
+#include "llvm/Support/Allocator.h"
+#include "llvm/Support/Debug.h"
+#include "llvm/Support/raw_ostream.h"
+
+#define DEBUG_TYPE "ssaupdater"
+
+namespace llvm {
+
+template<typename T> class SSAUpdaterTraits;
+
+template<typename UpdaterT>
+class SSAUpdaterImpl {
+private:
+ UpdaterT *Updater;
+
+ using Traits = SSAUpdaterTraits<UpdaterT>;
+ using BlkT = typename Traits::BlkT;
+ using ValT = typename Traits::ValT;
+ using PhiT = typename Traits::PhiT;
+
+ /// BBInfo - Per-basic block information used internally by SSAUpdaterImpl.
+ /// The predecessors of each block are cached here since pred_iterator is
+ /// slow and we need to iterate over the blocks at least a few times.
+ class BBInfo {
+ public:
+ // Back-pointer to the corresponding block.
+ BlkT *BB;
+
+ // Value to use in this block.
+ ValT AvailableVal;
+
+ // Block that defines the available value.
+ BBInfo *DefBB;
+
+ // Postorder number.
+ int BlkNum = 0;
+
+ // Immediate dominator.
+ BBInfo *IDom = nullptr;
+
+ // Number of predecessor blocks.
+ unsigned NumPreds = 0;
+
+ // Array[NumPreds] of predecessor blocks.
+ BBInfo **Preds = nullptr;
+
+ // Marker for existing PHIs that match.
+ PhiT *PHITag = nullptr;
+
+ BBInfo(BlkT *ThisBB, ValT V)
+ : BB(ThisBB), AvailableVal(V), DefBB(V ? this : nullptr) {}
+ };
+
+ using AvailableValsTy = DenseMap<BlkT *, ValT>;
+
+ AvailableValsTy *AvailableVals;
+
+ SmallVectorImpl<PhiT *> *InsertedPHIs;
+
+ using BlockListTy = SmallVectorImpl<BBInfo *>;
+ using BBMapTy = DenseMap<BlkT *, BBInfo *>;
+
+ BBMapTy BBMap;
+ BumpPtrAllocator Allocator;
+
+public:
+ explicit SSAUpdaterImpl(UpdaterT *U, AvailableValsTy *A,
+ SmallVectorImpl<PhiT *> *Ins) :
+ Updater(U), AvailableVals(A), InsertedPHIs(Ins) {}
+
+ /// GetValue - Check to see if AvailableVals has an entry for the specified
+ /// BB and if so, return it. If not, construct SSA form by first
+ /// calculating the required placement of PHIs and then inserting new PHIs
+ /// where needed.
+ ValT GetValue(BlkT *BB) {
+ SmallVector<BBInfo *, 100> BlockList;
+ BBInfo *PseudoEntry = BuildBlockList(BB, &BlockList);
+
+ // Special case: bail out if BB is unreachable.
+ if (BlockList.size() == 0) {
+ ValT V = Traits::GetUndefVal(BB, Updater);
+ (*AvailableVals)[BB] = V;
+ return V;
+ }
+
+ FindDominators(&BlockList, PseudoEntry);
+ FindPHIPlacement(&BlockList);
+ FindAvailableVals(&BlockList);
+
+ return BBMap[BB]->DefBB->AvailableVal;
+ }
+
+ /// BuildBlockList - Starting from the specified basic block, traverse back
+ /// through its predecessors until reaching blocks with known values.
+ /// Create BBInfo structures for the blocks and append them to the block
+ /// list.
+ BBInfo *BuildBlockList(BlkT *BB, BlockListTy *BlockList) {
+ SmallVector<BBInfo *, 10> RootList;
+ SmallVector<BBInfo *, 64> WorkList;
+
+ BBInfo *Info = new (Allocator) BBInfo(BB, 0);
+ BBMap[BB] = Info;
+ WorkList.push_back(Info);
+
+ // Search backward from BB, creating BBInfos along the way and stopping
+ // when reaching blocks that define the value. Record those defining
+ // blocks on the RootList.
+ SmallVector<BlkT *, 10> Preds;
+ while (!WorkList.empty()) {
+ Info = WorkList.pop_back_val();
+ Preds.clear();
+ Traits::FindPredecessorBlocks(Info->BB, &Preds);
+ Info->NumPreds = Preds.size();
+ if (Info->NumPreds == 0)
+ Info->Preds = nullptr;
+ else
+ Info->Preds = static_cast<BBInfo **>(Allocator.Allocate(
+ Info->NumPreds * sizeof(BBInfo *), alignof(BBInfo *)));
+
+ for (unsigned p = 0; p != Info->NumPreds; ++p) {
+ BlkT *Pred = Preds[p];
+ // Check if BBMap already has a BBInfo for the predecessor block.
+ typename BBMapTy::value_type &BBMapBucket =
+ BBMap.FindAndConstruct(Pred);
+ if (BBMapBucket.second) {
+ Info->Preds[p] = BBMapBucket.second;
+ continue;
+ }
+
+ // Create a new BBInfo for the predecessor.
+ ValT PredVal = AvailableVals->lookup(Pred);
+ BBInfo *PredInfo = new (Allocator) BBInfo(Pred, PredVal);
+ BBMapBucket.second = PredInfo;
+ Info->Preds[p] = PredInfo;
+
+ if (PredInfo->AvailableVal) {
+ RootList.push_back(PredInfo);
+ continue;
+ }
+ WorkList.push_back(PredInfo);
+ }
+ }
+
+ // Now that we know what blocks are backwards-reachable from the starting
+ // block, do a forward depth-first traversal to assign postorder numbers
+ // to those blocks.
+ BBInfo *PseudoEntry = new (Allocator) BBInfo(nullptr, 0);
+ unsigned BlkNum = 1;
+
+ // Initialize the worklist with the roots from the backward traversal.
+ while (!RootList.empty()) {
+ Info = RootList.pop_back_val();
+ Info->IDom = PseudoEntry;
+ Info->BlkNum = -1;
+ WorkList.push_back(Info);
+ }
+
+ while (!WorkList.empty()) {
+ Info = WorkList.back();
+
+ if (Info->BlkNum == -2) {
+ // All the successors have been handled; assign the postorder number.
+ Info->BlkNum = BlkNum++;
+ // If not a root, put it on the BlockList.
+ if (!Info->AvailableVal)
+ BlockList->push_back(Info);
+ WorkList.pop_back();
+ continue;
+ }
+
+ // Leave this entry on the worklist, but set its BlkNum to mark that its
+ // successors have been put on the worklist. When it returns to the top
+ // of the list, after handling its successors, it will be assigned a
+ // number.
+ Info->BlkNum = -2;
+
+ // Add unvisited successors to the work list.
+ for (typename Traits::BlkSucc_iterator SI =
+ Traits::BlkSucc_begin(Info->BB),
+ E = Traits::BlkSucc_end(Info->BB); SI != E; ++SI) {
+ BBInfo *SuccInfo = BBMap[*SI];
+ if (!SuccInfo || SuccInfo->BlkNum)
+ continue;
+ SuccInfo->BlkNum = -1;
+ WorkList.push_back(SuccInfo);
+ }
+ }
+ PseudoEntry->BlkNum = BlkNum;
+ return PseudoEntry;
+ }
+
+ /// IntersectDominators - This is the dataflow lattice "meet" operation for
+ /// finding dominators. Given two basic blocks, it walks up the dominator
+ /// tree until it finds a common dominator of both. It uses the postorder
+ /// number of the blocks to determine how to do that.
+ BBInfo *IntersectDominators(BBInfo *Blk1, BBInfo *Blk2) {
+ while (Blk1 != Blk2) {
+ while (Blk1->BlkNum < Blk2->BlkNum) {
+ Blk1 = Blk1->IDom;
+ if (!Blk1)
+ return Blk2;
+ }
+ while (Blk2->BlkNum < Blk1->BlkNum) {
+ Blk2 = Blk2->IDom;
+ if (!Blk2)
+ return Blk1;
+ }
+ }
+ return Blk1;
+ }
+
+ /// FindDominators - Calculate the dominator tree for the subset of the CFG
+ /// corresponding to the basic blocks on the BlockList. This uses the
+ /// algorithm from: "A Simple, Fast Dominance Algorithm" by Cooper, Harvey
+ /// and Kennedy, published in Software--Practice and Experience, 2001,
+ /// 4:1-10. Because the CFG subset does not include any edges leading into
+ /// blocks that define the value, the results are not the usual dominator
+ /// tree. The CFG subset has a single pseudo-entry node with edges to a set
+ /// of root nodes for blocks that define the value. The dominators for this
+ /// subset CFG are not the standard dominators but they are adequate for
+ /// placing PHIs within the subset CFG.
+ void FindDominators(BlockListTy *BlockList, BBInfo *PseudoEntry) {
+ bool Changed;
+ do {
+ Changed = false;
+ // Iterate over the list in reverse order, i.e., forward on CFG edges.
+ for (typename BlockListTy::reverse_iterator I = BlockList->rbegin(),
+ E = BlockList->rend(); I != E; ++I) {
+ BBInfo *Info = *I;
+ BBInfo *NewIDom = nullptr;
+
+ // Iterate through the block's predecessors.
+ for (unsigned p = 0; p != Info->NumPreds; ++p) {
+ BBInfo *Pred = Info->Preds[p];
+
+ // Treat an unreachable predecessor as a definition with 'undef'.
+ if (Pred->BlkNum == 0) {
+ Pred->AvailableVal = Traits::GetUndefVal(Pred->BB, Updater);
+ (*AvailableVals)[Pred->BB] = Pred->AvailableVal;
+ Pred->DefBB = Pred;
+ Pred->BlkNum = PseudoEntry->BlkNum;
+ PseudoEntry->BlkNum++;
+ }
+
+ if (!NewIDom)
+ NewIDom = Pred;
+ else
+ NewIDom = IntersectDominators(NewIDom, Pred);
+ }
+
+ // Check if the IDom value has changed.
+ if (NewIDom && NewIDom != Info->IDom) {
+ Info->IDom = NewIDom;
+ Changed = true;
+ }
+ }
+ } while (Changed);
+ }
+
+ /// IsDefInDomFrontier - Search up the dominator tree from Pred to IDom for
+ /// any blocks containing definitions of the value. If one is found, then
+ /// the successor of Pred is in the dominance frontier for the definition,
+ /// and this function returns true.
+ bool IsDefInDomFrontier(const BBInfo *Pred, const BBInfo *IDom) {
+ for (; Pred != IDom; Pred = Pred->IDom) {
+ if (Pred->DefBB == Pred)
+ return true;
+ }
+ return false;
+ }
+
+ /// FindPHIPlacement - PHIs are needed in the iterated dominance frontiers
+ /// of the known definitions. Iteratively add PHIs in the dom frontiers
+ /// until nothing changes. Along the way, keep track of the nearest
+ /// dominating definitions for non-PHI blocks.
+ void FindPHIPlacement(BlockListTy *BlockList) {
+ bool Changed;
+ do {
+ Changed = false;
+ // Iterate over the list in reverse order, i.e., forward on CFG edges.
+ for (typename BlockListTy::reverse_iterator I = BlockList->rbegin(),
+ E = BlockList->rend(); I != E; ++I) {
+ BBInfo *Info = *I;
+
+ // If this block already needs a PHI, there is nothing to do here.
+ if (Info->DefBB == Info)
+ continue;
+
+ // Default to use the same def as the immediate dominator.
+ BBInfo *NewDefBB = Info->IDom->DefBB;
+ for (unsigned p = 0; p != Info->NumPreds; ++p) {
+ if (IsDefInDomFrontier(Info->Preds[p], Info->IDom)) {
+ // Need a PHI here.
+ NewDefBB = Info;
+ break;
+ }
+ }
+
+ // Check if anything changed.
+ if (NewDefBB != Info->DefBB) {
+ Info->DefBB = NewDefBB;
+ Changed = true;
+ }
+ }
+ } while (Changed);
+ }
+
+ /// FindAvailableVals - If this block requires a PHI, first check if an
+ /// existing PHI matches the PHI placement and reaching definitions computed
+ /// earlier, and if not, create a new PHI. Visit all the block's
+ /// predecessors to calculate the available value for each one and fill in
+ /// the incoming values for a new PHI.
+ void FindAvailableVals(BlockListTy *BlockList) {
+ // Go through the worklist in forward order (i.e., backward through the CFG)
+ // and check if existing PHIs can be used. If not, create empty PHIs where
+ // they are needed.
+ for (typename BlockListTy::iterator I = BlockList->begin(),
+ E = BlockList->end(); I != E; ++I) {
+ BBInfo *Info = *I;
+ // Check if there needs to be a PHI in BB.
+ if (Info->DefBB != Info)
+ continue;
+
+ // Look for an existing PHI.
+ FindExistingPHI(Info->BB, BlockList);
+ if (Info->AvailableVal)
+ continue;
+
+ ValT PHI = Traits::CreateEmptyPHI(Info->BB, Info->NumPreds, Updater);
+ Info->AvailableVal = PHI;
+ (*AvailableVals)[Info->BB] = PHI;
+ }
+
+ // Now go back through the worklist in reverse order to fill in the
+ // arguments for any new PHIs added in the forward traversal.
+ for (typename BlockListTy::reverse_iterator I = BlockList->rbegin(),
+ E = BlockList->rend(); I != E; ++I) {
+ BBInfo *Info = *I;
+
+ if (Info->DefBB != Info) {
+ // Record the available value to speed up subsequent uses of this
+ // SSAUpdater for the same value.
+ (*AvailableVals)[Info->BB] = Info->DefBB->AvailableVal;
+ continue;
+ }
+
+ // Check if this block contains a newly added PHI.
+ PhiT *PHI = Traits::ValueIsNewPHI(Info->AvailableVal, Updater);
+ if (!PHI)
+ continue;
+
+ // Iterate through the block's predecessors.
+ for (unsigned p = 0; p != Info->NumPreds; ++p) {
+ BBInfo *PredInfo = Info->Preds[p];
+ BlkT *Pred = PredInfo->BB;
+ // Skip to the nearest preceding definition.
+ if (PredInfo->DefBB != PredInfo)
+ PredInfo = PredInfo->DefBB;
+ Traits::AddPHIOperand(PHI, PredInfo->AvailableVal, Pred);
+ }
+
+ LLVM_DEBUG(dbgs() << " Inserted PHI: " << *PHI << "\n");
+
+ // If the client wants to know about all new instructions, tell it.
+ if (InsertedPHIs) InsertedPHIs->push_back(PHI);
+ }
+ }
+
+ /// FindExistingPHI - Look through the PHI nodes in a block to see if any of
+ /// them match what is needed.
+ void FindExistingPHI(BlkT *BB, BlockListTy *BlockList) {
+ for (auto &SomePHI : BB->phis()) {
+ if (CheckIfPHIMatches(&SomePHI)) {
+ RecordMatchingPHIs(BlockList);
+ break;
+ }
+ // Match failed: clear all the PHITag values.
+ for (typename BlockListTy::iterator I = BlockList->begin(),
+ E = BlockList->end(); I != E; ++I)
+ (*I)->PHITag = nullptr;
+ }
+ }
+
+ /// CheckIfPHIMatches - Check if a PHI node matches the placement and values
+ /// in the BBMap.
+ bool CheckIfPHIMatches(PhiT *PHI) {
+ SmallVector<PhiT *, 20> WorkList;
+ WorkList.push_back(PHI);
+
+ // Mark that the block containing this PHI has been visited.
+ BBMap[PHI->getParent()]->PHITag = PHI;
+
+ while (!WorkList.empty()) {
+ PHI = WorkList.pop_back_val();
+
+ // Iterate through the PHI's incoming values.
+ for (typename Traits::PHI_iterator I = Traits::PHI_begin(PHI),
+ E = Traits::PHI_end(PHI); I != E; ++I) {
+ ValT IncomingVal = I.getIncomingValue();
+ BBInfo *PredInfo = BBMap[I.getIncomingBlock()];
+ // Skip to the nearest preceding definition.
+ if (PredInfo->DefBB != PredInfo)
+ PredInfo = PredInfo->DefBB;
+
+ // Check if it matches the expected value.
+ if (PredInfo->AvailableVal) {
+ if (IncomingVal == PredInfo->AvailableVal)
+ continue;
+ return false;
+ }
+
+ // Check if the value is a PHI in the correct block.
+ PhiT *IncomingPHIVal = Traits::ValueIsPHI(IncomingVal, Updater);
+ if (!IncomingPHIVal || IncomingPHIVal->getParent() != PredInfo->BB)
+ return false;
+
+ // If this block has already been visited, check if this PHI matches.
+ if (PredInfo->PHITag) {
+ if (IncomingPHIVal == PredInfo->PHITag)
+ continue;
+ return false;
+ }
+ PredInfo->PHITag = IncomingPHIVal;
+
+ WorkList.push_back(IncomingPHIVal);
+ }
+ }
+ return true;
+ }
+
+ /// RecordMatchingPHIs - For each PHI node that matches, record it in both
+ /// the BBMap and the AvailableVals mapping.
+ void RecordMatchingPHIs(BlockListTy *BlockList) {
+ for (typename BlockListTy::iterator I = BlockList->begin(),
+ E = BlockList->end(); I != E; ++I)
+ if (PhiT *PHI = (*I)->PHITag) {
+ BlkT *BB = PHI->getParent();
+ ValT PHIVal = Traits::GetPHIValue(PHI);
+ (*AvailableVals)[BB] = PHIVal;
+ BBMap[BB]->AvailableVal = PHIVal;
+ }
+ }
+};
+
+} // end namespace llvm
+
+#undef DEBUG_TYPE // "ssaupdater"
+
+#endif // LLVM_TRANSFORMS_UTILS_SSAUPDATERIMPL_H
+
+#ifdef __GNUC__
+#pragma GCC diagnostic pop
+#endif
diff --git a/contrib/libs/llvm12/include/llvm/Transforms/Utils/SanitizerStats.h b/contrib/libs/llvm12/include/llvm/Transforms/Utils/SanitizerStats.h
new file mode 100644
index 0000000000..5036a515de
--- /dev/null
+++ b/contrib/libs/llvm12/include/llvm/Transforms/Utils/SanitizerStats.h
@@ -0,0 +1,66 @@
+#pragma once
+
+#ifdef __GNUC__
+#pragma GCC diagnostic push
+#pragma GCC diagnostic ignored "-Wunused-parameter"
+#endif
+
+//===- SanitizerStats.h - Sanitizer statistics gathering -------*- C++ -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// Declares functions and data structures for sanitizer statistics gathering.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_TRANSFORMS_UTILS_SANITIZERSTATS_H
+#define LLVM_TRANSFORMS_UTILS_SANITIZERSTATS_H
+
+#include "llvm/IR/IRBuilder.h"
+
+namespace llvm {
+
+// Number of bits in data that are used for the sanitizer kind. Needs to match
+// __sanitizer::kKindBits in compiler-rt/lib/stats/stats.h
+enum { kSanitizerStatKindBits = 3 };
+
+enum SanitizerStatKind {
+ SanStat_CFI_VCall,
+ SanStat_CFI_NVCall,
+ SanStat_CFI_DerivedCast,
+ SanStat_CFI_UnrelatedCast,
+ SanStat_CFI_ICall,
+};
+
+struct SanitizerStatReport {
+ SanitizerStatReport(Module *M);
+
+ /// Generates code into B that increments a location-specific counter tagged
+ /// with the given sanitizer kind SK.
+ void create(IRBuilder<> &B, SanitizerStatKind SK);
+
+ /// Finalize module stats array and add global constructor to register it.
+ void finish();
+
+private:
+ Module *M;
+ GlobalVariable *ModuleStatsGV;
+ ArrayType *StatTy;
+ StructType *EmptyModuleStatsTy;
+
+ std::vector<Constant *> Inits;
+ ArrayType *makeModuleStatsArrayTy();
+ StructType *makeModuleStatsTy();
+};
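+
+// A minimal usage sketch (illustrative, not from upstream; B is an IRBuilder
+// already positioned at the instrumentation point):
+//
+//   SanitizerStatReport Stats(&M);
+//   Stats.create(B, SanStat_CFI_VCall);
+//   // ... instrument the rest of the module ...
+//   Stats.finish();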
+
+}
+
+#endif
+
+#ifdef __GNUC__
+#pragma GCC diagnostic pop
+#endif
diff --git a/contrib/libs/llvm12/include/llvm/Transforms/Utils/ScalarEvolutionExpander.h b/contrib/libs/llvm12/include/llvm/Transforms/Utils/ScalarEvolutionExpander.h
new file mode 100644
index 0000000000..02e8cf94ba
--- /dev/null
+++ b/contrib/libs/llvm12/include/llvm/Transforms/Utils/ScalarEvolutionExpander.h
@@ -0,0 +1,522 @@
+#pragma once
+
+#ifdef __GNUC__
+#pragma GCC diagnostic push
+#pragma GCC diagnostic ignored "-Wunused-parameter"
+#endif
+
+//===---- llvm/Analysis/ScalarEvolutionExpander.h - SCEV Exprs --*- C++ -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// This file defines the classes used to generate code from scalar expressions.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_ANALYSIS_SCALAREVOLUTIONEXPANDER_H
+#define LLVM_ANALYSIS_SCALAREVOLUTIONEXPANDER_H
+
+#include "llvm/ADT/DenseMap.h"
+#include "llvm/ADT/DenseSet.h"
+#include "llvm/ADT/Optional.h"
+#include "llvm/ADT/SmallVector.h"
+#include "llvm/Analysis/ScalarEvolutionExpressions.h"
+#include "llvm/Analysis/ScalarEvolutionNormalization.h"
+#include "llvm/Analysis/TargetFolder.h"
+#include "llvm/Analysis/TargetTransformInfo.h"
+#include "llvm/IR/IRBuilder.h"
+#include "llvm/IR/ValueHandle.h"
+#include "llvm/Support/CommandLine.h"
+
+namespace llvm {
+extern cl::opt<unsigned> SCEVCheapExpansionBudget;
+
+/// Return true if the given expression is safe to expand in the sense that
+/// all materialized values are safe to speculate anywhere their operands are
+/// defined.
+bool isSafeToExpand(const SCEV *S, ScalarEvolution &SE);
+
+/// Return true if the given expression is safe to expand in the sense that
+/// all materialized values are defined and safe to speculate at the specified
+/// location and their operands are defined at this location.
+bool isSafeToExpandAt(const SCEV *S, const Instruction *InsertionPoint,
+ ScalarEvolution &SE);
+
+/// struct for holding enough information to help calculate the cost of the
+/// given SCEV when expanded into IR.
+struct SCEVOperand {
+ explicit SCEVOperand(unsigned Opc, int Idx, const SCEV *S) :
+ ParentOpcode(Opc), OperandIdx(Idx), S(S) { }
+ /// LLVM instruction opcode that uses the operand.
+ unsigned ParentOpcode;
+ /// The use index of an expanded instruction.
+ int OperandIdx;
+ /// The SCEV operand to be costed.
+ const SCEV* S;
+};
+
+/// This class uses information about analyzed scalars to rewrite expressions
+/// in canonical form.
+///
+/// Clients should create an instance of this class when rewriting is needed,
+/// and destroy it when finished to allow the release of the associated
+/// memory.
+class SCEVExpander : public SCEVVisitor<SCEVExpander, Value *> {
+ ScalarEvolution &SE;
+ const DataLayout &DL;
+
+ // New instructions receive a name to identify them with the current pass.
+ const char *IVName;
+
+ /// Indicates whether LCSSA phis should be created for inserted values.
+ bool PreserveLCSSA;
+
+ // InsertedExpressions caches Values for reuse, so must track RAUW.
+ DenseMap<std::pair<const SCEV *, Instruction *>, TrackingVH<Value>>
+ InsertedExpressions;
+
+ // InsertedValues only flags inserted instructions so needs no RAUW.
+ DenseSet<AssertingVH<Value>> InsertedValues;
+ DenseSet<AssertingVH<Value>> InsertedPostIncValues;
+
+ /// Keep track of the existing IR values re-used during expansion.
+ /// FIXME: Ideally re-used instructions would not be added to
+ /// InsertedValues/InsertedPostIncValues.
+ SmallPtrSet<Value *, 16> ReusedValues;
+
+ /// A memoization of the "relevant" loop for a given SCEV.
+ DenseMap<const SCEV *, const Loop *> RelevantLoops;
+
+ /// Addrecs referring to any of the given loops are expanded in post-inc
+ /// mode. For example, expanding {1,+,1}<L> in post-inc mode returns the add
+ /// instruction that adds one to the phi for {0,+,1}<L>, as opposed to a new
+ /// phi starting at 1. This is only supported in non-canonical mode.
+ PostIncLoopSet PostIncLoops;
+
+ /// When this is non-null, addrecs expanded in the loop it indicates should
+ /// be inserted with increments at IVIncInsertPos.
+ const Loop *IVIncInsertLoop;
+
+ /// When expanding addrecs in the IVIncInsertLoop loop, insert the IV
+ /// increment at this position.
+ Instruction *IVIncInsertPos;
+
+ /// Phis that complete an IV chain, tracked so that they can be reused.
+ DenseSet<AssertingVH<PHINode>> ChainedPhis;
+
+ /// When true, SCEVExpander tries to expand expressions in "canonical" form.
+ /// When false, expressions are expanded in a more literal form.
+ ///
+ /// In "canonical" form addrecs are expanded as arithmetic based on a
+ /// canonical induction variable. Note that CanonicalMode doesn't guarantee
+ /// that all expressions are expanded in "canonical" form. For some
+ /// expressions literal mode can be preferred.
+ bool CanonicalMode;
+
+ /// When invoked from LSR, the expander is in "strength reduction" mode. The
+ /// only difference is that phi's are only reused if they are already in
+ /// "expanded" form.
+ bool LSRMode;
+
+ typedef IRBuilder<TargetFolder, IRBuilderCallbackInserter> BuilderType;
+ BuilderType Builder;
+
+ // RAII object that stores the current insertion point and restores it when
+ // the object is destroyed. This includes the debug location. Duplicated
+ // from InsertPointGuard to add SetInsertPoint() which is used to update the
+ // InsertPointGuards stack when insert points are moved during SCEV
+ // expansion.
+ class SCEVInsertPointGuard {
+ IRBuilderBase &Builder;
+ AssertingVH<BasicBlock> Block;
+ BasicBlock::iterator Point;
+ DebugLoc DbgLoc;
+ SCEVExpander *SE;
+
+ SCEVInsertPointGuard(const SCEVInsertPointGuard &) = delete;
+ SCEVInsertPointGuard &operator=(const SCEVInsertPointGuard &) = delete;
+
+ public:
+ SCEVInsertPointGuard(IRBuilderBase &B, SCEVExpander *SE)
+ : Builder(B), Block(B.GetInsertBlock()), Point(B.GetInsertPoint()),
+ DbgLoc(B.getCurrentDebugLocation()), SE(SE) {
+ SE->InsertPointGuards.push_back(this);
+ }
+
+ ~SCEVInsertPointGuard() {
+ // These guards should always be created/destroyed in FIFO order since they
+ // are used to guard lexically scoped blocks of code in
+ // ScalarEvolutionExpander.
+ assert(SE->InsertPointGuards.back() == this);
+ SE->InsertPointGuards.pop_back();
+ Builder.restoreIP(IRBuilderBase::InsertPoint(Block, Point));
+ Builder.SetCurrentDebugLocation(DbgLoc);
+ }
+
+ BasicBlock::iterator GetInsertPoint() const { return Point; }
+ void SetInsertPoint(BasicBlock::iterator I) { Point = I; }
+ };
+
+ /// Stack of pointers to saved insert points, used to keep insert points
+ /// consistent when instructions are moved.
+ SmallVector<SCEVInsertPointGuard *, 8> InsertPointGuards;
+
+#ifndef NDEBUG
+ const char *DebugType;
+#endif
+
+ friend struct SCEVVisitor<SCEVExpander, Value *>;
+
+public:
+ /// Construct a SCEVExpander in "canonical" mode.
+ explicit SCEVExpander(ScalarEvolution &se, const DataLayout &DL,
+ const char *name, bool PreserveLCSSA = true)
+ : SE(se), DL(DL), IVName(name), PreserveLCSSA(PreserveLCSSA),
+ IVIncInsertLoop(nullptr), IVIncInsertPos(nullptr), CanonicalMode(true),
+ LSRMode(false),
+ Builder(se.getContext(), TargetFolder(DL),
+ IRBuilderCallbackInserter(
+ [this](Instruction *I) { rememberInstruction(I); })) {
+#ifndef NDEBUG
+ DebugType = "";
+#endif
+ }
+
+ ~SCEVExpander() {
+ // Make sure the insert point guard stack is consistent.
+ assert(InsertPointGuards.empty());
+ }
+
+#ifndef NDEBUG
+ void setDebugType(const char *s) { DebugType = s; }
+#endif
+
+ /// Erase the contents of the InsertedExpressions map so that users trying
+ /// to expand the same expression into multiple BasicBlocks or different
+ /// places within the same BasicBlock can do so.
+ void clear() {
+ InsertedExpressions.clear();
+ InsertedValues.clear();
+ InsertedPostIncValues.clear();
+ ReusedValues.clear();
+ ChainedPhis.clear();
+ }
+
+ /// Return a vector containing all instructions inserted during expansion.
+ SmallVector<Instruction *, 32> getAllInsertedInstructions() const {
+ SmallVector<Instruction *, 32> Result;
+ for (auto &VH : InsertedValues) {
+ Value *V = VH;
+ if (ReusedValues.contains(V))
+ continue;
+ if (auto *Inst = dyn_cast<Instruction>(V))
+ Result.push_back(Inst);
+ }
+ for (auto &VH : InsertedPostIncValues) {
+ Value *V = VH;
+ if (ReusedValues.contains(V))
+ continue;
+ if (auto *Inst = dyn_cast<Instruction>(V))
+ Result.push_back(Inst);
+ }
+
+ return Result;
+ }
+
+ /// Return true for expressions that can't be evaluated at runtime
+ /// within the given \b Budget.
+ ///
+ /// \p At specifies the point in code where the user is going to expand
+ /// this expression. Sometimes this knowledge can lead to
+ /// a less pessimistic cost estimation.
+ bool isHighCostExpansion(const SCEV *Expr, Loop *L, unsigned Budget,
+ const TargetTransformInfo *TTI,
+ const Instruction *At) {
+ assert(TTI && "This function requires TTI to be provided.");
+ assert(At && "This function requires At instruction to be provided.");
+ if (!TTI) // In assert-less builds, avoid crashing
+ return true; // by always claiming to be high-cost.
+ SmallVector<SCEVOperand, 8> Worklist;
+ SmallPtrSet<const SCEV *, 8> Processed;
+ int BudgetRemaining = Budget * TargetTransformInfo::TCC_Basic;
+ Worklist.emplace_back(-1, -1, Expr);
+ while (!Worklist.empty()) {
+ const SCEVOperand WorkItem = Worklist.pop_back_val();
+ if (isHighCostExpansionHelper(WorkItem, L, *At, BudgetRemaining,
+ *TTI, Processed, Worklist))
+ return true;
+ }
+ assert(BudgetRemaining >= 0 && "Should have returned from inner loop.");
+ return false;
+ }
+
+ /// Return the induction variable increment's IV operand.
+ Instruction *getIVIncOperand(Instruction *IncV, Instruction *InsertPos,
+ bool allowScale);
+
+ /// Utility for hoisting an IV increment.
+ bool hoistIVInc(Instruction *IncV, Instruction *InsertPos);
+
+ /// Replace congruent phis with their most canonical representative. Return
+ /// the number of phis eliminated.
+ unsigned replaceCongruentIVs(Loop *L, const DominatorTree *DT,
+ SmallVectorImpl<WeakTrackingVH> &DeadInsts,
+ const TargetTransformInfo *TTI = nullptr);
+
+ /// Insert code to directly compute the specified SCEV expression into the
+ /// program. The code is inserted into the specified block.
+ Value *expandCodeFor(const SCEV *SH, Type *Ty, Instruction *I) {
+ return expandCodeForImpl(SH, Ty, I, true);
+ }
+
+ /// Insert code to directly compute the specified SCEV expression into the
+ /// program. The code is inserted into the SCEVExpander's current
+ /// insertion point. If a type is specified, the result will be expanded to
+ /// have that type, with a cast if necessary.
+ Value *expandCodeFor(const SCEV *SH, Type *Ty = nullptr) {
+ return expandCodeForImpl(SH, Ty, true);
+ }
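+
+ // A minimal usage sketch (illustrative, not from upstream; SE, DL, L and
+ // InsertPt are assumed to be provided by the calling pass):
+ //
+ //   SCEVExpander Expander(SE, DL, "scev");
+ //   const SCEV *BECount = SE.getBackedgeTakenCount(L);
+ //   Value *Count =
+ //       Expander.expandCodeFor(BECount, BECount->getType(), InsertPt);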
+
+ /// Generates a code sequence that evaluates this predicate. The inserted
+ /// instructions will be at position \p Loc. The result will be of type i1
+ /// and will have a value of 0 when the predicate is false and 1 otherwise.
+ Value *expandCodeForPredicate(const SCEVPredicate *Pred, Instruction *Loc);
+
+ /// A specialized variant of expandCodeForPredicate, handling the case when
+ /// we are expanding code for a SCEVEqualPredicate.
+ Value *expandEqualPredicate(const SCEVEqualPredicate *Pred, Instruction *Loc);
+
+ /// Generates code that evaluates if the \p AR expression will overflow.
+ Value *generateOverflowCheck(const SCEVAddRecExpr *AR, Instruction *Loc,
+ bool Signed);
+
+ /// A specialized variant of expandCodeForPredicate, handling the case when
+ /// we are expanding code for a SCEVWrapPredicate.
+ Value *expandWrapPredicate(const SCEVWrapPredicate *P, Instruction *Loc);
+
+ /// A specialized variant of expandCodeForPredicate, handling the case when
+ /// we are expanding code for a SCEVUnionPredicate.
+ Value *expandUnionPredicate(const SCEVUnionPredicate *Pred, Instruction *Loc);
+
+ /// Set the current IV increment loop and position.
+ void setIVIncInsertPos(const Loop *L, Instruction *Pos) {
+ assert(!CanonicalMode &&
+ "IV increment positions are not supported in CanonicalMode");
+ IVIncInsertLoop = L;
+ IVIncInsertPos = Pos;
+ }
+
+ /// Enable post-inc expansion for addrecs referring to the given
+ /// loops. Post-inc expansion is only supported in non-canonical mode.
+ void setPostInc(const PostIncLoopSet &L) {
+ assert(!CanonicalMode &&
+ "Post-inc expansion is not supported in CanonicalMode");
+ PostIncLoops = L;
+ }
+
+ /// Disable all post-inc expansion.
+ void clearPostInc() {
+ PostIncLoops.clear();
+
+ // When we change the post-inc loop set, cached expansions may no
+ // longer be valid.
+ InsertedPostIncValues.clear();
+ }
+
+ /// Disable the behavior of expanding expressions in canonical form rather
+ /// than in a more literal form. Non-canonical mode is useful for late
+ /// optimization passes.
+ void disableCanonicalMode() { CanonicalMode = false; }
+
+ void enableLSRMode() { LSRMode = true; }
+
+ /// Set the current insertion point. This is useful if multiple calls to
+ /// expandCodeFor() are going to be made with the same insert point and the
+ /// insert point may be moved during one of the expansions (e.g. if the
+ /// insert point is not a block terminator).
+ void setInsertPoint(Instruction *IP) {
+ assert(IP);
+ Builder.SetInsertPoint(IP);
+ }
+
+ /// Clear the current insertion point. This is useful if the instruction
+ /// that had been serving as the insertion point may have been deleted.
+ void clearInsertPoint() { Builder.ClearInsertionPoint(); }
+
+ /// Set location information used by debugging information.
+ void SetCurrentDebugLocation(DebugLoc L) {
+ Builder.SetCurrentDebugLocation(std::move(L));
+ }
+
+ /// Get location information used by debugging information.
+ DebugLoc getCurrentDebugLocation() const {
+ return Builder.getCurrentDebugLocation();
+ }
+
+ /// Return true if the specified instruction was inserted by the code
+ /// rewriter. If so, the client should not modify the instruction. Note that
+ /// this also includes instructions re-used during expansion.
+ bool isInsertedInstruction(Instruction *I) const {
+ return InsertedValues.count(I) || InsertedPostIncValues.count(I);
+ }
+
+ void setChainedPhi(PHINode *PN) { ChainedPhis.insert(PN); }
+
+ /// Try to find the ValueOffsetPair for S. The function is mainly used to
+ /// check whether S can be expanded cheaply. If this returns a non-None
+ /// value, we know we can codegen the `ValueOffsetPair` into a suitable
+ /// expansion identical to S so that S can be expanded cheaply.
+ ///
+ /// L is a hint which tells in which loop to look for the suitable value.
+ /// On success, the returned value is equivalent to the expanded S at point
+ /// At. Returns None if no such value was found.
+ ///
+ /// Note that this function does not perform an exhaustive search, i.e. if
+ /// it didn't find any value it does not mean that there is no such value.
+ ///
+ Optional<ScalarEvolution::ValueOffsetPair>
+ getRelatedExistingExpansion(const SCEV *S, const Instruction *At, Loop *L);
+
+ /// Returns a suitable insert point after \p I, that dominates \p
+ /// MustDominate. Skips instructions inserted by the expander.
+ BasicBlock::iterator findInsertPointAfter(Instruction *I,
+ Instruction *MustDominate);
+
+private:
+ LLVMContext &getContext() const { return SE.getContext(); }
+
+ /// Insert code to directly compute the specified SCEV expression into the
+ /// program. The code is inserted into the SCEVExpander's current
+ /// insertion point. If a type is specified, the result will be expanded to
+ /// have that type, with a cast if necessary. If \p Root is true, this
+ /// indicates that \p SH is the top-level expression to expand passed from
+ /// an external client call.
+ Value *expandCodeForImpl(const SCEV *SH, Type *Ty, bool Root);
+
+ /// Insert code to directly compute the specified SCEV expression into the
+ /// program. The code is inserted into the specified block. If \p
+ /// Root is true, this indicates that \p SH is the top-level expression to
+ /// expand passed from an external client call.
+ Value *expandCodeForImpl(const SCEV *SH, Type *Ty, Instruction *I, bool Root);
+
+ /// Recursive helper function for isHighCostExpansion.
+ bool isHighCostExpansionHelper(
+ const SCEVOperand &WorkItem, Loop *L, const Instruction &At,
+ int &BudgetRemaining, const TargetTransformInfo &TTI,
+ SmallPtrSetImpl<const SCEV *> &Processed,
+ SmallVectorImpl<SCEVOperand> &Worklist);
+
+ /// Insert the specified binary operator, doing a small amount of work to
+ /// avoid inserting an obviously redundant operation, and hoisting to an
+ /// outer loop when the opportunity is there and it is safe.
+ Value *InsertBinop(Instruction::BinaryOps Opcode, Value *LHS, Value *RHS,
+ SCEV::NoWrapFlags Flags, bool IsSafeToHoist);
+
+ /// Arrange for there to be a cast of V to Ty at IP, reusing an existing
+ /// cast if a suitable one exists, moving an existing cast if a suitable one
+ /// exists but isn't in the right place, or creating a new one.
+ Value *ReuseOrCreateCast(Value *V, Type *Ty, Instruction::CastOps Op,
+ BasicBlock::iterator IP);
+
+ /// Insert a cast of V to the specified type, which must be possible with a
+ /// noop cast, doing what we can to share the casts.
+ Value *InsertNoopCastOfTo(Value *V, Type *Ty);
+
+ /// Expand a SCEVAddExpr with a pointer type into a GEP instead of using
+ /// ptrtoint+arithmetic+inttoptr.
+ Value *expandAddToGEP(const SCEV *const *op_begin, const SCEV *const *op_end,
+ PointerType *PTy, Type *Ty, Value *V);
+ Value *expandAddToGEP(const SCEV *Op, PointerType *PTy, Type *Ty, Value *V);
+
+ /// Find a previous Value in ExprValueMap for expand.
+ ScalarEvolution::ValueOffsetPair
+ FindValueInExprValueMap(const SCEV *S, const Instruction *InsertPt);
+
+ Value *expand(const SCEV *S);
+
+ /// Determine the most "relevant" loop for the given SCEV.
+ const Loop *getRelevantLoop(const SCEV *);
+
+ Value *visitConstant(const SCEVConstant *S) { return S->getValue(); }
+
+ Value *visitPtrToIntExpr(const SCEVPtrToIntExpr *S);
+
+ Value *visitTruncateExpr(const SCEVTruncateExpr *S);
+
+ Value *visitZeroExtendExpr(const SCEVZeroExtendExpr *S);
+
+ Value *visitSignExtendExpr(const SCEVSignExtendExpr *S);
+
+ Value *visitAddExpr(const SCEVAddExpr *S);
+
+ Value *visitMulExpr(const SCEVMulExpr *S);
+
+ Value *visitUDivExpr(const SCEVUDivExpr *S);
+
+ Value *visitAddRecExpr(const SCEVAddRecExpr *S);
+
+ Value *visitSMaxExpr(const SCEVSMaxExpr *S);
+
+ Value *visitUMaxExpr(const SCEVUMaxExpr *S);
+
+ Value *visitSMinExpr(const SCEVSMinExpr *S);
+
+ Value *visitUMinExpr(const SCEVUMinExpr *S);
+
+ Value *visitUnknown(const SCEVUnknown *S) { return S->getValue(); }
+
+ void rememberInstruction(Value *I);
+
+ bool isNormalAddRecExprPHI(PHINode *PN, Instruction *IncV, const Loop *L);
+
+ bool isExpandedAddRecExprPHI(PHINode *PN, Instruction *IncV, const Loop *L);
+
+ Value *expandAddRecExprLiterally(const SCEVAddRecExpr *);
+ PHINode *getAddRecExprPHILiterally(const SCEVAddRecExpr *Normalized,
+ const Loop *L, Type *ExpandTy, Type *IntTy,
+ Type *&TruncTy, bool &InvertStep);
+ Value *expandIVInc(PHINode *PN, Value *StepV, const Loop *L, Type *ExpandTy,
+ Type *IntTy, bool useSubtract);
+
+ void hoistBeforePos(DominatorTree *DT, Instruction *InstToHoist,
+ Instruction *Pos, PHINode *LoopPhi);
+
+ void fixupInsertPoints(Instruction *I);
+
+ /// If required, create LCSSA PHIs for \p Users' operand \p OpIdx. If new
+ /// LCSSA PHIs have been created, return the LCSSA PHI available at \p User.
+ /// If no PHIs have been created, return the unchanged operand \p OpIdx.
+ Value *fixupLCSSAFormFor(Instruction *User, unsigned OpIdx);
+};
+
+/// Helper to remove instructions inserted during SCEV expansion, unless they
+/// are marked as used.
+class SCEVExpanderCleaner {
+ SCEVExpander &Expander;
+
+ DominatorTree &DT;
+
+ /// Indicates whether the result of the expansion is used. If false, the
+ /// instructions added during expansion are removed.
+ bool ResultUsed;
+
+public:
+ SCEVExpanderCleaner(SCEVExpander &Expander, DominatorTree &DT)
+ : Expander(Expander), DT(DT), ResultUsed(false) {}
+
+ ~SCEVExpanderCleaner();
+
+ /// Indicate that the result of the expansion is used.
+ void markResultUsed() { ResultUsed = true; }
+};
+} // namespace llvm
+
+#endif
+
+#ifdef __GNUC__
+#pragma GCC diagnostic pop
+#endif
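As a quick orientation for the interface above, here is a minimal sketch of how a transform might drive the expander together with SCEVExpanderCleaner, which erases the expanded instructions unless markResultUsed() is called. The SCEVExpander constructor and expandCodeFor() signature are assumed from the earlier, unshown part of this header, and the helper name is hypothetical.

    #include "llvm/Analysis/ScalarEvolution.h"
    #include "llvm/IR/Dominators.h"
    #include "llvm/Transforms/Utils/ScalarEvolutionExpander.h"

    using namespace llvm;

    // Expand S before InsertPt, but keep the new IR only if the caller decides
    // the expansion is profitable; otherwise the cleaner erases it on scope exit.
    static Value *expandIfProfitable(const SCEV *S, Instruction *InsertPt,
                                     ScalarEvolution &SE, DominatorTree &DT,
                                     const DataLayout &DL, bool Profitable) {
      SCEVExpander Expander(SE, DL, "example");  // assumed ctor signature
      SCEVExpanderCleaner Cleaner(Expander, DT);
      Value *V = Expander.expandCodeFor(S, S->getType(), InsertPt);
      if (!Profitable)
        return nullptr;         // ~SCEVExpanderCleaner removes the expansion
      Cleaner.markResultUsed(); // keep the inserted instructions
      return V;
    }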
diff --git a/contrib/libs/llvm12/include/llvm/Transforms/Utils/SimplifyCFGOptions.h b/contrib/libs/llvm12/include/llvm/Transforms/Utils/SimplifyCFGOptions.h
new file mode 100644
index 0000000000..37ace5188d
--- /dev/null
+++ b/contrib/libs/llvm12/include/llvm/Transforms/Utils/SimplifyCFGOptions.h
@@ -0,0 +1,88 @@
+#pragma once
+
+#ifdef __GNUC__
+#pragma GCC diagnostic push
+#pragma GCC diagnostic ignored "-Wunused-parameter"
+#endif
+
+//===- SimplifyCFGOptions.h - Control structure for SimplifyCFG -*- C++ -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// A set of parameters used to control the transforms in the SimplifyCFG pass.
+// Options may change depending on the position in the optimization pipeline.
+// For example, the canonical form that includes switches and branches may
+// later be replaced by lookup tables and selects.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_TRANSFORMS_UTILS_SIMPLIFYCFGOPTIONS_H
+#define LLVM_TRANSFORMS_UTILS_SIMPLIFYCFGOPTIONS_H
+
+namespace llvm {
+
+class AssumptionCache;
+
+struct SimplifyCFGOptions {
+ int BonusInstThreshold = 1;
+ bool ForwardSwitchCondToPhi = false;
+ bool ConvertSwitchToLookupTable = false;
+ bool NeedCanonicalLoop = true;
+ bool HoistCommonInsts = false;
+ bool SinkCommonInsts = false;
+ bool SimplifyCondBranch = true;
+ bool FoldTwoEntryPHINode = true;
+
+ AssumptionCache *AC = nullptr;
+
+ // Support 'builder' pattern to set members by name at construction time.
+ SimplifyCFGOptions &bonusInstThreshold(int I) {
+ BonusInstThreshold = I;
+ return *this;
+ }
+ SimplifyCFGOptions &forwardSwitchCondToPhi(bool B) {
+ ForwardSwitchCondToPhi = B;
+ return *this;
+ }
+ SimplifyCFGOptions &convertSwitchToLookupTable(bool B) {
+ ConvertSwitchToLookupTable = B;
+ return *this;
+ }
+ SimplifyCFGOptions &needCanonicalLoops(bool B) {
+ NeedCanonicalLoop = B;
+ return *this;
+ }
+ SimplifyCFGOptions &hoistCommonInsts(bool B) {
+ HoistCommonInsts = B;
+ return *this;
+ }
+ SimplifyCFGOptions &sinkCommonInsts(bool B) {
+ SinkCommonInsts = B;
+ return *this;
+ }
+ SimplifyCFGOptions &setAssumptionCache(AssumptionCache *Cache) {
+ AC = Cache;
+ return *this;
+ }
+ SimplifyCFGOptions &setSimplifyCondBranch(bool B) {
+ SimplifyCondBranch = B;
+ return *this;
+ }
+
+ SimplifyCFGOptions &setFoldTwoEntryPHINode(bool B) {
+ FoldTwoEntryPHINode = B;
+ return *this;
+ }
+};
+
+} // namespace llvm
+
+#endif // LLVM_TRANSFORMS_UTILS_SIMPLIFYCFGOPTIONS_H
+
+#ifdef __GNUC__
+#pragma GCC diagnostic pop
+#endif
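The builder-style setters above are meant to be chained. A minimal sketch of configuring the options for a late pipeline position follows; the threshold and toggle values are illustrative only, and the helper name is hypothetical.

    #include "llvm/Transforms/Utils/SimplifyCFGOptions.h"

    using namespace llvm;

    // Late-pipeline configuration: allow lookup tables, hoist/sink common
    // instructions, and stop insisting on canonical loop form.
    static SimplifyCFGOptions makeLateSimplifyCFGOptions(AssumptionCache *AC) {
      return SimplifyCFGOptions()
          .bonusInstThreshold(2)
          .convertSwitchToLookupTable(true)
          .hoistCommonInsts(true)
          .sinkCommonInsts(true)
          .needCanonicalLoops(false)
          .setAssumptionCache(AC);
    }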
diff --git a/contrib/libs/llvm12/include/llvm/Transforms/Utils/SimplifyIndVar.h b/contrib/libs/llvm12/include/llvm/Transforms/Utils/SimplifyIndVar.h
new file mode 100644
index 0000000000..375bf2963a
--- /dev/null
+++ b/contrib/libs/llvm12/include/llvm/Transforms/Utils/SimplifyIndVar.h
@@ -0,0 +1,96 @@
+#pragma once
+
+#ifdef __GNUC__
+#pragma GCC diagnostic push
+#pragma GCC diagnostic ignored "-Wunused-parameter"
+#endif
+
+//===-- llvm/Transforms/Utils/SimplifyIndVar.h - Indvar Utils ---*- C++ -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// This file defines an interface for induction variable simplification. It does
+// not define any actual pass or policy, but provides a single function to
+// simplify a loop's induction variables based on ScalarEvolution.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_TRANSFORMS_UTILS_SIMPLIFYINDVAR_H
+#define LLVM_TRANSFORMS_UTILS_SIMPLIFYINDVAR_H
+
+#include "llvm/Analysis/ScalarEvolutionExpressions.h"
+#include "llvm/IR/ConstantRange.h"
+#include "llvm/IR/ValueHandle.h"
+
+namespace llvm {
+
+class CastInst;
+class DominatorTree;
+class Loop;
+class LoopInfo;
+class PHINode;
+class ScalarEvolution;
+class SCEVExpander;
+class TargetTransformInfo;
+
+/// Interface for visiting interesting IV users that are recognized but not
+/// simplified by this utility.
+class IVVisitor {
+protected:
+ const DominatorTree *DT = nullptr;
+
+ virtual void anchor();
+
+public:
+ IVVisitor() = default;
+ virtual ~IVVisitor() = default;
+
+ const DominatorTree *getDomTree() const { return DT; }
+ virtual void visitCast(CastInst *Cast) = 0;
+};
+
+/// simplifyUsersOfIV - Simplify instructions that use this induction variable
+/// by using ScalarEvolution to analyze the IV's recurrence.
+bool simplifyUsersOfIV(PHINode *CurrIV, ScalarEvolution *SE, DominatorTree *DT,
+ LoopInfo *LI, const TargetTransformInfo *TTI,
+ SmallVectorImpl<WeakTrackingVH> &Dead,
+ SCEVExpander &Rewriter, IVVisitor *V = nullptr);
+
+/// SimplifyLoopIVs - Simplify users of induction variables within this
+/// loop. This does not actually change or add IVs.
+bool simplifyLoopIVs(Loop *L, ScalarEvolution *SE, DominatorTree *DT,
+ LoopInfo *LI, const TargetTransformInfo *TTI,
+ SmallVectorImpl<WeakTrackingVH> &Dead);
+
+/// Collect information about induction variables that are used by sign/zero
+/// extend operations. This information is recorded by CollectExtend and
+/// provides the input to WidenIV.
+struct WideIVInfo {
+ PHINode *NarrowIV = nullptr;
+
+  // Widest integer type created by [sz]ext
+ Type *WidestNativeType = nullptr;
+
+ // Was a sext user seen before a zext?
+ bool IsSigned = false;
+};
+
+/// Widen Induction Variables - Extend the width of an IV to cover its
+/// widest uses.
+PHINode *createWideIV(const WideIVInfo &WI,
+ LoopInfo *LI, ScalarEvolution *SE, SCEVExpander &Rewriter,
+ DominatorTree *DT, SmallVectorImpl<WeakTrackingVH> &DeadInsts,
+ unsigned &NumElimExt, unsigned &NumWidened,
+ bool HasGuards, bool UsePostIncrementRanges);
+
+} // end namespace llvm
+
+#endif // LLVM_TRANSFORMS_UTILS_SIMPLIFYINDVAR_H
+
+#ifdef __GNUC__
+#pragma GCC diagnostic pop
+#endif
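A minimal sketch of how a loop pass might call simplifyLoopIVs() and then dispose of the instructions it reports as dead. The cleanup helper RecursivelyDeleteTriviallyDeadInstructionsPermissive() is assumed to be available from Local.h, and the wrapper name is hypothetical.

    #include "llvm/ADT/SmallVector.h"
    #include "llvm/Transforms/Utils/Local.h"
    #include "llvm/Transforms/Utils/SimplifyIndVar.h"

    using namespace llvm;

    // Simplify every IV user in L, then erase whatever became trivially dead.
    static bool simplifyAndCleanIVs(Loop *L, ScalarEvolution *SE,
                                    DominatorTree *DT, LoopInfo *LI,
                                    const TargetTransformInfo *TTI) {
      SmallVector<WeakTrackingVH, 16> DeadInsts;
      bool Changed = simplifyLoopIVs(L, SE, DT, LI, TTI, DeadInsts);
      RecursivelyDeleteTriviallyDeadInstructionsPermissive(DeadInsts);
      return Changed;
    }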
diff --git a/contrib/libs/llvm12/include/llvm/Transforms/Utils/SimplifyLibCalls.h b/contrib/libs/llvm12/include/llvm/Transforms/Utils/SimplifyLibCalls.h
new file mode 100644
index 0000000000..29e2eb9b3d
--- /dev/null
+++ b/contrib/libs/llvm12/include/llvm/Transforms/Utils/SimplifyLibCalls.h
@@ -0,0 +1,257 @@
+#pragma once
+
+#ifdef __GNUC__
+#pragma GCC diagnostic push
+#pragma GCC diagnostic ignored "-Wunused-parameter"
+#endif
+
+//===- SimplifyLibCalls.h - Library call simplifier -------------*- C++ -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// This file exposes an interface to build some C language libcalls for
+// optimization passes that need to call the various functions.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_TRANSFORMS_UTILS_SIMPLIFYLIBCALLS_H
+#define LLVM_TRANSFORMS_UTILS_SIMPLIFYLIBCALLS_H
+
+#include "llvm/ADT/STLExtras.h"
+#include "llvm/Analysis/TargetLibraryInfo.h"
+
+namespace llvm {
+class StringRef;
+class Value;
+class CallInst;
+class DataLayout;
+class Instruction;
+class IRBuilderBase;
+class Function;
+class OptimizationRemarkEmitter;
+class BlockFrequencyInfo;
+class ProfileSummaryInfo;
+
+/// This class implements simplifications for calls to fortified library
+/// functions (__st*cpy_chk, __memcpy_chk, __memmove_chk, __memset_chk),
+/// replacing them, when possible, with their non-checking counterparts.
+/// Other optimizations can also be done, but it's possible to disable them and
+/// only simplify needless use of the checking versions (when the object size
+/// is unknown) by passing true for OnlyLowerUnknownSize.
+class FortifiedLibCallSimplifier {
+private:
+ const TargetLibraryInfo *TLI;
+ bool OnlyLowerUnknownSize;
+
+public:
+ FortifiedLibCallSimplifier(const TargetLibraryInfo *TLI,
+ bool OnlyLowerUnknownSize = false);
+
+ /// Take the given call instruction and return a more
+  /// optimal value to replace the instruction with, or nullptr if a more
+ /// optimal form can't be found.
+ /// The call must not be an indirect call.
+ Value *optimizeCall(CallInst *CI, IRBuilderBase &B);
+
+private:
+ Value *optimizeMemCpyChk(CallInst *CI, IRBuilderBase &B);
+ Value *optimizeMemMoveChk(CallInst *CI, IRBuilderBase &B);
+ Value *optimizeMemSetChk(CallInst *CI, IRBuilderBase &B);
+
+ /// Str/Stp cpy are similar enough to be handled in the same functions.
+ Value *optimizeStrpCpyChk(CallInst *CI, IRBuilderBase &B, LibFunc Func);
+ Value *optimizeStrpNCpyChk(CallInst *CI, IRBuilderBase &B, LibFunc Func);
+ Value *optimizeStrLenChk(CallInst *CI, IRBuilderBase &B);
+ Value *optimizeMemPCpyChk(CallInst *CI, IRBuilderBase &B);
+ Value *optimizeMemCCpyChk(CallInst *CI, IRBuilderBase &B);
+ Value *optimizeSNPrintfChk(CallInst *CI, IRBuilderBase &B);
+ Value *optimizeSPrintfChk(CallInst *CI,IRBuilderBase &B);
+ Value *optimizeStrCatChk(CallInst *CI, IRBuilderBase &B);
+ Value *optimizeStrLCat(CallInst *CI, IRBuilderBase &B);
+ Value *optimizeStrNCatChk(CallInst *CI, IRBuilderBase &B);
+ Value *optimizeStrLCpyChk(CallInst *CI, IRBuilderBase &B);
+ Value *optimizeVSNPrintfChk(CallInst *CI, IRBuilderBase &B);
+ Value *optimizeVSPrintfChk(CallInst *CI, IRBuilderBase &B);
+
+ /// Checks whether the call \p CI to a fortified libcall is foldable
+ /// to the non-fortified version.
+ ///
+ /// \param CI the call to the fortified libcall.
+ ///
+ /// \param ObjSizeOp the index of the object size parameter of this chk
+  /// function. This parameter is mandatory (it is not an Optional).
+ ///
+ /// \param SizeOp optionally set to the parameter index of an explicit buffer
+ /// size argument. For instance, set to '2' for __strncpy_chk.
+ ///
+ /// \param StrOp optionally set to the parameter index of the source string
+ /// parameter to strcpy-like functions, where only the strlen of the source
+  /// will be written into the destination.
+ ///
+ /// \param FlagsOp optionally set to the parameter index of a 'flags'
+ /// parameter. These are used by an implementation to opt-into stricter
+ /// checking.
+ bool isFortifiedCallFoldable(CallInst *CI, unsigned ObjSizeOp,
+ Optional<unsigned> SizeOp = None,
+ Optional<unsigned> StrOp = None,
+ Optional<unsigned> FlagsOp = None);
+};
+
+/// LibCallSimplifier - This class implements a collection of optimizations
+/// that replace well formed calls to library functions with a more optimal
+/// form. For example, replacing 'printf("Hello!")' with 'puts("Hello!")'.
+class LibCallSimplifier {
+private:
+ FortifiedLibCallSimplifier FortifiedSimplifier;
+ const DataLayout &DL;
+ const TargetLibraryInfo *TLI;
+ OptimizationRemarkEmitter &ORE;
+ BlockFrequencyInfo *BFI;
+ ProfileSummaryInfo *PSI;
+ bool UnsafeFPShrink;
+ function_ref<void(Instruction *, Value *)> Replacer;
+ function_ref<void(Instruction *)> Eraser;
+
+ /// Internal wrapper for RAUW that is the default implementation.
+ ///
+ /// Other users may provide an alternate function with this signature instead
+ /// of this one.
+ static void replaceAllUsesWithDefault(Instruction *I, Value *With) {
+ I->replaceAllUsesWith(With);
+ }
+
+ /// Internal wrapper for eraseFromParent that is the default implementation.
+ static void eraseFromParentDefault(Instruction *I) { I->eraseFromParent(); }
+
+ /// Replace an instruction's uses with a value using our replacer.
+ void replaceAllUsesWith(Instruction *I, Value *With);
+
+ /// Erase an instruction from its parent with our eraser.
+ void eraseFromParent(Instruction *I);
+
+ /// Replace an instruction with a value and erase it from its parent.
+ void substituteInParent(Instruction *I, Value *With) {
+ replaceAllUsesWith(I, With);
+ eraseFromParent(I);
+ }
+
+ Value *foldMallocMemset(CallInst *Memset, IRBuilderBase &B);
+
+public:
+ LibCallSimplifier(
+ const DataLayout &DL, const TargetLibraryInfo *TLI,
+ OptimizationRemarkEmitter &ORE,
+ BlockFrequencyInfo *BFI, ProfileSummaryInfo *PSI,
+ function_ref<void(Instruction *, Value *)> Replacer =
+ &replaceAllUsesWithDefault,
+ function_ref<void(Instruction *)> Eraser = &eraseFromParentDefault);
+
+ /// optimizeCall - Take the given call instruction and return a more
+ /// optimal value to replace the instruction with or 0 if a more
+ /// optimal form can't be found. Note that the returned value may
+ /// be equal to the instruction being optimized. In this case all
+ /// other instructions that use the given instruction were modified
+ /// and the given instruction is dead.
+ /// The call must not be an indirect call.
+ Value *optimizeCall(CallInst *CI, IRBuilderBase &B);
+
+private:
+ // String and Memory Library Call Optimizations
+ Value *optimizeStrCat(CallInst *CI, IRBuilderBase &B);
+ Value *optimizeStrNCat(CallInst *CI, IRBuilderBase &B);
+ Value *optimizeStrChr(CallInst *CI, IRBuilderBase &B);
+ Value *optimizeStrRChr(CallInst *CI, IRBuilderBase &B);
+ Value *optimizeStrCmp(CallInst *CI, IRBuilderBase &B);
+ Value *optimizeStrNCmp(CallInst *CI, IRBuilderBase &B);
+ Value *optimizeStrNDup(CallInst *CI, IRBuilderBase &B);
+ Value *optimizeStrCpy(CallInst *CI, IRBuilderBase &B);
+ Value *optimizeStpCpy(CallInst *CI, IRBuilderBase &B);
+ Value *optimizeStrNCpy(CallInst *CI, IRBuilderBase &B);
+ Value *optimizeStrLen(CallInst *CI, IRBuilderBase &B);
+ Value *optimizeStrPBrk(CallInst *CI, IRBuilderBase &B);
+ Value *optimizeStrTo(CallInst *CI, IRBuilderBase &B);
+ Value *optimizeStrSpn(CallInst *CI, IRBuilderBase &B);
+ Value *optimizeStrCSpn(CallInst *CI, IRBuilderBase &B);
+ Value *optimizeStrStr(CallInst *CI, IRBuilderBase &B);
+ Value *optimizeMemChr(CallInst *CI, IRBuilderBase &B);
+ Value *optimizeMemRChr(CallInst *CI, IRBuilderBase &B);
+ Value *optimizeMemCmp(CallInst *CI, IRBuilderBase &B);
+ Value *optimizeBCmp(CallInst *CI, IRBuilderBase &B);
+ Value *optimizeMemCmpBCmpCommon(CallInst *CI, IRBuilderBase &B);
+ Value *optimizeMemCCpy(CallInst *CI, IRBuilderBase &B);
+ Value *optimizeMemPCpy(CallInst *CI, IRBuilderBase &B);
+ Value *optimizeMemCpy(CallInst *CI, IRBuilderBase &B);
+ Value *optimizeMemMove(CallInst *CI, IRBuilderBase &B);
+ Value *optimizeMemSet(CallInst *CI, IRBuilderBase &B);
+ Value *optimizeRealloc(CallInst *CI, IRBuilderBase &B);
+ Value *optimizeWcslen(CallInst *CI, IRBuilderBase &B);
+ Value *optimizeBCopy(CallInst *CI, IRBuilderBase &B);
+ // Wrapper for all String/Memory Library Call Optimizations
+ Value *optimizeStringMemoryLibCall(CallInst *CI, IRBuilderBase &B);
+
+ // Math Library Optimizations
+ Value *optimizeCAbs(CallInst *CI, IRBuilderBase &B);
+ Value *optimizePow(CallInst *CI, IRBuilderBase &B);
+ Value *replacePowWithExp(CallInst *Pow, IRBuilderBase &B);
+ Value *replacePowWithSqrt(CallInst *Pow, IRBuilderBase &B);
+ Value *optimizeExp2(CallInst *CI, IRBuilderBase &B);
+ Value *optimizeFMinFMax(CallInst *CI, IRBuilderBase &B);
+ Value *optimizeLog(CallInst *CI, IRBuilderBase &B);
+ Value *optimizeSqrt(CallInst *CI, IRBuilderBase &B);
+ Value *optimizeSinCosPi(CallInst *CI, IRBuilderBase &B);
+ Value *optimizeTan(CallInst *CI, IRBuilderBase &B);
+ // Wrapper for all floating point library call optimizations
+ Value *optimizeFloatingPointLibCall(CallInst *CI, LibFunc Func,
+ IRBuilderBase &B);
+
+ // Integer Library Call Optimizations
+ Value *optimizeFFS(CallInst *CI, IRBuilderBase &B);
+ Value *optimizeFls(CallInst *CI, IRBuilderBase &B);
+ Value *optimizeAbs(CallInst *CI, IRBuilderBase &B);
+ Value *optimizeIsDigit(CallInst *CI, IRBuilderBase &B);
+ Value *optimizeIsAscii(CallInst *CI, IRBuilderBase &B);
+ Value *optimizeToAscii(CallInst *CI, IRBuilderBase &B);
+ Value *optimizeAtoi(CallInst *CI, IRBuilderBase &B);
+ Value *optimizeStrtol(CallInst *CI, IRBuilderBase &B);
+
+ // Formatting and IO Library Call Optimizations
+ Value *optimizeErrorReporting(CallInst *CI, IRBuilderBase &B,
+ int StreamArg = -1);
+ Value *optimizePrintF(CallInst *CI, IRBuilderBase &B);
+ Value *optimizeSPrintF(CallInst *CI, IRBuilderBase &B);
+ Value *optimizeSnPrintF(CallInst *CI, IRBuilderBase &B);
+ Value *optimizeFPrintF(CallInst *CI, IRBuilderBase &B);
+ Value *optimizeFWrite(CallInst *CI, IRBuilderBase &B);
+ Value *optimizeFPuts(CallInst *CI, IRBuilderBase &B);
+ Value *optimizePuts(CallInst *CI, IRBuilderBase &B);
+
+ // Helper methods
+ Value *emitStrLenMemCpy(Value *Src, Value *Dst, uint64_t Len,
+ IRBuilderBase &B);
+ void classifyArgUse(Value *Val, Function *F, bool IsFloat,
+ SmallVectorImpl<CallInst *> &SinCalls,
+ SmallVectorImpl<CallInst *> &CosCalls,
+ SmallVectorImpl<CallInst *> &SinCosCalls);
+ Value *optimizePrintFString(CallInst *CI, IRBuilderBase &B);
+ Value *optimizeSPrintFString(CallInst *CI, IRBuilderBase &B);
+ Value *optimizeSnPrintFString(CallInst *CI, IRBuilderBase &B);
+ Value *optimizeFPrintFString(CallInst *CI, IRBuilderBase &B);
+
+ /// hasFloatVersion - Checks if there is a float version of the specified
+ /// function by checking for an existing function with name FuncName + f
+ bool hasFloatVersion(StringRef FuncName);
+
+ /// Shared code to optimize strlen+wcslen.
+ Value *optimizeStringLength(CallInst *CI, IRBuilderBase &B, unsigned CharSize);
+};
+} // End llvm namespace
+
+#endif
+
+#ifdef __GNUC__
+#pragma GCC diagnostic pop
+#endif
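A condensed sketch of how a caller might drive LibCallSimplifier over a function, using the default replacer/eraser and handling the returned replacement value as the optimizeCall() comment above describes. The iteration helpers (instructions(), make_early_inc_range()) are assumed from the usual ADT/IR headers, and the function name is hypothetical.

    #include "llvm/ADT/STLExtras.h"
    #include "llvm/Analysis/OptimizationRemarkEmitter.h"
    #include "llvm/IR/IRBuilder.h"
    #include "llvm/IR/InstIterator.h"
    #include "llvm/Transforms/Utils/SimplifyLibCalls.h"

    using namespace llvm;

    // Rewrite recognized library calls, e.g. printf("hi\n") -> puts("hi").
    static bool simplifyLibCallsIn(Function &F, const TargetLibraryInfo *TLI,
                                   OptimizationRemarkEmitter &ORE,
                                   BlockFrequencyInfo *BFI,
                                   ProfileSummaryInfo *PSI) {
      LibCallSimplifier Simplifier(F.getParent()->getDataLayout(), TLI, ORE,
                                   BFI, PSI);
      bool Changed = false;
      for (Instruction &I : make_early_inc_range(instructions(F))) {
        auto *CI = dyn_cast<CallInst>(&I);
        if (!CI || CI->isIndirectCall())
          continue;
        IRBuilder<> B(CI);
        if (Value *With = Simplifier.optimizeCall(CI, B)) {
          if (With != CI) {
            CI->replaceAllUsesWith(With);
            CI->eraseFromParent();
          }
          Changed = true;
        }
      }
      return Changed;
    }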
diff --git a/contrib/libs/llvm12/include/llvm/Transforms/Utils/SizeOpts.h b/contrib/libs/llvm12/include/llvm/Transforms/Utils/SizeOpts.h
new file mode 100644
index 0000000000..7c49b1755a
--- /dev/null
+++ b/contrib/libs/llvm12/include/llvm/Transforms/Utils/SizeOpts.h
@@ -0,0 +1,118 @@
+#pragma once
+
+#ifdef __GNUC__
+#pragma GCC diagnostic push
+#pragma GCC diagnostic ignored "-Wunused-parameter"
+#endif
+
+//===- llvm/Transforms/Utils/SizeOpts.h - size optimization -----*- C++ -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// This file contains some shared code size optimization related code.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_TRANSFORMS_UTILS_SIZEOPTS_H
+#define LLVM_TRANSFORMS_UTILS_SIZEOPTS_H
+
+#include "llvm/Analysis/BlockFrequencyInfo.h"
+#include "llvm/Analysis/ProfileSummaryInfo.h"
+#include "llvm/Support/CommandLine.h"
+
+extern llvm::cl::opt<bool> EnablePGSO;
+extern llvm::cl::opt<bool> PGSOLargeWorkingSetSizeOnly;
+extern llvm::cl::opt<bool> PGSOColdCodeOnly;
+extern llvm::cl::opt<bool> PGSOColdCodeOnlyForInstrPGO;
+extern llvm::cl::opt<bool> PGSOColdCodeOnlyForSamplePGO;
+extern llvm::cl::opt<bool> PGSOColdCodeOnlyForPartialSamplePGO;
+extern llvm::cl::opt<bool> ForcePGSO;
+extern llvm::cl::opt<int> PgsoCutoffInstrProf;
+extern llvm::cl::opt<int> PgsoCutoffSampleProf;
+
+namespace llvm {
+
+class BasicBlock;
+class BlockFrequencyInfo;
+class Function;
+
+enum class PGSOQueryType {
+ IRPass, // A query call from an IR-level transform pass.
+ Test, // A query call from a unit test.
+ Other, // Others.
+};
+
+static inline bool isPGSOColdCodeOnly(ProfileSummaryInfo *PSI) {
+ return PGSOColdCodeOnly ||
+ (PSI->hasInstrumentationProfile() && PGSOColdCodeOnlyForInstrPGO) ||
+ (PSI->hasSampleProfile() &&
+ ((!PSI->hasPartialSampleProfile() && PGSOColdCodeOnlyForSamplePGO) ||
+ (PSI->hasPartialSampleProfile() &&
+ PGSOColdCodeOnlyForPartialSamplePGO))) ||
+ (PGSOLargeWorkingSetSizeOnly && !PSI->hasLargeWorkingSetSize());
+}
+
+template<typename AdapterT, typename FuncT, typename BFIT>
+bool shouldFuncOptimizeForSizeImpl(const FuncT *F, ProfileSummaryInfo *PSI,
+ BFIT *BFI, PGSOQueryType QueryType) {
+ assert(F);
+ if (!PSI || !BFI || !PSI->hasProfileSummary())
+ return false;
+ if (ForcePGSO)
+ return true;
+ if (!EnablePGSO)
+ return false;
+ if (isPGSOColdCodeOnly(PSI))
+ return AdapterT::isFunctionColdInCallGraph(F, PSI, *BFI);
+ if (PSI->hasSampleProfile())
+ // The "isCold" check seems to work better for Sample PGO as it could have
+ // many profile-unannotated functions.
+ return AdapterT::isFunctionColdInCallGraphNthPercentile(
+ PgsoCutoffSampleProf, F, PSI, *BFI);
+ return !AdapterT::isFunctionHotInCallGraphNthPercentile(PgsoCutoffInstrProf,
+ F, PSI, *BFI);
+}
+
+template<typename AdapterT, typename BlockTOrBlockFreq, typename BFIT>
+bool shouldOptimizeForSizeImpl(BlockTOrBlockFreq BBOrBlockFreq, ProfileSummaryInfo *PSI,
+ BFIT *BFI, PGSOQueryType QueryType) {
+ if (!PSI || !BFI || !PSI->hasProfileSummary())
+ return false;
+ if (ForcePGSO)
+ return true;
+ if (!EnablePGSO)
+ return false;
+ if (isPGSOColdCodeOnly(PSI))
+ return AdapterT::isColdBlock(BBOrBlockFreq, PSI, BFI);
+ if (PSI->hasSampleProfile())
+ // The "isCold" check seems to work better for Sample PGO as it could have
+ // many profile-unannotated functions.
+ return AdapterT::isColdBlockNthPercentile(PgsoCutoffSampleProf,
+ BBOrBlockFreq, PSI, BFI);
+ return !AdapterT::isHotBlockNthPercentile(PgsoCutoffInstrProf, BBOrBlockFreq,
+ PSI, BFI);
+}
+
+/// Returns true if function \p F is suggested to be size-optimized based on the
+/// profile.
+bool shouldOptimizeForSize(const Function *F, ProfileSummaryInfo *PSI,
+ BlockFrequencyInfo *BFI,
+ PGSOQueryType QueryType = PGSOQueryType::Other);
+
+/// Returns true if basic block \p BB is suggested to be size-optimized based on
+/// the profile.
+bool shouldOptimizeForSize(const BasicBlock *BB, ProfileSummaryInfo *PSI,
+ BlockFrequencyInfo *BFI,
+ PGSOQueryType QueryType = PGSOQueryType::Other);
+
+} // end namespace llvm
+
+#endif // LLVM_TRANSFORMS_UTILS_SIZEOPTS_H
+
+#ifdef __GNUC__
+#pragma GCC diagnostic pop
+#endif
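A minimal sketch of how a transform might consult the PGSO query above when deciding how aggressive to be; the threshold numbers are purely illustrative and the helper name is hypothetical.

    #include "llvm/IR/Function.h"
    #include "llvm/Transforms/Utils/SizeOpts.h"

    using namespace llvm;

    // Be conservative when either the optsize attribute or the profile (PGSO)
    // says the function should stay small.
    static unsigned pickThreshold(const Function &F, ProfileSummaryInfo *PSI,
                                  BlockFrequencyInfo *BFI) {
      bool OptForSize =
          F.hasOptSize() ||
          shouldOptimizeForSize(&F, PSI, BFI, PGSOQueryType::IRPass);
      return OptForSize ? 50 : 225; // illustrative thresholds only
    }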
diff --git a/contrib/libs/llvm12/include/llvm/Transforms/Utils/SplitModule.h b/contrib/libs/llvm12/include/llvm/Transforms/Utils/SplitModule.h
new file mode 100644
index 0000000000..7169e126a6
--- /dev/null
+++ b/contrib/libs/llvm12/include/llvm/Transforms/Utils/SplitModule.h
@@ -0,0 +1,53 @@
+#pragma once
+
+#ifdef __GNUC__
+#pragma GCC diagnostic push
+#pragma GCC diagnostic ignored "-Wunused-parameter"
+#endif
+
+//===- SplitModule.h - Split a module into partitions -----------*- C++ -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// This file defines the function llvm::SplitModule, which splits a module
+// into multiple linkable partitions. It can be used to implement parallel code
+// generation for link-time optimization.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_TRANSFORMS_UTILS_SPLITMODULE_H
+#define LLVM_TRANSFORMS_UTILS_SPLITMODULE_H
+
+#include "llvm/ADT/STLExtras.h"
+#include <memory>
+
+namespace llvm {
+
+class Module;
+
+/// Splits the module M into N linkable partitions. The function ModuleCallback
+/// is called N times passing each individual partition as the MPart argument.
+///
+/// FIXME: This function does not deal with the somewhat subtle symbol
+/// visibility issues around module splitting, including (but not limited to):
+///
+/// - Internal symbols should not collide with symbols defined outside the
+/// module.
+/// - Internal symbols defined in module-level inline asm should be visible to
+/// each partition.
+void SplitModule(
+ std::unique_ptr<Module> M, unsigned N,
+ function_ref<void(std::unique_ptr<Module> MPart)> ModuleCallback,
+ bool PreserveLocals = false);
+
+} // end namespace llvm
+
+#endif // LLVM_TRANSFORMS_UTILS_SPLITMODULE_H
+
+#ifdef __GNUC__
+#pragma GCC diagnostic pop
+#endif
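A minimal sketch of driving SplitModule(), collecting the partitions so they could be handed to parallel code generation; the wrapper name is hypothetical.

    #include "llvm/IR/Module.h"
    #include "llvm/Transforms/Utils/SplitModule.h"
    #include <memory>
    #include <vector>

    using namespace llvm;

    // Split M into four partitions and collect them for later codegen.
    static std::vector<std::unique_ptr<Module>>
    splitForParallelCodeGen(std::unique_ptr<Module> M) {
      std::vector<std::unique_ptr<Module>> Parts;
      SplitModule(std::move(M), /*N=*/4,
                  [&Parts](std::unique_ptr<Module> MPart) {
                    Parts.push_back(std::move(MPart));
                  },
                  /*PreserveLocals=*/false);
      return Parts;
    }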
diff --git a/contrib/libs/llvm12/include/llvm/Transforms/Utils/StripGCRelocates.h b/contrib/libs/llvm12/include/llvm/Transforms/Utils/StripGCRelocates.h
new file mode 100644
index 0000000000..ab438af771
--- /dev/null
+++ b/contrib/libs/llvm12/include/llvm/Transforms/Utils/StripGCRelocates.h
@@ -0,0 +1,36 @@
+#pragma once
+
+#ifdef __GNUC__
+#pragma GCC diagnostic push
+#pragma GCC diagnostic ignored "-Wunused-parameter"
+#endif
+
+//===- StripGCRelocates.h - -----------------------------------------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_TRANSFORMS_UTILS_STRIPGCRELOCATES_H
+#define LLVM_TRANSFORMS_UTILS_STRIPGCRELOCATES_H
+
+#include "llvm/IR/PassManager.h"
+
+namespace llvm {
+
+class Function;
+
+class StripGCRelocates : public PassInfoMixin<StripGCRelocates> {
+public:
+ PreservedAnalyses run(Function &F, FunctionAnalysisManager &AM);
+};
+
+} // end namespace llvm
+
+#endif // LLVM_TRANSFORMS_UTILS_STRIPGCRELOCATES_H
+
+#ifdef __GNUC__
+#pragma GCC diagnostic pop
+#endif
diff --git a/contrib/libs/llvm12/include/llvm/Transforms/Utils/StripNonLineTableDebugInfo.h b/contrib/libs/llvm12/include/llvm/Transforms/Utils/StripNonLineTableDebugInfo.h
new file mode 100644
index 0000000000..bd010e3803
--- /dev/null
+++ b/contrib/libs/llvm12/include/llvm/Transforms/Utils/StripNonLineTableDebugInfo.h
@@ -0,0 +1,37 @@
+#pragma once
+
+#ifdef __GNUC__
+#pragma GCC diagnostic push
+#pragma GCC diagnostic ignored "-Wunused-parameter"
+#endif
+
+//===- StripNonLineTableDebugInfo.h - -------------------------------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_TRANSFORMS_UTILS_STRIPNONLINETABLEDEBUGINFO_H
+#define LLVM_TRANSFORMS_UTILS_STRIPNONLINETABLEDEBUGINFO_H
+
+#include "llvm/IR/PassManager.h"
+
+namespace llvm {
+
+class Module;
+
+class StripNonLineTableDebugInfoPass
+ : public PassInfoMixin<StripNonLineTableDebugInfoPass> {
+public:
+ PreservedAnalyses run(Module &M, ModuleAnalysisManager &AM);
+};
+
+} // end namespace llvm
+
+#endif // LLVM_TRANSFORMS_UTILS_STRIPNONLINETABLEDEBUGINFO_H
+
+#ifdef __GNUC__
+#pragma GCC diagnostic pop
+#endif
diff --git a/contrib/libs/llvm12/include/llvm/Transforms/Utils/SymbolRewriter.h b/contrib/libs/llvm12/include/llvm/Transforms/Utils/SymbolRewriter.h
new file mode 100644
index 0000000000..f76640f3d6
--- /dev/null
+++ b/contrib/libs/llvm12/include/llvm/Transforms/Utils/SymbolRewriter.h
@@ -0,0 +1,152 @@
+#pragma once
+
+#ifdef __GNUC__
+#pragma GCC diagnostic push
+#pragma GCC diagnostic ignored "-Wunused-parameter"
+#endif
+
+//===- SymbolRewriter.h - Symbol Rewriting Pass -----------------*- C++ -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// This file provides the prototypes and definitions related to the Symbol
+// Rewriter pass.
+//
+// The Symbol Rewriter pass takes a set of rewrite descriptors which define
+// transformations for symbol names. These can be either a single name-to-name
+// transformation or broader regular expression based transformations.
+//
+// All the functions are re-written at the IR level. The Symbol Rewriter itself
+// is exposed as a module level pass. All symbols at the module level are
+// iterated. For any matching symbol, the requested transformation is applied,
+// updating references to it as well (a la RAUW). The resulting binary will
+// only contain the rewritten symbols.
+//
+// By performing this operation in the compiler, we are able to catch symbols
+// that would otherwise not be possible to catch (e.g. inlined symbols).
+//
+// This makes it possible to cleanly transform symbols without resorting to
+// overly-complex macro tricks and the pre-processor. An example of where this
+// is useful is the sanitizers where we would like to intercept a well-defined
+// set of functions across the module.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_TRANSFORMS_UTILS_SYMBOLREWRITER_H
+#define LLVM_TRANSFORMS_UTILS_SYMBOLREWRITER_H
+
+#include "llvm/IR/PassManager.h"
+#include <list>
+#include <memory>
+#include <string>
+
+namespace llvm {
+
+class MemoryBuffer;
+class Module;
+class ModulePass;
+
+namespace yaml {
+
+class KeyValueNode;
+class MappingNode;
+class ScalarNode;
+class Stream;
+
+} // end namespace yaml
+
+namespace SymbolRewriter {
+
+/// The basic entity representing a rewrite operation. It serves as the base
+/// class for any rewrite descriptor. It has a certain set of specializations
+/// which describe a particular rewrite.
+///
+/// The RewriteMapParser can be used to parse a mapping file that provides the
+/// mapping for rewriting the symbols. The descriptors individually describe
+/// whether to rewrite a function, global variable, or global alias. Each of
+/// these can be selected either by explicitly providing a name for the ones to
+/// be rewritten or providing a (posix compatible) regular expression that will
+/// select the symbols to rewrite. This descriptor list is passed to the
+/// SymbolRewriter pass.
+class RewriteDescriptor {
+public:
+ enum class Type {
+ Invalid, /// invalid
+ Function, /// function - descriptor rewrites a function
+ GlobalVariable, /// global variable - descriptor rewrites a global variable
+ NamedAlias, /// named alias - descriptor rewrites a global alias
+ };
+
+ RewriteDescriptor(const RewriteDescriptor &) = delete;
+ RewriteDescriptor &operator=(const RewriteDescriptor &) = delete;
+ virtual ~RewriteDescriptor() = default;
+
+ Type getType() const { return Kind; }
+
+ virtual bool performOnModule(Module &M) = 0;
+
+protected:
+ explicit RewriteDescriptor(Type T) : Kind(T) {}
+
+private:
+ const Type Kind;
+};
+
+using RewriteDescriptorList = std::list<std::unique_ptr<RewriteDescriptor>>;
+
+class RewriteMapParser {
+public:
+ bool parse(const std::string &MapFile, RewriteDescriptorList *Descriptors);
+
+private:
+ bool parse(std::unique_ptr<MemoryBuffer> &MapFile, RewriteDescriptorList *DL);
+ bool parseEntry(yaml::Stream &Stream, yaml::KeyValueNode &Entry,
+ RewriteDescriptorList *DL);
+ bool parseRewriteFunctionDescriptor(yaml::Stream &Stream,
+ yaml::ScalarNode *Key,
+ yaml::MappingNode *Value,
+ RewriteDescriptorList *DL);
+ bool parseRewriteGlobalVariableDescriptor(yaml::Stream &Stream,
+ yaml::ScalarNode *Key,
+ yaml::MappingNode *Value,
+ RewriteDescriptorList *DL);
+ bool parseRewriteGlobalAliasDescriptor(yaml::Stream &YS, yaml::ScalarNode *K,
+ yaml::MappingNode *V,
+ RewriteDescriptorList *DL);
+};
+
+} // end namespace SymbolRewriter
+
+ModulePass *createRewriteSymbolsPass();
+ModulePass *createRewriteSymbolsPass(SymbolRewriter::RewriteDescriptorList &);
+
+class RewriteSymbolPass : public PassInfoMixin<RewriteSymbolPass> {
+public:
+ RewriteSymbolPass() { loadAndParseMapFiles(); }
+
+ RewriteSymbolPass(SymbolRewriter::RewriteDescriptorList &DL) {
+ Descriptors.splice(Descriptors.begin(), DL);
+ }
+
+ PreservedAnalyses run(Module &M, ModuleAnalysisManager &AM);
+
+ // Glue for old PM
+ bool runImpl(Module &M);
+
+private:
+ void loadAndParseMapFiles();
+
+ SymbolRewriter::RewriteDescriptorList Descriptors;
+};
+
+} // end namespace llvm
+
+#endif //LLVM_TRANSFORMS_UTILS_SYMBOLREWRITER_H
+
+#ifdef __GNUC__
+#pragma GCC diagnostic pop
+#endif
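A minimal sketch of parsing a rewrite map file and scheduling the new-PM pass with the resulting descriptors. The map-file path is a placeholder and the helper name is hypothetical; RewriteMapParser::parse() is assumed to return true on success.

    #include "llvm/IR/PassManager.h"
    #include "llvm/Transforms/Utils/SymbolRewriter.h"

    using namespace llvm;

    // Parse MapFile and, on success, add a RewriteSymbolPass that applies the
    // descriptors it contains.
    static bool addSymbolRewrites(ModulePassManager &MPM,
                                  const std::string &MapFile) {
      SymbolRewriter::RewriteDescriptorList Descriptors;
      SymbolRewriter::RewriteMapParser Parser;
      if (!Parser.parse(MapFile, &Descriptors))
        return false; // malformed or unreadable map file
      MPM.addPass(RewriteSymbolPass(Descriptors));
      return true;
    }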
diff --git a/contrib/libs/llvm12/include/llvm/Transforms/Utils/UnifyFunctionExitNodes.h b/contrib/libs/llvm12/include/llvm/Transforms/Utils/UnifyFunctionExitNodes.h
new file mode 100644
index 0000000000..dcb046c476
--- /dev/null
+++ b/contrib/libs/llvm12/include/llvm/Transforms/Utils/UnifyFunctionExitNodes.h
@@ -0,0 +1,56 @@
+#pragma once
+
+#ifdef __GNUC__
+#pragma GCC diagnostic push
+#pragma GCC diagnostic ignored "-Wunused-parameter"
+#endif
+
+//===-- UnifyFunctionExitNodes.h - Ensure fn's have one return --*- C++ -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// This pass is used to ensure that functions have at most one return and one
+// unreachable instruction in them.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_TRANSFORMS_UTILS_UNIFYFUNCTIONEXITNODES_H
+#define LLVM_TRANSFORMS_UTILS_UNIFYFUNCTIONEXITNODES_H
+
+#include "llvm/IR/PassManager.h"
+#include "llvm/Pass.h"
+
+namespace llvm {
+
+class BasicBlock;
+
+class UnifyFunctionExitNodesLegacyPass : public FunctionPass {
+public:
+ static char ID; // Pass identification, replacement for typeid
+ UnifyFunctionExitNodesLegacyPass();
+
+ // We can preserve non-critical-edgeness when we unify function exit nodes
+ void getAnalysisUsage(AnalysisUsage &AU) const override;
+
+ bool runOnFunction(Function &F) override;
+};
+
+Pass *createUnifyFunctionExitNodesPass();
+
+class UnifyFunctionExitNodesPass
+ : public PassInfoMixin<UnifyFunctionExitNodesPass> {
+public:
+ PreservedAnalyses run(Function &F, FunctionAnalysisManager &AM);
+};
+
+} // end namespace llvm
+
+#endif // LLVM_TRANSFORMS_UTILS_UNIFYFUNCTIONEXITNODES_H
+
+#ifdef __GNUC__
+#pragma GCC diagnostic pop
+#endif
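For completeness, a short sketch of wiring the pass into either pass manager; the helper names are hypothetical.

    #include "llvm/IR/LegacyPassManager.h"
    #include "llvm/IR/PassManager.h"
    #include "llvm/Transforms/Utils/UnifyFunctionExitNodes.h"

    using namespace llvm;

    // New pass manager.
    static void addExitUnification(FunctionPassManager &FPM) {
      FPM.addPass(UnifyFunctionExitNodesPass());
    }

    // Legacy pass manager.
    static void addExitUnification(legacy::PassManagerBase &PM) {
      PM.add(createUnifyFunctionExitNodesPass());
    }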
diff --git a/contrib/libs/llvm12/include/llvm/Transforms/Utils/UnifyLoopExits.h b/contrib/libs/llvm12/include/llvm/Transforms/Utils/UnifyLoopExits.h
new file mode 100644
index 0000000000..ef6ef6f92b
--- /dev/null
+++ b/contrib/libs/llvm12/include/llvm/Transforms/Utils/UnifyLoopExits.h
@@ -0,0 +1,33 @@
+#pragma once
+
+#ifdef __GNUC__
+#pragma GCC diagnostic push
+#pragma GCC diagnostic ignored "-Wunused-parameter"
+#endif
+
+//===- UnifyLoopExits.h - Redirect exiting edges to one block -*- C++ -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_TRANSFORMS_UTILS_UNIFYLOOPEXITS_H
+#define LLVM_TRANSFORMS_UTILS_UNIFYLOOPEXITS_H
+
+#include "llvm/IR/PassManager.h"
+
+namespace llvm {
+
+class UnifyLoopExitsPass : public PassInfoMixin<UnifyLoopExitsPass> {
+public:
+ PreservedAnalyses run(Function &F, FunctionAnalysisManager &AM);
+};
+} // namespace llvm
+
+#endif // LLVM_TRANSFORMS_UTILS_UNIFYLOOPEXITS_H
+
+#ifdef __GNUC__
+#pragma GCC diagnostic pop
+#endif
diff --git a/contrib/libs/llvm12/include/llvm/Transforms/Utils/UniqueInternalLinkageNames.h b/contrib/libs/llvm12/include/llvm/Transforms/Utils/UniqueInternalLinkageNames.h
new file mode 100644
index 0000000000..78ca7086c3
--- /dev/null
+++ b/contrib/libs/llvm12/include/llvm/Transforms/Utils/UniqueInternalLinkageNames.h
@@ -0,0 +1,42 @@
+#pragma once
+
+#ifdef __GNUC__
+#pragma GCC diagnostic push
+#pragma GCC diagnostic ignored "-Wunused-parameter"
+#endif
+
+//===-- UniqueInternalLinkageNames.h - Uniq. Int. Linkage Names -*- C++ -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// This file implements unique naming of internal linkage symbols with option
+// -funique-internal-linkage-symbols.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_TRANSFORMS_UTILS_UNIQUEINTERNALLINKAGENAMES_H
+#define LLVM_TRANSFORMS_UTILS_UNIQUEINTERNALLINKAGENAMES_H
+
+#include "llvm/IR/Module.h"
+#include "llvm/IR/PassManager.h"
+
+namespace llvm {
+
+/// Simple pass that renames internal linkage symbols so their names are unique.
+class UniqueInternalLinkageNamesPass
+ : public PassInfoMixin<UniqueInternalLinkageNamesPass> {
+public:
+ PreservedAnalyses run(Module &M, ModuleAnalysisManager &AM);
+};
+
+} // end namespace llvm
+
+#endif // LLVM_TRANSFORMS_UTILS_UNIQUEINTERNALLINKAGENAMES_H
+
+#ifdef __GNUC__
+#pragma GCC diagnostic pop
+#endif
diff --git a/contrib/libs/llvm12/include/llvm/Transforms/Utils/UnrollLoop.h b/contrib/libs/llvm12/include/llvm/Transforms/Utils/UnrollLoop.h
new file mode 100644
index 0000000000..f279ae9966
--- /dev/null
+++ b/contrib/libs/llvm12/include/llvm/Transforms/Utils/UnrollLoop.h
@@ -0,0 +1,149 @@
+#pragma once
+
+#ifdef __GNUC__
+#pragma GCC diagnostic push
+#pragma GCC diagnostic ignored "-Wunused-parameter"
+#endif
+
+//===- llvm/Transforms/Utils/UnrollLoop.h - Unrolling utilities -*- C++ -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// This file defines some loop unrolling utilities. It does not define any
+// actual pass or policy, but provides a single function to perform loop
+// unrolling.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_TRANSFORMS_UTILS_UNROLLLOOP_H
+#define LLVM_TRANSFORMS_UTILS_UNROLLLOOP_H
+
+#include "llvm/ADT/DenseMap.h"
+#include "llvm/Analysis/TargetTransformInfo.h"
+
+namespace llvm {
+
+class AssumptionCache;
+class BasicBlock;
+class BlockFrequencyInfo;
+class DependenceInfo;
+class DominatorTree;
+class Loop;
+class LoopInfo;
+class MDNode;
+class ProfileSummaryInfo;
+class OptimizationRemarkEmitter;
+class ScalarEvolution;
+class StringRef;
+class Value;
+
+using NewLoopsMap = SmallDenseMap<const Loop *, Loop *, 4>;
+
+/// @{
+/// Metadata attribute names
+const char *const LLVMLoopUnrollFollowupAll = "llvm.loop.unroll.followup_all";
+const char *const LLVMLoopUnrollFollowupUnrolled =
+ "llvm.loop.unroll.followup_unrolled";
+const char *const LLVMLoopUnrollFollowupRemainder =
+ "llvm.loop.unroll.followup_remainder";
+/// @}
+
+const Loop* addClonedBlockToLoopInfo(BasicBlock *OriginalBB,
+ BasicBlock *ClonedBB, LoopInfo *LI,
+ NewLoopsMap &NewLoops);
+
+/// Represents the result of a \c UnrollLoop invocation.
+enum class LoopUnrollResult {
+ /// The loop was not modified.
+ Unmodified,
+
+ /// The loop was partially unrolled -- we still have a loop, but with a
+  /// smaller trip count. We may also have emitted an epilogue loop if the loop
+ /// had a non-constant trip count.
+ PartiallyUnrolled,
+
+ /// The loop was fully unrolled into straight-line code. We no longer have
+ /// any back-edges.
+ FullyUnrolled
+};
+
+struct UnrollLoopOptions {
+ unsigned Count;
+ unsigned TripCount;
+ bool Force;
+ bool AllowRuntime;
+ bool AllowExpensiveTripCount;
+ bool PreserveCondBr;
+ bool PreserveOnlyFirst;
+ unsigned TripMultiple;
+ unsigned PeelCount;
+ bool UnrollRemainder;
+ bool ForgetAllSCEV;
+};
+
+LoopUnrollResult UnrollLoop(Loop *L, UnrollLoopOptions ULO, LoopInfo *LI,
+ ScalarEvolution *SE, DominatorTree *DT,
+ AssumptionCache *AC,
+ const llvm::TargetTransformInfo *TTI,
+ OptimizationRemarkEmitter *ORE, bool PreserveLCSSA,
+ Loop **RemainderLoop = nullptr);
+
+bool UnrollRuntimeLoopRemainder(
+ Loop *L, unsigned Count, bool AllowExpensiveTripCount,
+ bool UseEpilogRemainder, bool UnrollRemainder, bool ForgetAllSCEV,
+ LoopInfo *LI, ScalarEvolution *SE, DominatorTree *DT, AssumptionCache *AC,
+ const TargetTransformInfo *TTI, bool PreserveLCSSA,
+ Loop **ResultLoop = nullptr);
+
+LoopUnrollResult UnrollAndJamLoop(Loop *L, unsigned Count, unsigned TripCount,
+ unsigned TripMultiple, bool UnrollRemainder,
+ LoopInfo *LI, ScalarEvolution *SE,
+ DominatorTree *DT, AssumptionCache *AC,
+ const TargetTransformInfo *TTI,
+ OptimizationRemarkEmitter *ORE,
+ Loop **EpilogueLoop = nullptr);
+
+bool isSafeToUnrollAndJam(Loop *L, ScalarEvolution &SE, DominatorTree &DT,
+ DependenceInfo &DI, LoopInfo &LI);
+
+bool computeUnrollCount(Loop *L, const TargetTransformInfo &TTI,
+ DominatorTree &DT, LoopInfo *LI, ScalarEvolution &SE,
+ const SmallPtrSetImpl<const Value *> &EphValues,
+ OptimizationRemarkEmitter *ORE, unsigned &TripCount,
+ unsigned MaxTripCount, bool MaxOrZero,
+ unsigned &TripMultiple, unsigned LoopSize,
+ TargetTransformInfo::UnrollingPreferences &UP,
+ TargetTransformInfo::PeelingPreferences &PP,
+ bool &UseUpperBound);
+
+void simplifyLoopAfterUnroll(Loop *L, bool SimplifyIVs, LoopInfo *LI,
+ ScalarEvolution *SE, DominatorTree *DT,
+ AssumptionCache *AC,
+ const TargetTransformInfo *TTI);
+
+MDNode *GetUnrollMetadata(MDNode *LoopID, StringRef Name);
+
+TargetTransformInfo::UnrollingPreferences gatherUnrollingPreferences(
+ Loop *L, ScalarEvolution &SE, const TargetTransformInfo &TTI,
+ BlockFrequencyInfo *BFI, ProfileSummaryInfo *PSI, int OptLevel,
+ Optional<unsigned> UserThreshold, Optional<unsigned> UserCount,
+ Optional<bool> UserAllowPartial, Optional<bool> UserRuntime,
+ Optional<bool> UserUpperBound, Optional<unsigned> UserFullUnrollMaxCount);
+
+unsigned ApproximateLoopSize(const Loop *L, unsigned &NumCalls,
+ bool &NotDuplicatable, bool &Convergent,
+ const TargetTransformInfo &TTI,
+ const SmallPtrSetImpl<const Value *> &EphValues,
+ unsigned BEInsns);
+
+} // end namespace llvm
+
+#endif // LLVM_TRANSFORMS_UTILS_UNROLLLOOP_H
+
+#ifdef __GNUC__
+#pragma GCC diagnostic pop
+#endif
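A minimal sketch of populating UnrollLoopOptions and invoking UnrollLoop() for a fixed unroll factor; the field values are illustrative and the wrapper name is hypothetical. In a real pass the trip count and trip multiple would come from ScalarEvolution.

    #include "llvm/Transforms/Utils/UnrollLoop.h"

    using namespace llvm;

    // Try to unroll L by a factor of 4, allowing a runtime remainder loop.
    static LoopUnrollResult unrollByFour(Loop *L, unsigned TripCount,
                                         LoopInfo *LI, ScalarEvolution *SE,
                                         DominatorTree *DT, AssumptionCache *AC,
                                         const TargetTransformInfo *TTI,
                                         OptimizationRemarkEmitter *ORE,
                                         bool PreserveLCSSA) {
      UnrollLoopOptions ULO;
      ULO.Count = 4;
      ULO.TripCount = TripCount;        // 0 if unknown
      ULO.Force = false;
      ULO.AllowRuntime = true;          // emit a remainder loop if needed
      ULO.AllowExpensiveTripCount = false;
      ULO.PreserveCondBr = false;
      ULO.PreserveOnlyFirst = false;
      ULO.TripMultiple = 1;
      ULO.PeelCount = 0;
      ULO.UnrollRemainder = false;
      ULO.ForgetAllSCEV = false;
      return UnrollLoop(L, ULO, LI, SE, DT, AC, TTI, ORE, PreserveLCSSA);
    }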
diff --git a/contrib/libs/llvm12/include/llvm/Transforms/Utils/VNCoercion.h b/contrib/libs/llvm12/include/llvm/Transforms/Utils/VNCoercion.h
new file mode 100644
index 0000000000..675a9259ec
--- /dev/null
+++ b/contrib/libs/llvm12/include/llvm/Transforms/Utils/VNCoercion.h
@@ -0,0 +1,118 @@
+#pragma once
+
+#ifdef __GNUC__
+#pragma GCC diagnostic push
+#pragma GCC diagnostic ignored "-Wunused-parameter"
+#endif
+
+//===- VNCoercion.h - Value Numbering Coercion Utilities --------*- C++ -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+/// \file
+/// This file provides routines used by LLVM's value numbering passes to
+/// perform various forms of value extraction from memory when the types are
+/// not identical. For example, given
+///
+///   store i32 8, i32* %foo
+///   %a = bitcast i32* %foo to i16*
+///   %val = load i16, i16* %a
+///
+/// it is possible to extract the value of the load of %a from the store to
+/// %foo. These routines know how to tell whether they can do that (the
+/// analyze* routines), and can also insert the necessary IR to do it (the
+/// get* routines).
+
+#ifndef LLVM_TRANSFORMS_UTILS_VNCOERCION_H
+#define LLVM_TRANSFORMS_UTILS_VNCOERCION_H
+
+namespace llvm {
+class Constant;
+class StoreInst;
+class LoadInst;
+class MemIntrinsic;
+class Instruction;
+class IRBuilderBase;
+class Value;
+class Type;
+class DataLayout;
+namespace VNCoercion {
+/// Return true if CoerceAvailableValueToLoadType would succeed if it was
+/// called.
+bool canCoerceMustAliasedValueToLoad(Value *StoredVal, Type *LoadTy,
+ const DataLayout &DL);
+
+/// If we saw a store of a value to memory, and then a load from a must-aliased
+/// pointer of a different type, try to coerce the stored value to the loaded
+/// type. LoadedTy is the type of the load we want to replace. IRB is
+/// IRBuilder used to insert new instructions.
+///
+/// If we can't do it, return null.
+Value *coerceAvailableValueToLoadType(Value *StoredVal, Type *LoadedTy,
+ IRBuilderBase &IRB, const DataLayout &DL);
+
+/// This function determines whether a value for the pointer LoadPtr can be
+/// extracted from the store at DepSI.
+///
+/// On success, it returns the offset into DepSI that extraction would start.
+/// On failure, it returns -1.
+int analyzeLoadFromClobberingStore(Type *LoadTy, Value *LoadPtr,
+ StoreInst *DepSI, const DataLayout &DL);
+
+/// This function determines whether a value for the pointer LoadPtr can be
+/// extracted from the load at DepLI.
+///
+/// On success, it returns the offset into DepLI that extraction would start.
+/// On failure, it returns -1.
+int analyzeLoadFromClobberingLoad(Type *LoadTy, Value *LoadPtr, LoadInst *DepLI,
+ const DataLayout &DL);
+
+/// This function determines whether a value for the pointer LoadPtr can be
+/// extracted from the memory intrinsic at DepMI.
+///
+/// On success, it returns the offset into DepMI that extraction would start.
+/// On failure, it returns -1.
+int analyzeLoadFromClobberingMemInst(Type *LoadTy, Value *LoadPtr,
+ MemIntrinsic *DepMI, const DataLayout &DL);
+
+/// If analyzeLoadFromClobberingStore returned an offset, this function can be
+/// used to actually perform the extraction of the bits from the store. It
+/// inserts instructions to do so at InsertPt, and returns the extracted value.
+Value *getStoreValueForLoad(Value *SrcVal, unsigned Offset, Type *LoadTy,
+ Instruction *InsertPt, const DataLayout &DL);
+// This is the same as getStoreValueForLoad, except it performs no insertion.
+// It only allows constant inputs.
+Constant *getConstantStoreValueForLoad(Constant *SrcVal, unsigned Offset,
+ Type *LoadTy, const DataLayout &DL);
+
+/// If analyzeLoadFromClobberingLoad returned an offset, this function can be
+/// used to actually perform the extraction of the bits from the load, including
+/// any necessary load widening. It inserts instructions to do so at InsertPt,
+/// and returns the extracted value.
+Value *getLoadValueForLoad(LoadInst *SrcVal, unsigned Offset, Type *LoadTy,
+ Instruction *InsertPt, const DataLayout &DL);
+// This is the same as getLoadValueForLoad, except it is given the load value as
+// a constant. It returns nullptr if it would require widening the load.
+Constant *getConstantLoadValueForLoad(Constant *SrcVal, unsigned Offset,
+ Type *LoadTy, const DataLayout &DL);
+
+/// If analyzeLoadFromClobberingMemInst returned an offset, this function can be
+/// used to actually perform the extraction of the bits from the memory
+/// intrinsic. It inserts instructions to do so at InsertPt, and returns the
+/// extracted value.
+Value *getMemInstValueForLoad(MemIntrinsic *SrcInst, unsigned Offset,
+ Type *LoadTy, Instruction *InsertPt,
+ const DataLayout &DL);
+// This is the same as getMemInstValueForLoad, except it performs no insertion.
+// It returns nullptr if it cannot produce a constant.
+Constant *getConstantMemInstValueForLoad(MemIntrinsic *SrcInst, unsigned Offset,
+ Type *LoadTy, const DataLayout &DL);
+}
+}
+#endif
+
+#ifdef __GNUC__
+#pragma GCC diagnostic pop
+#endif
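A minimal sketch of the analyze*/get* pairing described above, forwarding a clobbering store's bits to a later load; the helper name is hypothetical.

    #include "llvm/IR/Instructions.h"
    #include "llvm/IR/Module.h"
    #include "llvm/Transforms/Utils/VNCoercion.h"

    using namespace llvm;
    using namespace llvm::VNCoercion;

    // If a load is clobbered by a store to a must-aliased pointer, try to
    // forward the stored bits instead of reloading them from memory.
    static Value *tryForwardStoreToLoad(LoadInst *Load, StoreInst *DepStore) {
      const DataLayout &DL = Load->getModule()->getDataLayout();
      int Offset = analyzeLoadFromClobberingStore(Load->getType(),
                                                  Load->getPointerOperand(),
                                                  DepStore, DL);
      if (Offset < 0)
        return nullptr; // the bits cannot be extracted from this store
      return getStoreValueForLoad(DepStore->getValueOperand(), (unsigned)Offset,
                                  Load->getType(), Load, DL);
    }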
diff --git a/contrib/libs/llvm12/include/llvm/Transforms/Utils/ValueMapper.h b/contrib/libs/llvm12/include/llvm/Transforms/Utils/ValueMapper.h
new file mode 100644
index 0000000000..44fcae641b
--- /dev/null
+++ b/contrib/libs/llvm12/include/llvm/Transforms/Utils/ValueMapper.h
@@ -0,0 +1,292 @@
+#pragma once
+
+#ifdef __GNUC__
+#pragma GCC diagnostic push
+#pragma GCC diagnostic ignored "-Wunused-parameter"
+#endif
+
+//===- ValueMapper.h - Remapping for constants and metadata -----*- C++ -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// This file defines the MapValue interface which is used by various parts of
+// the Transforms/Utils library to implement cloning and linking facilities.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_TRANSFORMS_UTILS_VALUEMAPPER_H
+#define LLVM_TRANSFORMS_UTILS_VALUEMAPPER_H
+
+#include "llvm/ADT/ArrayRef.h"
+#include "llvm/IR/ValueHandle.h"
+#include "llvm/IR/ValueMap.h"
+
+namespace llvm {
+
+class Constant;
+class Function;
+class GlobalIndirectSymbol;
+class GlobalVariable;
+class Instruction;
+class MDNode;
+class Metadata;
+class Type;
+class Value;
+
+using ValueToValueMapTy = ValueMap<const Value *, WeakTrackingVH>;
+
+/// This is a class that can be implemented by clients to remap types when
+/// cloning constants and instructions.
+class ValueMapTypeRemapper {
+ virtual void anchor(); // Out of line method.
+
+public:
+ virtual ~ValueMapTypeRemapper() = default;
+
+ /// The client should implement this method if they want to remap types while
+ /// mapping values.
+ virtual Type *remapType(Type *SrcTy) = 0;
+};
+
+/// This is a class that can be implemented by clients to materialize Values on
+/// demand.
+class ValueMaterializer {
+ virtual void anchor(); // Out of line method.
+
+protected:
+ ValueMaterializer() = default;
+ ValueMaterializer(const ValueMaterializer &) = default;
+ ValueMaterializer &operator=(const ValueMaterializer &) = default;
+ ~ValueMaterializer() = default;
+
+public:
+ /// This method can be implemented to generate a mapped Value on demand. For
+ /// example, if linking lazily. Returns null if the value is not materialized.
+ virtual Value *materialize(Value *V) = 0;
+};
+
+/// These are flags that the value mapping APIs allow.
+enum RemapFlags {
+ RF_None = 0,
+
+ /// If this flag is set, the remapper knows that only local values within a
+ /// function (such as an instruction or argument) are mapped, not global
+ /// values like functions and global metadata.
+ RF_NoModuleLevelChanges = 1,
+
+ /// If this flag is set, the remapper ignores missing function-local entries
+ /// (Argument, Instruction, BasicBlock) that are not in the value map. If it
+ /// is unset, it aborts if an operand is asked to be remapped which doesn't
+ /// exist in the mapping.
+ ///
+ /// There are no such assertions in MapValue(), whose results are almost
+ /// unchanged by this flag. This flag mainly changes the assertion behaviour
+ /// in RemapInstruction().
+ ///
+ /// Since an Instruction's metadata operands (even that point to SSA values)
+ /// aren't guaranteed to be dominated by their definitions, MapMetadata will
+ /// return "!{}" instead of "null" for \a LocalAsMetadata instances whose SSA
+ /// values are unmapped when this flag is set. Otherwise, \a MapValue()
+ /// completely ignores this flag.
+ ///
+ /// \a MapMetadata() always ignores this flag.
+ RF_IgnoreMissingLocals = 2,
+
+ /// Instruct the remapper to move distinct metadata instead of duplicating it
+ /// when there are module-level changes.
+ RF_MoveDistinctMDs = 4,
+
+ /// Any global values not in value map are mapped to null instead of mapping
+ /// to self. Illegal if RF_IgnoreMissingLocals is also set.
+ RF_NullMapMissingGlobalValues = 8,
+};
+
+inline RemapFlags operator|(RemapFlags LHS, RemapFlags RHS) {
+ return RemapFlags(unsigned(LHS) | unsigned(RHS));
+}
+
+/// Context for (re-)mapping values (and metadata).
+///
+/// A shared context used for mapping and remapping of Value and Metadata
+/// instances using \a ValueToValueMapTy, \a RemapFlags, \a
+/// ValueMapTypeRemapper, and \a ValueMaterializer.
+///
+/// There are a number of top-level entry points:
+/// - \a mapValue() (and \a mapConstant());
+/// - \a mapMetadata() (and \a mapMDNode());
+/// - \a remapInstruction(); and
+/// - \a remapFunction().
+///
+/// The \a ValueMaterializer can be used as a callback, but cannot invoke any
+/// of these top-level functions recursively. Instead, callbacks should use
+/// one of the following to schedule work lazily in the \a ValueMapper
+/// instance:
+/// - \a scheduleMapGlobalInitializer()
+/// - \a scheduleMapAppendingVariable()
+/// - \a scheduleMapGlobalIndirectSymbol()
+/// - \a scheduleRemapFunction()
+///
+/// Sometimes a callback needs a different mapping context. Such a context can
+/// be registered using \a registerAlternateMappingContext(), which takes an
+/// alternate \a ValueToValueMapTy and \a ValueMaterializer and returns an ID to
+/// pass into the schedule*() functions.
+///
+/// TODO: lib/Linker really doesn't need the \a ValueHandle in the \a
+/// ValueToValueMapTy. We should template \a ValueMapper (and its
+/// implementation classes), and explicitly instantiate on two concrete
+/// instances of \a ValueMap (one as \a ValueToValueMap, and one with raw \a
+/// Value pointers). It may be viable to do away with \a TrackingMDRef in the
+/// \a Metadata side map for the lib/Linker case as well, in which case we'll
+/// need a new template parameter on \a ValueMap.
+///
+/// TODO: Update callers of \a RemapInstruction() and \a MapValue() (etc.) to
+/// use \a ValueMapper directly.
+class ValueMapper {
+ void *pImpl;
+
+public:
+ ValueMapper(ValueToValueMapTy &VM, RemapFlags Flags = RF_None,
+ ValueMapTypeRemapper *TypeMapper = nullptr,
+ ValueMaterializer *Materializer = nullptr);
+ ValueMapper(ValueMapper &&) = delete;
+ ValueMapper(const ValueMapper &) = delete;
+ ValueMapper &operator=(ValueMapper &&) = delete;
+ ValueMapper &operator=(const ValueMapper &) = delete;
+ ~ValueMapper();
+
+ /// Register an alternate mapping context.
+ ///
+  /// Returns a MappingContextID that can be passed to the various schedule*()
+  /// methods to switch in a different value map on the fly.
+ unsigned
+ registerAlternateMappingContext(ValueToValueMapTy &VM,
+ ValueMaterializer *Materializer = nullptr);
+
+ /// Add to the current \a RemapFlags.
+ ///
+ /// \note Like the top-level mapping functions, \a addFlags() must be called
+ /// at the top level, not during a callback in a \a ValueMaterializer.
+ void addFlags(RemapFlags Flags);
+
+ Metadata *mapMetadata(const Metadata &MD);
+ MDNode *mapMDNode(const MDNode &N);
+
+ Value *mapValue(const Value &V);
+ Constant *mapConstant(const Constant &C);
+
+ void remapInstruction(Instruction &I);
+ void remapFunction(Function &F);
+
+ void scheduleMapGlobalInitializer(GlobalVariable &GV, Constant &Init,
+ unsigned MappingContextID = 0);
+ void scheduleMapAppendingVariable(GlobalVariable &GV, Constant *InitPrefix,
+ bool IsOldCtorDtor,
+ ArrayRef<Constant *> NewMembers,
+ unsigned MappingContextID = 0);
+ void scheduleMapGlobalIndirectSymbol(GlobalIndirectSymbol &GIS,
+ Constant &Target,
+ unsigned MappingContextID = 0);
+ void scheduleRemapFunction(Function &F, unsigned MappingContextID = 0);
+};
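+
+// A minimal usage sketch (editor's addition): one shared ValueMapper mapping a
+// constant and remapping an instruction through the same value map. The names
+// `OldGlobal`, `NewGlobal`, `OldInit`, and `ClonedInst` are assumptions:
+//
+//   ValueToValueMapTy VM;
+//   VM[OldGlobal] = NewGlobal;            // pre-populated old->new pairs
+//   ValueMapper Mapper(VM, RF_IgnoreMissingLocals);
+//   Constant *NewInit = Mapper.mapConstant(*OldInit);
+//   Mapper.remapInstruction(*ClonedInst); // rewrites operands via VM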
+
+/// Look up or compute a value in the value map.
+///
+/// Return a mapped value for a function-local value (Argument, Instruction,
+/// BasicBlock), or compute and memoize a value for a Constant.
+///
+/// 1. If \c V is in VM, return the result.
+/// 2. Else if \c V can be materialized with \c Materializer, do so, memoize
+/// it in \c VM, and return it.
+/// 3. Else if \c V is a function-local value, return nullptr.
+/// 4. Else if \c V is a \a GlobalValue, return \c nullptr or \c V depending
+/// on \a RF_NullMapMissingGlobalValues.
+/// 5. Else if \c V is a \a MetadataAsValue wrapping a LocalAsMetadata,
+/// recurse on the local SSA value, and return nullptr or "metadata !{}" on
+///    missing depending on \a RF_IgnoreMissingLocals.
+/// 6. Else if \c V is a \a MetadataAsValue, rewrap the return of \a
+/// MapMetadata().
+/// 7. Else, compute the equivalent constant, and return it.
+inline Value *MapValue(const Value *V, ValueToValueMapTy &VM,
+ RemapFlags Flags = RF_None,
+ ValueMapTypeRemapper *TypeMapper = nullptr,
+ ValueMaterializer *Materializer = nullptr) {
+ return ValueMapper(VM, Flags, TypeMapper, Materializer).mapValue(*V);
+}
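+
+// A minimal sketch (editor's addition) of the wrapper above, assuming `OldArg`
+// and `NewArg` are values supplied by the caller:
+//
+//   ValueToValueMapTy VM;
+//   VM[OldArg] = NewArg;
+//   Value *Mapped = MapValue(OldArg, VM); // rule 1: returns NewArg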
+
+/// Lookup or compute a mapping for a piece of metadata.
+///
+/// Compute and memoize a mapping for \c MD.
+///
+/// 1. If \c MD is mapped, return it.
+/// 2. Else if \a RF_NoModuleLevelChanges or \c MD is an \a MDString, return
+/// \c MD.
+/// 3. Else if \c MD is a \a ConstantAsMetadata, call \a MapValue() and
+/// re-wrap its return (returning nullptr on nullptr).
+/// 4. Else, \c MD is an \a MDNode. These are remapped, along with their
+/// transitive operands. Distinct nodes are duplicated or moved depending
+///    on \a RF_MoveDistinctMDs. Uniqued nodes are remapped like constants.
+///
+/// \note \a LocalAsMetadata is completely unsupported by \a MapMetadata.
+/// Instead, use \a MapValue() with its wrapping \a MetadataAsValue instance.
+inline Metadata *MapMetadata(const Metadata *MD, ValueToValueMapTy &VM,
+ RemapFlags Flags = RF_None,
+ ValueMapTypeRemapper *TypeMapper = nullptr,
+ ValueMaterializer *Materializer = nullptr) {
+ return ValueMapper(VM, Flags, TypeMapper, Materializer).mapMetadata(*MD);
+}
+
+/// Version of MapMetadata with type safety for MDNode.
+inline MDNode *MapMetadata(const MDNode *MD, ValueToValueMapTy &VM,
+ RemapFlags Flags = RF_None,
+ ValueMapTypeRemapper *TypeMapper = nullptr,
+ ValueMaterializer *Materializer = nullptr) {
+ return ValueMapper(VM, Flags, TypeMapper, Materializer).mapMDNode(*MD);
+}
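+
+// A minimal sketch (editor's addition), assuming `OldMD` is an existing
+// uniqued MDNode whose operands should follow the value map `VM`:
+//
+//   MDNode *NewMD = MapMetadata(OldMD, VM, RF_None);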
+
+/// Convert the instruction operands from referencing the current values into
+/// those specified by VM.
+///
+/// If \a RF_IgnoreMissingLocals is set and an operand can't be found via \a
+/// MapValue(), use the old value. Otherwise assert that this doesn't happen.
+///
+/// Note that \a MapValue() only returns \c nullptr for SSA values missing from
+/// \c VM.
+inline void RemapInstruction(Instruction *I, ValueToValueMapTy &VM,
+ RemapFlags Flags = RF_None,
+ ValueMapTypeRemapper *TypeMapper = nullptr,
+ ValueMaterializer *Materializer = nullptr) {
+ ValueMapper(VM, Flags, TypeMapper, Materializer).remapInstruction(*I);
+}
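+
+// A minimal sketch (editor's addition) of the common clone-then-remap pattern,
+// assuming `I` is the original instruction and `InsertPt` an insertion point:
+//
+//   Instruction *Clone = I->clone();
+//   VM[I] = Clone;                        // record the old->new pair
+//   RemapInstruction(Clone, VM, RF_IgnoreMissingLocals);
+//   Clone->insertBefore(InsertPt);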
+
+/// Remap the operands, metadata, arguments, and instructions of a function.
+///
+/// Calls \a MapValue() on prefix data, prologue data, and personality
+/// function; calls \a MapMetadata() on each attached MDNode; remaps the
+/// argument types using the provided \c TypeMapper; and calls \a
+/// RemapInstruction() on every instruction.
+inline void RemapFunction(Function &F, ValueToValueMapTy &VM,
+ RemapFlags Flags = RF_None,
+ ValueMapTypeRemapper *TypeMapper = nullptr,
+ ValueMaterializer *Materializer = nullptr) {
+ ValueMapper(VM, Flags, TypeMapper, Materializer).remapFunction(F);
+}
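+
+// A minimal sketch (editor's addition): after cloning a function body while
+// recording old->new pairs in `VM`, a hypothetical clone `NewF` can have all
+// of its references rewritten in one pass:
+//
+//   RemapFunction(*NewF, VM, RF_NoModuleLevelChanges | RF_IgnoreMissingLocals);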
+
+/// Version of MapValue with type safety for Constant.
+inline Constant *MapValue(const Constant *V, ValueToValueMapTy &VM,
+ RemapFlags Flags = RF_None,
+ ValueMapTypeRemapper *TypeMapper = nullptr,
+ ValueMaterializer *Materializer = nullptr) {
+ return ValueMapper(VM, Flags, TypeMapper, Materializer).mapConstant(*V);
+}
+
+} // end namespace llvm
+
+#endif // LLVM_TRANSFORMS_UTILS_VALUEMAPPER_H
+
+#ifdef __GNUC__
+#pragma GCC diagnostic pop
+#endif