author     vitalyisaev <vitalyisaev@yandex-team.com>  2023-06-29 10:00:50 +0300
committer  vitalyisaev <vitalyisaev@yandex-team.com>  2023-06-29 10:00:50 +0300
commit     6ffe9e53658409f212834330e13564e4952558f6 (patch)
tree       85b1e00183517648b228aafa7c8fb07f5276f419 /contrib/libs/llvm14/include/llvm/ADT
parent     726057070f9c5a91fc10fde0d5024913d10f1ab9 (diff)
download   ydb-6ffe9e53658409f212834330e13564e4952558f6.tar.gz
YQ Connector: support managed ClickHouse
From dqrun, you can reach a connector instance running on the streaming stand and extract data from a cloud-hosted ClickHouse.
Diffstat (limited to 'contrib/libs/llvm14/include/llvm/ADT')
-rw-r--r--  contrib/libs/llvm14/include/llvm/ADT/APFixedPoint.h  248
-rw-r--r--  contrib/libs/llvm14/include/llvm/ADT/APFloat.h  1354
-rw-r--r--  contrib/libs/llvm14/include/llvm/ADT/APInt.h  2292
-rw-r--r--  contrib/libs/llvm14/include/llvm/ADT/APSInt.h  380
-rw-r--r--  contrib/libs/llvm14/include/llvm/ADT/AllocatorList.h  243
-rw-r--r--  contrib/libs/llvm14/include/llvm/ADT/Any.h  168
-rw-r--r--  contrib/libs/llvm14/include/llvm/ADT/ArrayRef.h  614
-rw-r--r--  contrib/libs/llvm14/include/llvm/ADT/BitVector.h  867
-rw-r--r--  contrib/libs/llvm14/include/llvm/ADT/Bitfields.h  300
-rw-r--r--  contrib/libs/llvm14/include/llvm/ADT/BitmaskEnum.h  164
-rw-r--r--  contrib/libs/llvm14/include/llvm/ADT/BreadthFirstIterator.h  175
-rw-r--r--  contrib/libs/llvm14/include/llvm/ADT/CachedHashString.h  195
-rw-r--r--  contrib/libs/llvm14/include/llvm/ADT/CoalescingBitVector.h  462
-rw-r--r--  contrib/libs/llvm14/include/llvm/ADT/CombinationGenerator.h  159
-rw-r--r--  contrib/libs/llvm14/include/llvm/ADT/DAGDeltaAlgorithm.h  89
-rw-r--r--  contrib/libs/llvm14/include/llvm/ADT/DeltaAlgorithm.h  103
-rw-r--r--  contrib/libs/llvm14/include/llvm/ADT/DenseMap.h  1320
-rw-r--r--  contrib/libs/llvm14/include/llvm/ADT/DenseMapInfo.h  304
-rw-r--r--  contrib/libs/llvm14/include/llvm/ADT/DenseSet.h  313
-rw-r--r--  contrib/libs/llvm14/include/llvm/ADT/DepthFirstIterator.h  321
-rw-r--r--  contrib/libs/llvm14/include/llvm/ADT/DirectedGraph.h  291
-rw-r--r--  contrib/libs/llvm14/include/llvm/ADT/EnumeratedArray.h  62
-rw-r--r--  contrib/libs/llvm14/include/llvm/ADT/EpochTracker.h  110
-rw-r--r--  contrib/libs/llvm14/include/llvm/ADT/EquivalenceClasses.h  327
-rw-r--r--  contrib/libs/llvm14/include/llvm/ADT/FloatingPointMode.h  207
-rw-r--r--  contrib/libs/llvm14/include/llvm/ADT/FoldingSet.h  819
-rw-r--r--  contrib/libs/llvm14/include/llvm/ADT/FunctionExtras.h  427
-rw-r--r--  contrib/libs/llvm14/include/llvm/ADT/GenericCycleImpl.h  423
-rw-r--r--  contrib/libs/llvm14/include/llvm/ADT/GenericCycleInfo.h  345
-rw-r--r--  contrib/libs/llvm14/include/llvm/ADT/GenericSSAContext.h  85
-rw-r--r--  contrib/libs/llvm14/include/llvm/ADT/GraphTraits.h  155
-rw-r--r--  contrib/libs/llvm14/include/llvm/ADT/Hashing.h  701
-rw-r--r--  contrib/libs/llvm14/include/llvm/ADT/ImmutableList.h  257
-rw-r--r--  contrib/libs/llvm14/include/llvm/ADT/ImmutableMap.h  341
-rw-r--r--  contrib/libs/llvm14/include/llvm/ADT/ImmutableSet.h  1182
-rw-r--r--  contrib/libs/llvm14/include/llvm/ADT/IndexedMap.h  96
-rw-r--r--  contrib/libs/llvm14/include/llvm/ADT/IntEqClasses.h  99
-rw-r--r--  contrib/libs/llvm14/include/llvm/ADT/IntervalMap.h  2185
-rw-r--r--  contrib/libs/llvm14/include/llvm/ADT/IntrusiveRefCntPtr.h  321
-rw-r--r--  contrib/libs/llvm14/include/llvm/ADT/MapVector.h  251
-rw-r--r--  contrib/libs/llvm14/include/llvm/ADT/None.h  38
-rw-r--r--  contrib/libs/llvm14/include/llvm/ADT/Optional.h  508
-rw-r--r--  contrib/libs/llvm14/include/llvm/ADT/PackedVector.h  162
-rw-r--r--  contrib/libs/llvm14/include/llvm/ADT/PointerEmbeddedInt.h  130
-rw-r--r--  contrib/libs/llvm14/include/llvm/ADT/PointerIntPair.h  256
-rw-r--r--  contrib/libs/llvm14/include/llvm/ADT/PointerSumType.h  305
-rw-r--r--  contrib/libs/llvm14/include/llvm/ADT/PointerUnion.h  261
-rw-r--r--  contrib/libs/llvm14/include/llvm/ADT/PostOrderIterator.h  326
-rw-r--r--  contrib/libs/llvm14/include/llvm/ADT/PriorityQueue.h  94
-rw-r--r--  contrib/libs/llvm14/include/llvm/ADT/PriorityWorklist.h  275
-rw-r--r--  contrib/libs/llvm14/include/llvm/ADT/SCCIterator.h  383
-rw-r--r--  contrib/libs/llvm14/include/llvm/ADT/STLArrayExtras.h  46
-rw-r--r--  contrib/libs/llvm14/include/llvm/ADT/STLExtras.h  2176
-rw-r--r--  contrib/libs/llvm14/include/llvm/ADT/STLForwardCompat.h  94
-rw-r--r--  contrib/libs/llvm14/include/llvm/ADT/STLFunctionalExtras.h  87
-rw-r--r--  contrib/libs/llvm14/include/llvm/ADT/ScopeExit.h  77
-rw-r--r--  contrib/libs/llvm14/include/llvm/ADT/ScopedHashTable.h  274
-rw-r--r--  contrib/libs/llvm14/include/llvm/ADT/Sequence.h  388
-rw-r--r--  contrib/libs/llvm14/include/llvm/ADT/SetOperations.h  94
-rw-r--r--  contrib/libs/llvm14/include/llvm/ADT/SetVector.h  350
-rw-r--r--  contrib/libs/llvm14/include/llvm/ADT/SmallBitVector.h  772
-rw-r--r--  contrib/libs/llvm14/include/llvm/ADT/SmallPtrSet.h  529
-rw-r--r--  contrib/libs/llvm14/include/llvm/ADT/SmallSet.h  298
-rw-r--r--  contrib/libs/llvm14/include/llvm/ADT/SmallString.h  305
-rw-r--r--  contrib/libs/llvm14/include/llvm/ADT/SmallVector.h  1320
-rw-r--r--  contrib/libs/llvm14/include/llvm/ADT/SparseBitVector.h  904
-rw-r--r--  contrib/libs/llvm14/include/llvm/ADT/SparseMultiSet.h  534
-rw-r--r--  contrib/libs/llvm14/include/llvm/ADT/SparseSet.h  330
-rw-r--r--  contrib/libs/llvm14/include/llvm/ADT/Statistic.h  233
-rw-r--r--  contrib/libs/llvm14/include/llvm/ADT/StringExtras.h  612
-rw-r--r--  contrib/libs/llvm14/include/llvm/ADT/StringMap.h  499
-rw-r--r--  contrib/libs/llvm14/include/llvm/ADT/StringMapEntry.h  161
-rw-r--r--  contrib/libs/llvm14/include/llvm/ADT/StringRef.h  1006
-rw-r--r--  contrib/libs/llvm14/include/llvm/ADT/StringSet.h  67
-rw-r--r--  contrib/libs/llvm14/include/llvm/ADT/StringSwitch.h  209
-rw-r--r--  contrib/libs/llvm14/include/llvm/ADT/TinyPtrVector.h  369
-rw-r--r--  contrib/libs/llvm14/include/llvm/ADT/Triple.h  1016
-rw-r--r--  contrib/libs/llvm14/include/llvm/ADT/Twine.h  577
-rw-r--r--  contrib/libs/llvm14/include/llvm/ADT/UniqueVector.h  112
-rw-r--r--  contrib/libs/llvm14/include/llvm/ADT/bit.h  75
-rw-r--r--  contrib/libs/llvm14/include/llvm/ADT/edit_distance.h  114
-rw-r--r--  contrib/libs/llvm14/include/llvm/ADT/fallible_iterator.h  252
-rw-r--r--  contrib/libs/llvm14/include/llvm/ADT/identity.h  45
-rw-r--r--  contrib/libs/llvm14/include/llvm/ADT/ilist.h  433
-rw-r--r--  contrib/libs/llvm14/include/llvm/ADT/ilist_base.h  103
-rw-r--r--  contrib/libs/llvm14/include/llvm/ADT/ilist_iterator.h  208
-rw-r--r--  contrib/libs/llvm14/include/llvm/ADT/ilist_node.h  317
-rw-r--r--  contrib/libs/llvm14/include/llvm/ADT/ilist_node_base.h  63
-rw-r--r--  contrib/libs/llvm14/include/llvm/ADT/ilist_node_options.h  142
-rw-r--r--  contrib/libs/llvm14/include/llvm/ADT/iterator.h  389
-rw-r--r--  contrib/libs/llvm14/include/llvm/ADT/iterator_range.h  74
-rw-r--r--  contrib/libs/llvm14/include/llvm/ADT/simple_ilist.h  325
92 files changed, 37472 insertions, 0 deletions
diff --git a/contrib/libs/llvm14/include/llvm/ADT/APFixedPoint.h b/contrib/libs/llvm14/include/llvm/ADT/APFixedPoint.h
new file mode 100644
index 0000000000..b93f744a3e
--- /dev/null
+++ b/contrib/libs/llvm14/include/llvm/ADT/APFixedPoint.h
@@ -0,0 +1,248 @@
+#pragma once
+
+#ifdef __GNUC__
+#pragma GCC diagnostic push
+#pragma GCC diagnostic ignored "-Wunused-parameter"
+#endif
+
+//===- APFixedPoint.h - Fixed point constant handling -----------*- C++ -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+///
+/// \file
+/// Defines the fixed point number interface.
+/// This is a class for abstracting various operations performed on fixed point
+/// types.
+///
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_ADT_APFIXEDPOINT_H
+#define LLVM_ADT_APFIXEDPOINT_H
+
+#include "llvm/ADT/APSInt.h"
+#include "llvm/ADT/SmallString.h"
+#include "llvm/Support/raw_ostream.h"
+
+namespace llvm {
+
+class APFloat;
+struct fltSemantics;
+
+/// The fixed point semantics work similarly to fltSemantics. The width
+/// specifies the whole bit width of the underlying scaled integer (with padding
+/// if any). The scale represents the number of fractional bits in this type.
+/// When HasUnsignedPadding is true and this type is unsigned, the first bit
+/// in the value this represents is treated as padding.
+class FixedPointSemantics {
+public:
+ FixedPointSemantics(unsigned Width, unsigned Scale, bool IsSigned,
+ bool IsSaturated, bool HasUnsignedPadding)
+ : Width(Width), Scale(Scale), IsSigned(IsSigned),
+ IsSaturated(IsSaturated), HasUnsignedPadding(HasUnsignedPadding) {
+ assert(Width >= Scale && "Not enough room for the scale");
+ assert(!(IsSigned && HasUnsignedPadding) &&
+ "Cannot have unsigned padding on a signed type.");
+ }
+
+ unsigned getWidth() const { return Width; }
+ unsigned getScale() const { return Scale; }
+ bool isSigned() const { return IsSigned; }
+ bool isSaturated() const { return IsSaturated; }
+ bool hasUnsignedPadding() const { return HasUnsignedPadding; }
+
+ void setSaturated(bool Saturated) { IsSaturated = Saturated; }
+
+ /// Return the number of integral bits represented by these semantics. These
+ /// are separate from the fractional bits and do not include the sign or
+ /// padding bit.
+ unsigned getIntegralBits() const {
+ if (IsSigned || (!IsSigned && HasUnsignedPadding))
+ return Width - Scale - 1;
+ else
+ return Width - Scale;
+ }
+
+ /// Return the FixedPointSemantics that allows for calculating the full
+ /// precision semantic that can precisely represent the precision and ranges
+ /// of both input values. This does not compute the resulting semantics for a
+ /// given binary operation.
+ FixedPointSemantics
+ getCommonSemantics(const FixedPointSemantics &Other) const;
+
+ /// Returns true if this fixed-point semantic with its value bits interpreted
+ /// as an integer can fit in the given floating point semantic without
+ /// overflowing to infinity.
+ /// For example, a signed 8-bit fixed-point semantic has a maximum and
+ /// minimum integer representation of 127 and -128, respectively. If both of
+ /// these values can be represented (possibly inexactly) in the floating
+ /// point semantic without overflowing, this returns true.
+ bool fitsInFloatSemantics(const fltSemantics &FloatSema) const;
+
+ /// Return the FixedPointSemantics for an integer type.
+ static FixedPointSemantics GetIntegerSemantics(unsigned Width,
+ bool IsSigned) {
+ return FixedPointSemantics(Width, /*Scale=*/0, IsSigned,
+ /*IsSaturated=*/false,
+ /*HasUnsignedPadding=*/false);
+ }
+
+private:
+ unsigned Width : 16;
+ unsigned Scale : 13;
+ unsigned IsSigned : 1;
+ unsigned IsSaturated : 1;
+ unsigned HasUnsignedPadding : 1;
+};
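+
+// Illustrative usage sketch (an editorial example, not part of the upstream
+// header): a signed 32-bit type with 16 fractional bits leaves 15 integral
+// bits after the sign bit.
+//
+//   llvm::FixedPointSemantics Sema(/*Width=*/32, /*Scale=*/16,
+//                                  /*IsSigned=*/true, /*IsSaturated=*/false,
+//                                  /*HasUnsignedPadding=*/false);
+//   assert(Sema.getIntegralBits() == 15); // 32 - 16 - 1 (sign bit)
+//
+//   // Plain integers are fixed-point values with Scale == 0:
+//   auto IntSema =
+//       llvm::FixedPointSemantics::GetIntegerSemantics(/*Width=*/32,
+//                                                      /*IsSigned=*/true);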
+
+/// The APFixedPoint class works similarly to APInt/APSInt in that it is a
+/// functional replacement for a scaled integer. It is meant to replicate the
+/// fixed point types proposed in ISO/IEC JTC1 SC22 WG14 N1169. The class carries
+/// info about the fixed point type's width, sign, scale, and saturation, and
+/// provides different operations that would normally be performed on fixed point
+/// types.
+class APFixedPoint {
+public:
+ APFixedPoint(const APInt &Val, const FixedPointSemantics &Sema)
+ : Val(Val, !Sema.isSigned()), Sema(Sema) {
+ assert(Val.getBitWidth() == Sema.getWidth() &&
+ "The value should have a bit width that matches the Sema width");
+ }
+
+ APFixedPoint(uint64_t Val, const FixedPointSemantics &Sema)
+ : APFixedPoint(APInt(Sema.getWidth(), Val, Sema.isSigned()), Sema) {}
+
+ // Zero initialization.
+ APFixedPoint(const FixedPointSemantics &Sema) : APFixedPoint(0, Sema) {}
+
+ APSInt getValue() const { return APSInt(Val, !Sema.isSigned()); }
+ inline unsigned getWidth() const { return Sema.getWidth(); }
+ inline unsigned getScale() const { return Sema.getScale(); }
+ inline bool isSaturated() const { return Sema.isSaturated(); }
+ inline bool isSigned() const { return Sema.isSigned(); }
+ inline bool hasPadding() const { return Sema.hasUnsignedPadding(); }
+ FixedPointSemantics getSemantics() const { return Sema; }
+
+ bool getBoolValue() const { return Val.getBoolValue(); }
+
+ // Convert this number to match the semantics provided. If the overflow
+ // parameter is provided, set this value to true or false to indicate if this
+ // operation results in an overflow.
+ APFixedPoint convert(const FixedPointSemantics &DstSema,
+ bool *Overflow = nullptr) const;
+
+ // Perform binary operations on a fixed point type. The resulting fixed point
+ // value will be in the common, full precision semantics that can represent
+ // the precision and ranges of both input values. See convert() for an
+ // explanation of the Overflow parameter.
+ APFixedPoint add(const APFixedPoint &Other, bool *Overflow = nullptr) const;
+ APFixedPoint sub(const APFixedPoint &Other, bool *Overflow = nullptr) const;
+ APFixedPoint mul(const APFixedPoint &Other, bool *Overflow = nullptr) const;
+ APFixedPoint div(const APFixedPoint &Other, bool *Overflow = nullptr) const;
+
+ // Perform shift operations on a fixed point type. Unlike the other binary
+ // operations, the resulting fixed point value will be in the original
+ // semantic.
+ APFixedPoint shl(unsigned Amt, bool *Overflow = nullptr) const;
+ APFixedPoint shr(unsigned Amt, bool *Overflow = nullptr) const {
+ // Right shift cannot overflow.
+ if (Overflow)
+ *Overflow = false;
+ return APFixedPoint(Val >> Amt, Sema);
+ }
+
+ /// Perform a unary negation (-X) on this fixed point type, taking into
+ /// account saturation if applicable.
+ APFixedPoint negate(bool *Overflow = nullptr) const;
+
+ /// Return the integral part of this fixed point number, rounded towards
+ /// zero. (-2.5k -> -2)
+ APSInt getIntPart() const {
+ if (Val < 0 && Val != -Val) // Cover the case when we have the min val
+ return -(-Val >> getScale());
+ else
+ return Val >> getScale();
+ }
+
+ /// Return the integral part of this fixed point number, rounded towards
+ /// zero. The value is stored into an APSInt with the provided width and sign.
+ /// If the overflow parameter is provided, and the integral value is not able
+ /// to be fully stored in the provided width and sign, the overflow parameter
+ /// is set to true.
+ APSInt convertToInt(unsigned DstWidth, bool DstSign,
+ bool *Overflow = nullptr) const;
+
+ /// Convert this fixed point number to a floating point value with the
+ /// provided semantics.
+ APFloat convertToFloat(const fltSemantics &FloatSema) const;
+
+ void toString(SmallVectorImpl<char> &Str) const;
+ std::string toString() const {
+ SmallString<40> S;
+ toString(S);
+ return std::string(S.str());
+ }
+
+ // If LHS > RHS, return 1. If LHS == RHS, return 0. If LHS < RHS, return -1.
+ int compare(const APFixedPoint &Other) const;
+ bool operator==(const APFixedPoint &Other) const {
+ return compare(Other) == 0;
+ }
+ bool operator!=(const APFixedPoint &Other) const {
+ return compare(Other) != 0;
+ }
+ bool operator>(const APFixedPoint &Other) const { return compare(Other) > 0; }
+ bool operator<(const APFixedPoint &Other) const { return compare(Other) < 0; }
+ bool operator>=(const APFixedPoint &Other) const {
+ return compare(Other) >= 0;
+ }
+ bool operator<=(const APFixedPoint &Other) const {
+ return compare(Other) <= 0;
+ }
+
+ static APFixedPoint getMax(const FixedPointSemantics &Sema);
+ static APFixedPoint getMin(const FixedPointSemantics &Sema);
+
+ /// Given a floating point semantic, return the next floating point semantic
+ /// with a larger exponent and larger or equal mantissa.
+ static const fltSemantics *promoteFloatSemantics(const fltSemantics *S);
+
+ /// Create an APFixedPoint with a value equal to that of the provided integer,
+ /// and in the same semantics as the provided target semantics. If the value
+ /// is not able to fit in the specified fixed point semantics, and the
+ /// overflow parameter is provided, it is set to true.
+ static APFixedPoint getFromIntValue(const APSInt &Value,
+ const FixedPointSemantics &DstFXSema,
+ bool *Overflow = nullptr);
+
+ /// Create an APFixedPoint with a value equal to that of the provided
+ /// floating point value, in the provided target semantics. If the value is
+ /// not able to fit in the specified fixed point semantics and the overflow
+ /// parameter is specified, it is set to true.
+ /// For NaN, the Overflow flag is always set. For +inf and -inf, if the
+ /// semantic is saturating, the value saturates. Otherwise, the Overflow flag
+ /// is set.
+ static APFixedPoint getFromFloatValue(const APFloat &Value,
+ const FixedPointSemantics &DstFXSema,
+ bool *Overflow = nullptr);
+
+private:
+ APSInt Val;
+ FixedPointSemantics Sema;
+};
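+
+// Illustrative usage sketch (an editorial example, not part of the upstream
+// header): the raw-value constructor takes the underlying scaled integer, so
+// with Scale == 16 the real value 1.5 is stored as 1.5 * 2^16. Arithmetic
+// reports overflow through the optional flag, and convert() saturates
+// instead of overflowing when the target semantics are saturating.
+//
+//   llvm::FixedPointSemantics Sema(32, 16, /*IsSigned=*/true,
+//                                  /*IsSaturated=*/false,
+//                                  /*HasUnsignedPadding=*/false);
+//   llvm::APFixedPoint A(/*Val=*/3 << 15, Sema); // 1.5
+//   llvm::APFixedPoint B(/*Val=*/1 << 16, Sema); // 1.0
+//   bool Overflow = false;
+//   llvm::APFixedPoint Sum = A.add(B, &Overflow); // 2.5, Overflow == false
+//
+//   llvm::FixedPointSemantics Narrow(16, 8, /*IsSigned=*/true,
+//                                    /*IsSaturated=*/true,
+//                                    /*HasUnsignedPadding=*/false);
+//   llvm::APFixedPoint Clamped = Sum.convert(Narrow); // clamps if needed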
+
+inline raw_ostream &operator<<(raw_ostream &OS, const APFixedPoint &FX) {
+ OS << FX.toString();
+ return OS;
+}
+
+} // namespace llvm
+
+#endif
+
+#ifdef __GNUC__
+#pragma GCC diagnostic pop
+#endif
diff --git a/contrib/libs/llvm14/include/llvm/ADT/APFloat.h b/contrib/libs/llvm14/include/llvm/ADT/APFloat.h
new file mode 100644
index 0000000000..3fee3bba7c
--- /dev/null
+++ b/contrib/libs/llvm14/include/llvm/ADT/APFloat.h
@@ -0,0 +1,1354 @@
+#pragma once
+
+#ifdef __GNUC__
+#pragma GCC diagnostic push
+#pragma GCC diagnostic ignored "-Wunused-parameter"
+#endif
+
+//===- llvm/ADT/APFloat.h - Arbitrary Precision Floating Point ---*- C++ -*-==//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+///
+/// \file
+/// This file declares a class to represent arbitrary precision floating point
+/// values and provide a variety of arithmetic operations on them.
+///
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_ADT_APFLOAT_H
+#define LLVM_ADT_APFLOAT_H
+
+#include "llvm/ADT/APInt.h"
+#include "llvm/ADT/ArrayRef.h"
+#include "llvm/ADT/FloatingPointMode.h"
+#include "llvm/Support/ErrorHandling.h"
+#include <memory>
+
+#define APFLOAT_DISPATCH_ON_SEMANTICS(METHOD_CALL) \
+ do { \
+ if (usesLayout<IEEEFloat>(getSemantics())) \
+ return U.IEEE.METHOD_CALL; \
+ if (usesLayout<DoubleAPFloat>(getSemantics())) \
+ return U.Double.METHOD_CALL; \
+ llvm_unreachable("Unexpected semantics"); \
+ } while (false)
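+
+// For instance, APFLOAT_DISPATCH_ON_SEMANTICS(needsCleanup()) expands to a
+// runtime dispatch of roughly this shape:
+//
+//   if (usesLayout<IEEEFloat>(getSemantics()))
+//     return U.IEEE.needsCleanup();
+//   if (usesLayout<DoubleAPFloat>(getSemantics()))
+//     return U.Double.needsCleanup();
+//   llvm_unreachable("Unexpected semantics");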
+
+namespace llvm {
+
+struct fltSemantics;
+class APSInt;
+class StringRef;
+class APFloat;
+class raw_ostream;
+
+template <typename T> class Expected;
+template <typename T> class SmallVectorImpl;
+
+/// Enum that represents what fraction of the LSB the truncated bits of an fp
+/// number represent.
+///
+/// This essentially combines the roles of guard and sticky bits.
+enum lostFraction { // Example of truncated bits:
+ lfExactlyZero, // 000000
+ lfLessThanHalf, // 0xxxxx x's not all zero
+ lfExactlyHalf, // 100000
+ lfMoreThanHalf // 1xxxxx x's not all zero
+};
+
+/// A self-contained host- and target-independent arbitrary-precision
+/// floating-point software implementation.
+///
+/// APFloat uses bignum integer arithmetic as provided by static functions in
+/// the APInt class. The library will work with bignum integers whose parts are
+/// any unsigned type at least 16 bits wide, but 64 bits is recommended.
+///
+/// Written for clarity rather than speed, in particular with a view to use in
+/// the front-end of a cross compiler so that target arithmetic can be correctly
+/// performed on the host. Performance should nonetheless be reasonable,
+/// particularly for its intended use. It may be useful as a base
+/// implementation for a run-time library during development of a faster
+/// target-specific one.
+///
+/// All 5 rounding modes in the IEEE-754R draft are handled correctly for all
+/// implemented operations. Currently implemented operations are add, subtract,
+/// multiply, divide, fused-multiply-add, conversion-to-float,
+/// conversion-to-integer and conversion-from-integer. New rounding modes
+/// (e.g. away from zero) can be added with three or four lines of code.
+///
+/// Four formats are built-in: IEEE single precision, double precision,
+/// quadruple precision, and x87 80-bit extended double (when operating with
+/// full extended precision). Adding a new format that obeys IEEE semantics
+/// only requires adding two lines of code: a declaration and definition of the
+/// format.
+///
+/// All operations return the status of that operation as an exception bit-mask,
+/// so multiple operations can be done consecutively with their results or-ed
+/// together. The returned status can be useful for compiler diagnostics; e.g.,
+/// inexact, underflow and overflow can be easily diagnosed on constant folding,
+/// and compiler optimizers can determine what exceptions would be raised by
+/// folding operations and optimize, or perhaps not optimize, accordingly.
+///
+/// At present, underflow tininess is detected after rounding; it should be
+/// straightforward to add support for the before-rounding case too.
+///
+/// The library reads hexadecimal floating point numbers as per C99, and
+/// correctly rounds if necessary according to the specified rounding mode.
+/// Syntax is required to have been validated by the caller. It also converts
+/// floating point numbers to hexadecimal text as per the C99 %a and %A
+/// conversions. The output precision (or alternatively the natural minimal
+/// precision) can be specified; if the requested precision is less than the
+/// natural precision the output is correctly rounded for the specified rounding
+/// mode.
+///
+/// It also reads decimal floating point numbers and correctly rounds according
+/// to the specified rounding mode.
+///
+/// Conversion to decimal text is not currently implemented.
+///
+/// Non-zero finite numbers are represented internally as a sign bit, a 16-bit
+/// signed exponent, and the significand as an array of integer parts. After
+/// normalization of a number of precision P the exponent is within the range of
+/// the format, and if the number is not denormal the P-th bit of the
+/// significand is set as an explicit integer bit. For denormals the most
+/// significant bit is shifted right so that the exponent is maintained at the
+/// format's minimum, so that the smallest denormal has just the least
+/// significant bit of the significand set. The sign of zeroes and infinities
+/// is significant; the exponent and significand of such numbers is not stored,
+/// but has a known implicit (deterministic) value: 0 for the significands, 0
+/// for zero exponent, all 1 bits for infinity exponent. For NaNs the sign and
+/// significand are deterministic, although not really meaningful, and preserved
+/// in non-conversion operations. The exponent is implicitly all 1 bits.
+///
+/// APFloat does not provide any exception handling beyond default exception
+/// handling. Per the "should" clause of IEEE-754R 2008 6.2.1, signaling NaNs
+/// are represented by encoding the first bit of the trailing significand as 0.
+///
+/// TODO
+/// ====
+///
+/// Some features that may or may not be worth adding:
+///
+/// Binary to decimal conversion (hard).
+///
+/// Optional ability to detect underflow tininess before rounding.
+///
+/// New formats: x87 in single and double precision mode (IEEE apart from
+/// extended exponent range) (hard).
+///
+/// New operations: sqrt, IEEE remainder, C90 fmod, nexttoward.
+///
+
+// This is the common type definitions shared by APFloat and its internal
+// implementation classes. This struct should not define any non-static data
+// members.
+struct APFloatBase {
+ typedef APInt::WordType integerPart;
+ static constexpr unsigned integerPartWidth = APInt::APINT_BITS_PER_WORD;
+
+  /// A signed type to represent a floating point number's unbiased exponent.
+ typedef int32_t ExponentType;
+
+ /// \name Floating Point Semantics.
+ /// @{
+ enum Semantics {
+ S_IEEEhalf,
+ S_BFloat,
+ S_IEEEsingle,
+ S_IEEEdouble,
+ S_x87DoubleExtended,
+ S_IEEEquad,
+ S_PPCDoubleDouble
+ };
+
+ static const llvm::fltSemantics &EnumToSemantics(Semantics S);
+ static Semantics SemanticsToEnum(const llvm::fltSemantics &Sem);
+
+ static const fltSemantics &IEEEhalf() LLVM_READNONE;
+ static const fltSemantics &BFloat() LLVM_READNONE;
+ static const fltSemantics &IEEEsingle() LLVM_READNONE;
+ static const fltSemantics &IEEEdouble() LLVM_READNONE;
+ static const fltSemantics &IEEEquad() LLVM_READNONE;
+ static const fltSemantics &PPCDoubleDouble() LLVM_READNONE;
+ static const fltSemantics &x87DoubleExtended() LLVM_READNONE;
+
+  /// A pseudo fltSemantics used to construct APFloats that cannot conflict with
+ /// anything real.
+ static const fltSemantics &Bogus() LLVM_READNONE;
+
+ /// @}
+
+ /// IEEE-754R 5.11: Floating Point Comparison Relations.
+ enum cmpResult {
+ cmpLessThan,
+ cmpEqual,
+ cmpGreaterThan,
+ cmpUnordered
+ };
+
+ /// IEEE-754R 4.3: Rounding-direction attributes.
+ using roundingMode = llvm::RoundingMode;
+
+ static constexpr roundingMode rmNearestTiesToEven =
+ RoundingMode::NearestTiesToEven;
+ static constexpr roundingMode rmTowardPositive = RoundingMode::TowardPositive;
+ static constexpr roundingMode rmTowardNegative = RoundingMode::TowardNegative;
+ static constexpr roundingMode rmTowardZero = RoundingMode::TowardZero;
+ static constexpr roundingMode rmNearestTiesToAway =
+ RoundingMode::NearestTiesToAway;
+
+ /// IEEE-754R 7: Default exception handling.
+ ///
+ /// opUnderflow or opOverflow are always returned or-ed with opInexact.
+ ///
+ /// APFloat models this behavior specified by IEEE-754:
+ /// "For operations producing results in floating-point format, the default
+ /// result of an operation that signals the invalid operation exception
+ /// shall be a quiet NaN."
+ enum opStatus {
+ opOK = 0x00,
+ opInvalidOp = 0x01,
+ opDivByZero = 0x02,
+ opOverflow = 0x04,
+ opUnderflow = 0x08,
+ opInexact = 0x10
+ };
+
+ /// Category of internally-represented number.
+ enum fltCategory {
+ fcInfinity,
+ fcNaN,
+ fcNormal,
+ fcZero
+ };
+
+ /// Convenience enum used to construct an uninitialized APFloat.
+ enum uninitializedTag {
+ uninitialized
+ };
+
+ /// Enumeration of \c ilogb error results.
+ enum IlogbErrorKinds {
+ IEK_Zero = INT_MIN + 1,
+ IEK_NaN = INT_MIN,
+ IEK_Inf = INT_MAX
+ };
+
+ static unsigned int semanticsPrecision(const fltSemantics &);
+ static ExponentType semanticsMinExponent(const fltSemantics &);
+ static ExponentType semanticsMaxExponent(const fltSemantics &);
+ static unsigned int semanticsSizeInBits(const fltSemantics &);
+
+ /// Returns the size of the floating point number (in bits) in the given
+ /// semantics.
+ static unsigned getSizeInBits(const fltSemantics &Sem);
+};
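+
+// Illustrative usage sketch (an editorial example, not part of the upstream
+// header): statuses returned from consecutive operations can be or-ed
+// together and tested as a bit-mask, and every operation takes an explicit
+// rounding mode.
+//
+//   llvm::APFloat X(1.0), Y(3.0);
+//   unsigned Status = llvm::APFloat::opOK;
+//   Status |= X.divide(Y, llvm::APFloat::rmNearestTiesToEven); // inexact
+//   Status |= X.multiply(Y, llvm::APFloat::rmNearestTiesToEven);
+//   if (Status & llvm::APFloat::opInexact) {
+//     // at least one intermediate result was rounded
+//   }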
+
+namespace detail {
+
+class IEEEFloat final : public APFloatBase {
+public:
+ /// \name Constructors
+ /// @{
+
+ IEEEFloat(const fltSemantics &); // Default construct to +0.0
+ IEEEFloat(const fltSemantics &, integerPart);
+ IEEEFloat(const fltSemantics &, uninitializedTag);
+ IEEEFloat(const fltSemantics &, const APInt &);
+ explicit IEEEFloat(double d);
+ explicit IEEEFloat(float f);
+ IEEEFloat(const IEEEFloat &);
+ IEEEFloat(IEEEFloat &&);
+ ~IEEEFloat();
+
+ /// @}
+
+ /// Returns whether this instance allocated memory.
+ bool needsCleanup() const { return partCount() > 1; }
+
+ /// \name Convenience "constructors"
+ /// @{
+
+ /// @}
+
+ /// \name Arithmetic
+ /// @{
+
+ opStatus add(const IEEEFloat &, roundingMode);
+ opStatus subtract(const IEEEFloat &, roundingMode);
+ opStatus multiply(const IEEEFloat &, roundingMode);
+ opStatus divide(const IEEEFloat &, roundingMode);
+ /// IEEE remainder.
+ opStatus remainder(const IEEEFloat &);
+ /// C fmod, or llvm frem.
+ opStatus mod(const IEEEFloat &);
+ opStatus fusedMultiplyAdd(const IEEEFloat &, const IEEEFloat &, roundingMode);
+ opStatus roundToIntegral(roundingMode);
+ /// IEEE-754R 5.3.1: nextUp/nextDown.
+ opStatus next(bool nextDown);
+
+ /// @}
+
+ /// \name Sign operations.
+ /// @{
+
+ void changeSign();
+
+ /// @}
+
+ /// \name Conversions
+ /// @{
+
+ opStatus convert(const fltSemantics &, roundingMode, bool *);
+ opStatus convertToInteger(MutableArrayRef<integerPart>, unsigned int, bool,
+ roundingMode, bool *) const;
+ opStatus convertFromAPInt(const APInt &, bool, roundingMode);
+ opStatus convertFromSignExtendedInteger(const integerPart *, unsigned int,
+ bool, roundingMode);
+ opStatus convertFromZeroExtendedInteger(const integerPart *, unsigned int,
+ bool, roundingMode);
+ Expected<opStatus> convertFromString(StringRef, roundingMode);
+ APInt bitcastToAPInt() const;
+ double convertToDouble() const;
+ float convertToFloat() const;
+
+ /// @}
+
+ /// The definition of equality is not straightforward for floating point, so
+ /// we won't use operator==. Use one of the following, or write whatever it
+ /// is you really mean.
+ bool operator==(const IEEEFloat &) const = delete;
+
+ /// IEEE comparison with another floating point number (NaNs compare
+ /// unordered, 0==-0).
+ cmpResult compare(const IEEEFloat &) const;
+
+ /// Bitwise comparison for equality (QNaNs compare equal, 0!=-0).
+ bool bitwiseIsEqual(const IEEEFloat &) const;
+
+ /// Write out a hexadecimal representation of the floating point value to DST,
+ /// which must be of sufficient size, in the C99 form [-]0xh.hhhhp[+-]d.
+ /// Return the number of characters written, excluding the terminating NUL.
+ unsigned int convertToHexString(char *dst, unsigned int hexDigits,
+ bool upperCase, roundingMode) const;
+
+ /// \name IEEE-754R 5.7.2 General operations.
+ /// @{
+
+ /// IEEE-754R isSignMinus: Returns true if and only if the current value is
+ /// negative.
+ ///
+ /// This applies to zeros and NaNs as well.
+ bool isNegative() const { return sign; }
+
+ /// IEEE-754R isNormal: Returns true if and only if the current value is normal.
+ ///
+ /// This implies that the current value of the float is not zero, subnormal,
+ /// infinite, or NaN following the definition of normality from IEEE-754R.
+ bool isNormal() const { return !isDenormal() && isFiniteNonZero(); }
+
+ /// Returns true if and only if the current value is zero, subnormal, or
+ /// normal.
+ ///
+ /// This means that the value is not infinite or NaN.
+ bool isFinite() const { return !isNaN() && !isInfinity(); }
+
+ /// Returns true if and only if the float is plus or minus zero.
+ bool isZero() const { return category == fcZero; }
+
+ /// IEEE-754R isSubnormal(): Returns true if and only if the float is a
+ /// denormal.
+ bool isDenormal() const;
+
+ /// IEEE-754R isInfinite(): Returns true if and only if the float is infinity.
+ bool isInfinity() const { return category == fcInfinity; }
+
+ /// Returns true if and only if the float is a quiet or signaling NaN.
+ bool isNaN() const { return category == fcNaN; }
+
+ /// Returns true if and only if the float is a signaling NaN.
+ bool isSignaling() const;
+
+ /// @}
+
+ /// \name Simple Queries
+ /// @{
+
+ fltCategory getCategory() const { return category; }
+ const fltSemantics &getSemantics() const { return *semantics; }
+ bool isNonZero() const { return category != fcZero; }
+ bool isFiniteNonZero() const { return isFinite() && !isZero(); }
+ bool isPosZero() const { return isZero() && !isNegative(); }
+ bool isNegZero() const { return isZero() && isNegative(); }
+
+ /// Returns true if and only if the number has the smallest possible non-zero
+ /// magnitude in the current semantics.
+ bool isSmallest() const;
+
+ /// Returns true if and only if the number has the largest possible finite
+ /// magnitude in the current semantics.
+ bool isLargest() const;
+
+ /// Returns true if and only if the number is an exact integer.
+ bool isInteger() const;
+
+ /// @}
+
+ IEEEFloat &operator=(const IEEEFloat &);
+ IEEEFloat &operator=(IEEEFloat &&);
+
+ /// Overload to compute a hash code for an APFloat value.
+ ///
+ /// Note that the use of hash codes for floating point values is in general
+  /// fraught with peril. Equality is hard to define for these values. For
+ /// example, should negative and positive zero hash to different codes? Are
+ /// they equal or not? This hash value implementation specifically
+ /// emphasizes producing different codes for different inputs in order to
+ /// be used in canonicalization and memoization. As such, equality is
+ /// bitwiseIsEqual, and 0 != -0.
+ friend hash_code hash_value(const IEEEFloat &Arg);
+
+ /// Converts this value into a decimal string.
+ ///
+ /// \param FormatPrecision The maximum number of digits of
+ /// precision to output. If there are fewer digits available,
+ /// zero padding will not be used unless the value is
+ /// integral and small enough to be expressed in
+ /// FormatPrecision digits. 0 means to use the natural
+ /// precision of the number.
+ /// \param FormatMaxPadding The maximum number of zeros to
+ /// consider inserting before falling back to scientific
+ /// notation. 0 means to always use scientific notation.
+ ///
+  /// \param TruncateZero Indicate whether to remove the trailing zero in the
+  /// fraction part. Setting this parameter to false forces output closer to
+  /// the default printf behavior: a lowercase 'e' is used as the exponent
+  /// delimiter and the exponent always contains at least two digits.
+ ///
+ /// Number Precision MaxPadding Result
+ /// ------ --------- ---------- ------
+ /// 1.01E+4 5 2 10100
+ /// 1.01E+4 4 2 1.01E+4
+ /// 1.01E+4 5 1 1.01E+4
+ /// 1.01E-2 5 2 0.0101
+ /// 1.01E-2 4 2 0.0101
+ /// 1.01E-2 4 1 1.01E-2
+ void toString(SmallVectorImpl<char> &Str, unsigned FormatPrecision = 0,
+ unsigned FormatMaxPadding = 3, bool TruncateZero = true) const;
+
+ /// If this value has an exact multiplicative inverse, store it in inv and
+ /// return true.
+ bool getExactInverse(APFloat *inv) const;
+
+ /// Returns the exponent of the internal representation of the APFloat.
+ ///
+ /// Because the radix of APFloat is 2, this is equivalent to floor(log2(x)).
+ /// For special APFloat values, this returns special error codes:
+ ///
+ /// NaN -> \c IEK_NaN
+ /// 0 -> \c IEK_Zero
+ /// Inf -> \c IEK_Inf
+ ///
+ friend int ilogb(const IEEEFloat &Arg);
+
+ /// Returns: X * 2^Exp for integral exponents.
+ friend IEEEFloat scalbn(IEEEFloat X, int Exp, roundingMode);
+
+ friend IEEEFloat frexp(const IEEEFloat &X, int &Exp, roundingMode);
+
+ /// \name Special value setters.
+ /// @{
+
+ void makeLargest(bool Neg = false);
+ void makeSmallest(bool Neg = false);
+ void makeNaN(bool SNaN = false, bool Neg = false,
+ const APInt *fill = nullptr);
+ void makeInf(bool Neg = false);
+ void makeZero(bool Neg = false);
+ void makeQuiet();
+
+ /// Returns the smallest (by magnitude) normalized finite number in the given
+ /// semantics.
+ ///
+ /// \param Negative - True iff the number should be negative
+ void makeSmallestNormalized(bool Negative = false);
+
+ /// @}
+
+ cmpResult compareAbsoluteValue(const IEEEFloat &) const;
+
+private:
+ /// \name Simple Queries
+ /// @{
+
+ integerPart *significandParts();
+ const integerPart *significandParts() const;
+ unsigned int partCount() const;
+
+ /// @}
+
+ /// \name Significand operations.
+ /// @{
+
+ integerPart addSignificand(const IEEEFloat &);
+ integerPart subtractSignificand(const IEEEFloat &, integerPart);
+ lostFraction addOrSubtractSignificand(const IEEEFloat &, bool subtract);
+ lostFraction multiplySignificand(const IEEEFloat &, IEEEFloat);
+ lostFraction multiplySignificand(const IEEEFloat&);
+ lostFraction divideSignificand(const IEEEFloat &);
+ void incrementSignificand();
+ void initialize(const fltSemantics *);
+ void shiftSignificandLeft(unsigned int);
+ lostFraction shiftSignificandRight(unsigned int);
+ unsigned int significandLSB() const;
+ unsigned int significandMSB() const;
+ void zeroSignificand();
+ /// Return true if the significand excluding the integral bit is all ones.
+ bool isSignificandAllOnes() const;
+ /// Return true if the significand excluding the integral bit is all zeros.
+ bool isSignificandAllZeros() const;
+
+ /// @}
+
+ /// \name Arithmetic on special values.
+ /// @{
+
+ opStatus addOrSubtractSpecials(const IEEEFloat &, bool subtract);
+ opStatus divideSpecials(const IEEEFloat &);
+ opStatus multiplySpecials(const IEEEFloat &);
+ opStatus modSpecials(const IEEEFloat &);
+ opStatus remainderSpecials(const IEEEFloat&);
+
+ /// @}
+
+ /// \name Miscellany
+ /// @{
+
+ bool convertFromStringSpecials(StringRef str);
+ opStatus normalize(roundingMode, lostFraction);
+ opStatus addOrSubtract(const IEEEFloat &, roundingMode, bool subtract);
+ opStatus handleOverflow(roundingMode);
+ bool roundAwayFromZero(roundingMode, lostFraction, unsigned int) const;
+ opStatus convertToSignExtendedInteger(MutableArrayRef<integerPart>,
+ unsigned int, bool, roundingMode,
+ bool *) const;
+ opStatus convertFromUnsignedParts(const integerPart *, unsigned int,
+ roundingMode);
+ Expected<opStatus> convertFromHexadecimalString(StringRef, roundingMode);
+ Expected<opStatus> convertFromDecimalString(StringRef, roundingMode);
+ char *convertNormalToHexString(char *, unsigned int, bool,
+ roundingMode) const;
+ opStatus roundSignificandWithExponent(const integerPart *, unsigned int, int,
+ roundingMode);
+ ExponentType exponentNaN() const;
+ ExponentType exponentInf() const;
+ ExponentType exponentZero() const;
+
+ /// @}
+
+ APInt convertHalfAPFloatToAPInt() const;
+ APInt convertBFloatAPFloatToAPInt() const;
+ APInt convertFloatAPFloatToAPInt() const;
+ APInt convertDoubleAPFloatToAPInt() const;
+ APInt convertQuadrupleAPFloatToAPInt() const;
+ APInt convertF80LongDoubleAPFloatToAPInt() const;
+ APInt convertPPCDoubleDoubleAPFloatToAPInt() const;
+ void initFromAPInt(const fltSemantics *Sem, const APInt &api);
+ void initFromHalfAPInt(const APInt &api);
+ void initFromBFloatAPInt(const APInt &api);
+ void initFromFloatAPInt(const APInt &api);
+ void initFromDoubleAPInt(const APInt &api);
+ void initFromQuadrupleAPInt(const APInt &api);
+ void initFromF80LongDoubleAPInt(const APInt &api);
+ void initFromPPCDoubleDoubleAPInt(const APInt &api);
+
+ void assign(const IEEEFloat &);
+ void copySignificand(const IEEEFloat &);
+ void freeSignificand();
+
+ /// Note: this must be the first data member.
+ /// The semantics that this value obeys.
+ const fltSemantics *semantics;
+
+ /// A binary fraction with an explicit integer bit.
+ ///
+ /// The significand must be at least one bit wider than the target precision.
+ union Significand {
+ integerPart part;
+ integerPart *parts;
+ } significand;
+
+ /// The signed unbiased exponent of the value.
+ ExponentType exponent;
+
+ /// What kind of floating point number this is.
+ ///
+ /// Only 2 bits are required, but VisualStudio incorrectly sign extends it.
+ /// Using the extra bit keeps it from failing under VisualStudio.
+ fltCategory category : 3;
+
+ /// Sign bit of the number.
+ unsigned int sign : 1;
+};
+
+hash_code hash_value(const IEEEFloat &Arg);
+int ilogb(const IEEEFloat &Arg);
+IEEEFloat scalbn(IEEEFloat X, int Exp, IEEEFloat::roundingMode);
+IEEEFloat frexp(const IEEEFloat &Val, int &Exp, IEEEFloat::roundingMode RM);
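+
+// Illustrative usage sketch (an editorial example, using the public APFloat
+// wrappers declared below): IEEE compare() treats NaN as unordered and
+// +0 == -0, while bitwiseIsEqual() distinguishes the two zeros.
+//
+//   llvm::APFloat PZ = llvm::APFloat::getZero(llvm::APFloat::IEEEdouble());
+//   llvm::APFloat NZ = llvm::APFloat::getZero(llvm::APFloat::IEEEdouble(),
+//                                             /*Negative=*/true);
+//   assert(PZ.compare(NZ) == llvm::APFloat::cmpEqual); // 0 == -0
+//   assert(!PZ.bitwiseIsEqual(NZ));                    // bits differ
+//
+//   llvm::APFloat QNaN = llvm::APFloat::getNaN(llvm::APFloat::IEEEdouble());
+//   assert(QNaN.compare(QNaN) == llvm::APFloat::cmpUnordered);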
+
+// This class implements a more precise floating point type in terms of two
+// APFloats. The interface and layout are designed for arbitrary underlying
+// semantics, though currently only PPCDoubleDouble semantics are supported,
+// whose corresponding underlying semantics are IEEEdouble.
+class DoubleAPFloat final : public APFloatBase {
+ // Note: this must be the first data member.
+ const fltSemantics *Semantics;
+ std::unique_ptr<APFloat[]> Floats;
+
+ opStatus addImpl(const APFloat &a, const APFloat &aa, const APFloat &c,
+ const APFloat &cc, roundingMode RM);
+
+ opStatus addWithSpecial(const DoubleAPFloat &LHS, const DoubleAPFloat &RHS,
+ DoubleAPFloat &Out, roundingMode RM);
+
+public:
+ DoubleAPFloat(const fltSemantics &S);
+ DoubleAPFloat(const fltSemantics &S, uninitializedTag);
+ DoubleAPFloat(const fltSemantics &S, integerPart);
+ DoubleAPFloat(const fltSemantics &S, const APInt &I);
+ DoubleAPFloat(const fltSemantics &S, APFloat &&First, APFloat &&Second);
+ DoubleAPFloat(const DoubleAPFloat &RHS);
+ DoubleAPFloat(DoubleAPFloat &&RHS);
+
+ DoubleAPFloat &operator=(const DoubleAPFloat &RHS);
+
+ DoubleAPFloat &operator=(DoubleAPFloat &&RHS) {
+ if (this != &RHS) {
+ this->~DoubleAPFloat();
+ new (this) DoubleAPFloat(std::move(RHS));
+ }
+ return *this;
+ }
+
+ bool needsCleanup() const { return Floats != nullptr; }
+
+ APFloat &getFirst() { return Floats[0]; }
+ const APFloat &getFirst() const { return Floats[0]; }
+ APFloat &getSecond() { return Floats[1]; }
+ const APFloat &getSecond() const { return Floats[1]; }
+
+ opStatus add(const DoubleAPFloat &RHS, roundingMode RM);
+ opStatus subtract(const DoubleAPFloat &RHS, roundingMode RM);
+ opStatus multiply(const DoubleAPFloat &RHS, roundingMode RM);
+ opStatus divide(const DoubleAPFloat &RHS, roundingMode RM);
+ opStatus remainder(const DoubleAPFloat &RHS);
+ opStatus mod(const DoubleAPFloat &RHS);
+ opStatus fusedMultiplyAdd(const DoubleAPFloat &Multiplicand,
+ const DoubleAPFloat &Addend, roundingMode RM);
+ opStatus roundToIntegral(roundingMode RM);
+ void changeSign();
+ cmpResult compareAbsoluteValue(const DoubleAPFloat &RHS) const;
+
+ fltCategory getCategory() const;
+ bool isNegative() const;
+
+ void makeInf(bool Neg);
+ void makeZero(bool Neg);
+ void makeLargest(bool Neg);
+ void makeSmallest(bool Neg);
+ void makeSmallestNormalized(bool Neg);
+ void makeNaN(bool SNaN, bool Neg, const APInt *fill);
+
+ cmpResult compare(const DoubleAPFloat &RHS) const;
+ bool bitwiseIsEqual(const DoubleAPFloat &RHS) const;
+ APInt bitcastToAPInt() const;
+ Expected<opStatus> convertFromString(StringRef, roundingMode);
+ opStatus next(bool nextDown);
+
+ opStatus convertToInteger(MutableArrayRef<integerPart> Input,
+ unsigned int Width, bool IsSigned, roundingMode RM,
+ bool *IsExact) const;
+ opStatus convertFromAPInt(const APInt &Input, bool IsSigned, roundingMode RM);
+ opStatus convertFromSignExtendedInteger(const integerPart *Input,
+ unsigned int InputSize, bool IsSigned,
+ roundingMode RM);
+ opStatus convertFromZeroExtendedInteger(const integerPart *Input,
+ unsigned int InputSize, bool IsSigned,
+ roundingMode RM);
+ unsigned int convertToHexString(char *DST, unsigned int HexDigits,
+ bool UpperCase, roundingMode RM) const;
+
+ bool isDenormal() const;
+ bool isSmallest() const;
+ bool isLargest() const;
+ bool isInteger() const;
+
+ void toString(SmallVectorImpl<char> &Str, unsigned FormatPrecision,
+ unsigned FormatMaxPadding, bool TruncateZero = true) const;
+
+ bool getExactInverse(APFloat *inv) const;
+
+ friend DoubleAPFloat scalbn(const DoubleAPFloat &X, int Exp, roundingMode);
+ friend DoubleAPFloat frexp(const DoubleAPFloat &X, int &Exp, roundingMode);
+ friend hash_code hash_value(const DoubleAPFloat &Arg);
+};
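+
+// Illustrative note (an editorial example, not part of the upstream header):
+// DoubleAPFloat is reached through the public APFloat type by choosing
+// PPCDoubleDouble semantics; every other semantics uses the IEEEFloat layout.
+//
+//   llvm::APFloat DD(llvm::APFloat::PPCDoubleDouble(), /*I=*/1); // 1.0
+//   llvm::APInt Bits = DD.bitcastToAPInt(); // 128 bits: two IEEEdoubles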
+
+hash_code hash_value(const DoubleAPFloat &Arg);
+
+} // End detail namespace
+
+// This is an interface class that currently forwards functionality from
+// detail::IEEEFloat.
+class APFloat : public APFloatBase {
+ typedef detail::IEEEFloat IEEEFloat;
+ typedef detail::DoubleAPFloat DoubleAPFloat;
+
+ static_assert(std::is_standard_layout<IEEEFloat>::value, "");
+
+ union Storage {
+ const fltSemantics *semantics;
+ IEEEFloat IEEE;
+ DoubleAPFloat Double;
+
+ explicit Storage(IEEEFloat F, const fltSemantics &S);
+ explicit Storage(DoubleAPFloat F, const fltSemantics &S)
+ : Double(std::move(F)) {
+ assert(&S == &PPCDoubleDouble());
+ }
+
+ template <typename... ArgTypes>
+ Storage(const fltSemantics &Semantics, ArgTypes &&... Args) {
+ if (usesLayout<IEEEFloat>(Semantics)) {
+ new (&IEEE) IEEEFloat(Semantics, std::forward<ArgTypes>(Args)...);
+ return;
+ }
+ if (usesLayout<DoubleAPFloat>(Semantics)) {
+ new (&Double) DoubleAPFloat(Semantics, std::forward<ArgTypes>(Args)...);
+ return;
+ }
+ llvm_unreachable("Unexpected semantics");
+ }
+
+ ~Storage() {
+ if (usesLayout<IEEEFloat>(*semantics)) {
+ IEEE.~IEEEFloat();
+ return;
+ }
+ if (usesLayout<DoubleAPFloat>(*semantics)) {
+ Double.~DoubleAPFloat();
+ return;
+ }
+ llvm_unreachable("Unexpected semantics");
+ }
+
+ Storage(const Storage &RHS) {
+ if (usesLayout<IEEEFloat>(*RHS.semantics)) {
+ new (this) IEEEFloat(RHS.IEEE);
+ return;
+ }
+ if (usesLayout<DoubleAPFloat>(*RHS.semantics)) {
+ new (this) DoubleAPFloat(RHS.Double);
+ return;
+ }
+ llvm_unreachable("Unexpected semantics");
+ }
+
+ Storage(Storage &&RHS) {
+ if (usesLayout<IEEEFloat>(*RHS.semantics)) {
+ new (this) IEEEFloat(std::move(RHS.IEEE));
+ return;
+ }
+ if (usesLayout<DoubleAPFloat>(*RHS.semantics)) {
+ new (this) DoubleAPFloat(std::move(RHS.Double));
+ return;
+ }
+ llvm_unreachable("Unexpected semantics");
+ }
+
+ Storage &operator=(const Storage &RHS) {
+ if (usesLayout<IEEEFloat>(*semantics) &&
+ usesLayout<IEEEFloat>(*RHS.semantics)) {
+ IEEE = RHS.IEEE;
+ } else if (usesLayout<DoubleAPFloat>(*semantics) &&
+ usesLayout<DoubleAPFloat>(*RHS.semantics)) {
+ Double = RHS.Double;
+ } else if (this != &RHS) {
+ this->~Storage();
+ new (this) Storage(RHS);
+ }
+ return *this;
+ }
+
+ Storage &operator=(Storage &&RHS) {
+ if (usesLayout<IEEEFloat>(*semantics) &&
+ usesLayout<IEEEFloat>(*RHS.semantics)) {
+ IEEE = std::move(RHS.IEEE);
+ } else if (usesLayout<DoubleAPFloat>(*semantics) &&
+ usesLayout<DoubleAPFloat>(*RHS.semantics)) {
+ Double = std::move(RHS.Double);
+ } else if (this != &RHS) {
+ this->~Storage();
+ new (this) Storage(std::move(RHS));
+ }
+ return *this;
+ }
+ } U;
+
+ template <typename T> static bool usesLayout(const fltSemantics &Semantics) {
+ static_assert(std::is_same<T, IEEEFloat>::value ||
+ std::is_same<T, DoubleAPFloat>::value, "");
+ if (std::is_same<T, DoubleAPFloat>::value) {
+ return &Semantics == &PPCDoubleDouble();
+ }
+ return &Semantics != &PPCDoubleDouble();
+ }
+
+ IEEEFloat &getIEEE() {
+ if (usesLayout<IEEEFloat>(*U.semantics))
+ return U.IEEE;
+ if (usesLayout<DoubleAPFloat>(*U.semantics))
+ return U.Double.getFirst().U.IEEE;
+ llvm_unreachable("Unexpected semantics");
+ }
+
+ const IEEEFloat &getIEEE() const {
+ if (usesLayout<IEEEFloat>(*U.semantics))
+ return U.IEEE;
+ if (usesLayout<DoubleAPFloat>(*U.semantics))
+ return U.Double.getFirst().U.IEEE;
+ llvm_unreachable("Unexpected semantics");
+ }
+
+ void makeZero(bool Neg) { APFLOAT_DISPATCH_ON_SEMANTICS(makeZero(Neg)); }
+
+ void makeInf(bool Neg) { APFLOAT_DISPATCH_ON_SEMANTICS(makeInf(Neg)); }
+
+ void makeNaN(bool SNaN, bool Neg, const APInt *fill) {
+ APFLOAT_DISPATCH_ON_SEMANTICS(makeNaN(SNaN, Neg, fill));
+ }
+
+ void makeLargest(bool Neg) {
+ APFLOAT_DISPATCH_ON_SEMANTICS(makeLargest(Neg));
+ }
+
+ void makeSmallest(bool Neg) {
+ APFLOAT_DISPATCH_ON_SEMANTICS(makeSmallest(Neg));
+ }
+
+ void makeSmallestNormalized(bool Neg) {
+ APFLOAT_DISPATCH_ON_SEMANTICS(makeSmallestNormalized(Neg));
+ }
+
+  // FIXME: This is needed because clang 3.3 (and older) always checks for
+  // the default constructor in an array aggregate initialization, even if
+  // no element of the array is default-initialized.
+ APFloat() : U(IEEEdouble()) {
+ llvm_unreachable("This is a workaround for old clang.");
+ }
+
+ explicit APFloat(IEEEFloat F, const fltSemantics &S) : U(std::move(F), S) {}
+ explicit APFloat(DoubleAPFloat F, const fltSemantics &S)
+ : U(std::move(F), S) {}
+
+ cmpResult compareAbsoluteValue(const APFloat &RHS) const {
+ assert(&getSemantics() == &RHS.getSemantics() &&
+ "Should only compare APFloats with the same semantics");
+ if (usesLayout<IEEEFloat>(getSemantics()))
+ return U.IEEE.compareAbsoluteValue(RHS.U.IEEE);
+ if (usesLayout<DoubleAPFloat>(getSemantics()))
+ return U.Double.compareAbsoluteValue(RHS.U.Double);
+ llvm_unreachable("Unexpected semantics");
+ }
+
+public:
+ APFloat(const fltSemantics &Semantics) : U(Semantics) {}
+ APFloat(const fltSemantics &Semantics, StringRef S);
+ APFloat(const fltSemantics &Semantics, integerPart I) : U(Semantics, I) {}
+ template <typename T,
+ typename = std::enable_if_t<std::is_floating_point<T>::value>>
+ APFloat(const fltSemantics &Semantics, T V) = delete;
+ // TODO: Remove this constructor. This isn't faster than the first one.
+ APFloat(const fltSemantics &Semantics, uninitializedTag)
+ : U(Semantics, uninitialized) {}
+ APFloat(const fltSemantics &Semantics, const APInt &I) : U(Semantics, I) {}
+ explicit APFloat(double d) : U(IEEEFloat(d), IEEEdouble()) {}
+ explicit APFloat(float f) : U(IEEEFloat(f), IEEEsingle()) {}
+ APFloat(const APFloat &RHS) = default;
+ APFloat(APFloat &&RHS) = default;
+
+ ~APFloat() = default;
+
+ bool needsCleanup() const { APFLOAT_DISPATCH_ON_SEMANTICS(needsCleanup()); }
+
+ /// Factory for Positive and Negative Zero.
+ ///
+ /// \param Negative True iff the number should be negative.
+ static APFloat getZero(const fltSemantics &Sem, bool Negative = false) {
+ APFloat Val(Sem, uninitialized);
+ Val.makeZero(Negative);
+ return Val;
+ }
+
+ /// Factory for Positive and Negative Infinity.
+ ///
+ /// \param Negative True iff the number should be negative.
+ static APFloat getInf(const fltSemantics &Sem, bool Negative = false) {
+ APFloat Val(Sem, uninitialized);
+ Val.makeInf(Negative);
+ return Val;
+ }
+
+ /// Factory for NaN values.
+ ///
+ /// \param Negative - True iff the NaN generated should be negative.
+ /// \param payload - The unspecified fill bits for creating the NaN, 0 by
+ /// default. The value is truncated as necessary.
+ static APFloat getNaN(const fltSemantics &Sem, bool Negative = false,
+ uint64_t payload = 0) {
+ if (payload) {
+ APInt intPayload(64, payload);
+ return getQNaN(Sem, Negative, &intPayload);
+ } else {
+ return getQNaN(Sem, Negative, nullptr);
+ }
+ }
+
+ /// Factory for QNaN values.
+ static APFloat getQNaN(const fltSemantics &Sem, bool Negative = false,
+ const APInt *payload = nullptr) {
+ APFloat Val(Sem, uninitialized);
+ Val.makeNaN(false, Negative, payload);
+ return Val;
+ }
+
+ /// Factory for SNaN values.
+ static APFloat getSNaN(const fltSemantics &Sem, bool Negative = false,
+ const APInt *payload = nullptr) {
+ APFloat Val(Sem, uninitialized);
+ Val.makeNaN(true, Negative, payload);
+ return Val;
+ }
+
+ /// Returns the largest finite number in the given semantics.
+ ///
+ /// \param Negative - True iff the number should be negative
+ static APFloat getLargest(const fltSemantics &Sem, bool Negative = false) {
+ APFloat Val(Sem, uninitialized);
+ Val.makeLargest(Negative);
+ return Val;
+ }
+
+ /// Returns the smallest (by magnitude) finite number in the given semantics.
+ /// Might be denormalized, which implies a relative loss of precision.
+ ///
+ /// \param Negative - True iff the number should be negative
+ static APFloat getSmallest(const fltSemantics &Sem, bool Negative = false) {
+ APFloat Val(Sem, uninitialized);
+ Val.makeSmallest(Negative);
+ return Val;
+ }
+
+ /// Returns the smallest (by magnitude) normalized finite number in the given
+ /// semantics.
+ ///
+ /// \param Negative - True iff the number should be negative
+ static APFloat getSmallestNormalized(const fltSemantics &Sem,
+ bool Negative = false) {
+ APFloat Val(Sem, uninitialized);
+ Val.makeSmallestNormalized(Negative);
+ return Val;
+ }
+
+  /// Returns a float which is bitcast from an all-ones integer.
+ ///
+ /// \param Semantics - type float semantics
+ static APFloat getAllOnesValue(const fltSemantics &Semantics);
+
+ /// Used to insert APFloat objects, or objects that contain APFloat objects,
+ /// into FoldingSets.
+ void Profile(FoldingSetNodeID &NID) const;
+
+ opStatus add(const APFloat &RHS, roundingMode RM) {
+ assert(&getSemantics() == &RHS.getSemantics() &&
+ "Should only call on two APFloats with the same semantics");
+ if (usesLayout<IEEEFloat>(getSemantics()))
+ return U.IEEE.add(RHS.U.IEEE, RM);
+ if (usesLayout<DoubleAPFloat>(getSemantics()))
+ return U.Double.add(RHS.U.Double, RM);
+ llvm_unreachable("Unexpected semantics");
+ }
+ opStatus subtract(const APFloat &RHS, roundingMode RM) {
+ assert(&getSemantics() == &RHS.getSemantics() &&
+ "Should only call on two APFloats with the same semantics");
+ if (usesLayout<IEEEFloat>(getSemantics()))
+ return U.IEEE.subtract(RHS.U.IEEE, RM);
+ if (usesLayout<DoubleAPFloat>(getSemantics()))
+ return U.Double.subtract(RHS.U.Double, RM);
+ llvm_unreachable("Unexpected semantics");
+ }
+ opStatus multiply(const APFloat &RHS, roundingMode RM) {
+ assert(&getSemantics() == &RHS.getSemantics() &&
+ "Should only call on two APFloats with the same semantics");
+ if (usesLayout<IEEEFloat>(getSemantics()))
+ return U.IEEE.multiply(RHS.U.IEEE, RM);
+ if (usesLayout<DoubleAPFloat>(getSemantics()))
+ return U.Double.multiply(RHS.U.Double, RM);
+ llvm_unreachable("Unexpected semantics");
+ }
+ opStatus divide(const APFloat &RHS, roundingMode RM) {
+ assert(&getSemantics() == &RHS.getSemantics() &&
+ "Should only call on two APFloats with the same semantics");
+ if (usesLayout<IEEEFloat>(getSemantics()))
+ return U.IEEE.divide(RHS.U.IEEE, RM);
+ if (usesLayout<DoubleAPFloat>(getSemantics()))
+ return U.Double.divide(RHS.U.Double, RM);
+ llvm_unreachable("Unexpected semantics");
+ }
+ opStatus remainder(const APFloat &RHS) {
+ assert(&getSemantics() == &RHS.getSemantics() &&
+ "Should only call on two APFloats with the same semantics");
+ if (usesLayout<IEEEFloat>(getSemantics()))
+ return U.IEEE.remainder(RHS.U.IEEE);
+ if (usesLayout<DoubleAPFloat>(getSemantics()))
+ return U.Double.remainder(RHS.U.Double);
+ llvm_unreachable("Unexpected semantics");
+ }
+ opStatus mod(const APFloat &RHS) {
+ assert(&getSemantics() == &RHS.getSemantics() &&
+ "Should only call on two APFloats with the same semantics");
+ if (usesLayout<IEEEFloat>(getSemantics()))
+ return U.IEEE.mod(RHS.U.IEEE);
+ if (usesLayout<DoubleAPFloat>(getSemantics()))
+ return U.Double.mod(RHS.U.Double);
+ llvm_unreachable("Unexpected semantics");
+ }
+ opStatus fusedMultiplyAdd(const APFloat &Multiplicand, const APFloat &Addend,
+ roundingMode RM) {
+ assert(&getSemantics() == &Multiplicand.getSemantics() &&
+ "Should only call on APFloats with the same semantics");
+ assert(&getSemantics() == &Addend.getSemantics() &&
+ "Should only call on APFloats with the same semantics");
+ if (usesLayout<IEEEFloat>(getSemantics()))
+ return U.IEEE.fusedMultiplyAdd(Multiplicand.U.IEEE, Addend.U.IEEE, RM);
+ if (usesLayout<DoubleAPFloat>(getSemantics()))
+ return U.Double.fusedMultiplyAdd(Multiplicand.U.Double, Addend.U.Double,
+ RM);
+ llvm_unreachable("Unexpected semantics");
+ }
+ opStatus roundToIntegral(roundingMode RM) {
+ APFLOAT_DISPATCH_ON_SEMANTICS(roundToIntegral(RM));
+ }
+
+ // TODO: bool parameters are not readable and a source of bugs.
+ // Do something.
+ opStatus next(bool nextDown) {
+ APFLOAT_DISPATCH_ON_SEMANTICS(next(nextDown));
+ }
+
+ /// Negate an APFloat.
+ APFloat operator-() const {
+ APFloat Result(*this);
+ Result.changeSign();
+ return Result;
+ }
+
+ /// Add two APFloats, rounding ties to the nearest even.
+ /// No error checking.
+ APFloat operator+(const APFloat &RHS) const {
+ APFloat Result(*this);
+ (void)Result.add(RHS, rmNearestTiesToEven);
+ return Result;
+ }
+
+ /// Subtract two APFloats, rounding ties to the nearest even.
+ /// No error checking.
+ APFloat operator-(const APFloat &RHS) const {
+ APFloat Result(*this);
+ (void)Result.subtract(RHS, rmNearestTiesToEven);
+ return Result;
+ }
+
+ /// Multiply two APFloats, rounding ties to the nearest even.
+ /// No error checking.
+ APFloat operator*(const APFloat &RHS) const {
+ APFloat Result(*this);
+ (void)Result.multiply(RHS, rmNearestTiesToEven);
+ return Result;
+ }
+
+ /// Divide the first APFloat by the second, rounding ties to the nearest even.
+ /// No error checking.
+ APFloat operator/(const APFloat &RHS) const {
+ APFloat Result(*this);
+ (void)Result.divide(RHS, rmNearestTiesToEven);
+ return Result;
+ }
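+
+  // Illustrative usage sketch (an editorial example): the convenience
+  // operators above fix the rounding mode to rmNearestTiesToEven and drop
+  // the returned status, so use the named methods when either matters:
+  //
+  //   llvm::APFloat A(1.0f), B(3.0f);
+  //   llvm::APFloat Quick = A / B; // status silently discarded
+  //   llvm::APFloat Checked(A);
+  //   llvm::APFloat::opStatus S =
+  //       Checked.divide(B, llvm::APFloat::rmTowardZero);
+  //   (void)S; // e.g. test S & llvm::APFloat::opInexact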
+
+ void changeSign() { APFLOAT_DISPATCH_ON_SEMANTICS(changeSign()); }
+ void clearSign() {
+ if (isNegative())
+ changeSign();
+ }
+ void copySign(const APFloat &RHS) {
+ if (isNegative() != RHS.isNegative())
+ changeSign();
+ }
+
+ /// A static helper to produce a copy of an APFloat value with its sign
+ /// copied from some other APFloat.
+ static APFloat copySign(APFloat Value, const APFloat &Sign) {
+ Value.copySign(Sign);
+ return Value;
+ }
+
+ opStatus convert(const fltSemantics &ToSemantics, roundingMode RM,
+ bool *losesInfo);
+ opStatus convertToInteger(MutableArrayRef<integerPart> Input,
+ unsigned int Width, bool IsSigned, roundingMode RM,
+ bool *IsExact) const {
+ APFLOAT_DISPATCH_ON_SEMANTICS(
+ convertToInteger(Input, Width, IsSigned, RM, IsExact));
+ }
+ opStatus convertToInteger(APSInt &Result, roundingMode RM,
+ bool *IsExact) const;
+ opStatus convertFromAPInt(const APInt &Input, bool IsSigned,
+ roundingMode RM) {
+ APFLOAT_DISPATCH_ON_SEMANTICS(convertFromAPInt(Input, IsSigned, RM));
+ }
+ opStatus convertFromSignExtendedInteger(const integerPart *Input,
+ unsigned int InputSize, bool IsSigned,
+ roundingMode RM) {
+ APFLOAT_DISPATCH_ON_SEMANTICS(
+ convertFromSignExtendedInteger(Input, InputSize, IsSigned, RM));
+ }
+ opStatus convertFromZeroExtendedInteger(const integerPart *Input,
+ unsigned int InputSize, bool IsSigned,
+ roundingMode RM) {
+ APFLOAT_DISPATCH_ON_SEMANTICS(
+ convertFromZeroExtendedInteger(Input, InputSize, IsSigned, RM));
+ }
+ Expected<opStatus> convertFromString(StringRef, roundingMode);
+ APInt bitcastToAPInt() const {
+ APFLOAT_DISPATCH_ON_SEMANTICS(bitcastToAPInt());
+ }
+
+ /// Converts this APFloat to host double value.
+ ///
+ /// \pre The APFloat must be built using semantics that can be represented by
+ /// the host double type without loss of precision, i.e. IEEEdouble or any
+ /// shorter semantics, such as IEEEsingle.
+ double convertToDouble() const;
+
+ /// Converts this APFloat to host float value.
+ ///
+ /// \pre The APFloat must be built using semantics that can be represented by
+ /// the host float type without loss of precision, i.e. IEEEsingle or any
+ /// shorter semantics, such as IEEEhalf.
+ float convertToFloat() const;
+
+ bool operator==(const APFloat &RHS) const { return compare(RHS) == cmpEqual; }
+
+ bool operator!=(const APFloat &RHS) const { return compare(RHS) != cmpEqual; }
+
+ bool operator<(const APFloat &RHS) const {
+ return compare(RHS) == cmpLessThan;
+ }
+
+ bool operator>(const APFloat &RHS) const {
+ return compare(RHS) == cmpGreaterThan;
+ }
+
+ bool operator<=(const APFloat &RHS) const {
+ cmpResult Res = compare(RHS);
+ return Res == cmpLessThan || Res == cmpEqual;
+ }
+
+ bool operator>=(const APFloat &RHS) const {
+ cmpResult Res = compare(RHS);
+ return Res == cmpGreaterThan || Res == cmpEqual;
+ }
+
+ cmpResult compare(const APFloat &RHS) const {
+ assert(&getSemantics() == &RHS.getSemantics() &&
+ "Should only compare APFloats with the same semantics");
+ if (usesLayout<IEEEFloat>(getSemantics()))
+ return U.IEEE.compare(RHS.U.IEEE);
+ if (usesLayout<DoubleAPFloat>(getSemantics()))
+ return U.Double.compare(RHS.U.Double);
+ llvm_unreachable("Unexpected semantics");
+ }
+
+ bool bitwiseIsEqual(const APFloat &RHS) const {
+ if (&getSemantics() != &RHS.getSemantics())
+ return false;
+ if (usesLayout<IEEEFloat>(getSemantics()))
+ return U.IEEE.bitwiseIsEqual(RHS.U.IEEE);
+ if (usesLayout<DoubleAPFloat>(getSemantics()))
+ return U.Double.bitwiseIsEqual(RHS.U.Double);
+ llvm_unreachable("Unexpected semantics");
+ }
+
+ /// We don't rely on operator== working on double values, as
+ /// it returns true for things that are clearly not equal, like -0.0 and 0.0.
+ /// As such, this method can be used to do an exact bit-for-bit comparison of
+ /// two floating point values.
+ ///
+ /// We leave the version with the double argument here because it's just so
+ /// convenient to write "2.0" and the like. Without this function we'd
+ /// have to duplicate its logic everywhere it's called.
+ bool isExactlyValue(double V) const {
+ bool ignored;
+ APFloat Tmp(V);
+ Tmp.convert(getSemantics(), APFloat::rmNearestTiesToEven, &ignored);
+ return bitwiseIsEqual(Tmp);
+ }
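+
+ // Illustrative sketch of the bitwise-comparison contract described above:
+ //
+ //   APFloat Z(0.0);
+ //   Z.isExactlyValue(0.0);  // true
+ //   Z.isExactlyValue(-0.0); // false: -0.0 and 0.0 differ bit-for-bit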
+
+ unsigned int convertToHexString(char *DST, unsigned int HexDigits,
+ bool UpperCase, roundingMode RM) const {
+ APFLOAT_DISPATCH_ON_SEMANTICS(
+ convertToHexString(DST, HexDigits, UpperCase, RM));
+ }
+
+ bool isZero() const { return getCategory() == fcZero; }
+ bool isInfinity() const { return getCategory() == fcInfinity; }
+ bool isNaN() const { return getCategory() == fcNaN; }
+
+ bool isNegative() const { return getIEEE().isNegative(); }
+ bool isDenormal() const { APFLOAT_DISPATCH_ON_SEMANTICS(isDenormal()); }
+ bool isSignaling() const { return getIEEE().isSignaling(); }
+
+ bool isNormal() const { return !isDenormal() && isFiniteNonZero(); }
+ bool isFinite() const { return !isNaN() && !isInfinity(); }
+
+ fltCategory getCategory() const { return getIEEE().getCategory(); }
+ const fltSemantics &getSemantics() const { return *U.semantics; }
+ bool isNonZero() const { return !isZero(); }
+ bool isFiniteNonZero() const { return isFinite() && !isZero(); }
+ bool isPosZero() const { return isZero() && !isNegative(); }
+ bool isNegZero() const { return isZero() && isNegative(); }
+ bool isSmallest() const { APFLOAT_DISPATCH_ON_SEMANTICS(isSmallest()); }
+ bool isLargest() const { APFLOAT_DISPATCH_ON_SEMANTICS(isLargest()); }
+ bool isInteger() const { APFLOAT_DISPATCH_ON_SEMANTICS(isInteger()); }
+ bool isIEEE() const { return usesLayout<IEEEFloat>(getSemantics()); }
+
+ APFloat &operator=(const APFloat &RHS) = default;
+ APFloat &operator=(APFloat &&RHS) = default;
+
+ void toString(SmallVectorImpl<char> &Str, unsigned FormatPrecision = 0,
+ unsigned FormatMaxPadding = 3, bool TruncateZero = true) const {
+ APFLOAT_DISPATCH_ON_SEMANTICS(
+ toString(Str, FormatPrecision, FormatMaxPadding, TruncateZero));
+ }
+
+ void print(raw_ostream &) const;
+ void dump() const;
+
+ bool getExactInverse(APFloat *inv) const {
+ APFLOAT_DISPATCH_ON_SEMANTICS(getExactInverse(inv));
+ }
+
+ friend hash_code hash_value(const APFloat &Arg);
+ friend int ilogb(const APFloat &Arg) { return ilogb(Arg.getIEEE()); }
+ friend APFloat scalbn(APFloat X, int Exp, roundingMode RM);
+ friend APFloat frexp(const APFloat &X, int &Exp, roundingMode RM);
+ friend IEEEFloat;
+ friend DoubleAPFloat;
+};
+
+/// See friend declarations above.
+///
+ /// These additional declarations are required in order to compile LLVM with
+ /// the IBM xlC compiler.
+hash_code hash_value(const APFloat &Arg);
+inline APFloat scalbn(APFloat X, int Exp, APFloat::roundingMode RM) {
+ if (APFloat::usesLayout<detail::IEEEFloat>(X.getSemantics()))
+ return APFloat(scalbn(X.U.IEEE, Exp, RM), X.getSemantics());
+ if (APFloat::usesLayout<detail::DoubleAPFloat>(X.getSemantics()))
+ return APFloat(scalbn(X.U.Double, Exp, RM), X.getSemantics());
+ llvm_unreachable("Unexpected semantics");
+}
+
+ /// Equivalent of the C standard library function frexp.
+///
+ /// While the C standard says Exp is an unspecified value for infinity and NaN,
+ /// this returns INT_MAX for infinities and INT_MIN for NaNs.
+inline APFloat frexp(const APFloat &X, int &Exp, APFloat::roundingMode RM) {
+ if (APFloat::usesLayout<detail::IEEEFloat>(X.getSemantics()))
+ return APFloat(frexp(X.U.IEEE, Exp, RM), X.getSemantics());
+ if (APFloat::usesLayout<detail::DoubleAPFloat>(X.getSemantics()))
+ return APFloat(frexp(X.U.Double, Exp, RM), X.getSemantics());
+ llvm_unreachable("Unexpected semantics");
+}
+/// Returns the absolute value of the argument.
+inline APFloat abs(APFloat X) {
+ X.clearSign();
+ return X;
+}
+
+/// Returns the negated value of the argument.
+inline APFloat neg(APFloat X) {
+ X.changeSign();
+ return X;
+}
+
+ /// Implements IEEE minNum semantics. Returns the smaller of the two arguments
+ /// if neither is NaN. If either argument is a NaN, returns the other argument.
+LLVM_READONLY
+inline APFloat minnum(const APFloat &A, const APFloat &B) {
+ if (A.isNaN())
+ return B;
+ if (B.isNaN())
+ return A;
+ return B < A ? B : A;
+}
+
+ /// Implements IEEE maxNum semantics. Returns the larger of the two arguments
+ /// if neither is NaN. If either argument is a NaN, returns the other argument.
+LLVM_READONLY
+inline APFloat maxnum(const APFloat &A, const APFloat &B) {
+ if (A.isNaN())
+ return B;
+ if (B.isNaN())
+ return A;
+ return A < B ? B : A;
+}
+
+ /// Implements IEEE 754-2018 minimum semantics. Returns the smaller of the two
+ /// arguments, propagating NaNs and treating -0 as less than +0.
+LLVM_READONLY
+inline APFloat minimum(const APFloat &A, const APFloat &B) {
+ if (A.isNaN())
+ return A;
+ if (B.isNaN())
+ return B;
+ if (A.isZero() && B.isZero() && (A.isNegative() != B.isNegative()))
+ return A.isNegative() ? A : B;
+ return B < A ? B : A;
+}
+
+ /// Implements IEEE 754-2018 maximum semantics. Returns the larger of the two
+ /// arguments, propagating NaNs and treating -0 as less than +0.
+LLVM_READONLY
+inline APFloat maximum(const APFloat &A, const APFloat &B) {
+ if (A.isNaN())
+ return A;
+ if (B.isNaN())
+ return B;
+ if (A.isZero() && B.isZero() && (A.isNegative() != B.isNegative()))
+ return A.isNegative() ? B : A;
+ return A < B ? B : A;
+}
+
+} // namespace llvm
+
+#undef APFLOAT_DISPATCH_ON_SEMANTICS
+#endif // LLVM_ADT_APFLOAT_H
+
+#ifdef __GNUC__
+#pragma GCC diagnostic pop
+#endif
diff --git a/contrib/libs/llvm14/include/llvm/ADT/APInt.h b/contrib/libs/llvm14/include/llvm/ADT/APInt.h
new file mode 100644
index 0000000000..cf6cdfb515
--- /dev/null
+++ b/contrib/libs/llvm14/include/llvm/ADT/APInt.h
@@ -0,0 +1,2292 @@
+#pragma once
+
+#ifdef __GNUC__
+#pragma GCC diagnostic push
+#pragma GCC diagnostic ignored "-Wunused-parameter"
+#endif
+
+//===-- llvm/ADT/APInt.h - For Arbitrary Precision Integer -----*- C++ -*--===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+///
+/// \file
+/// This file implements a class to represent arbitrary precision
+/// integral constant values and operations on them.
+///
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_ADT_APINT_H
+#define LLVM_ADT_APINT_H
+
+#include "llvm/Support/Compiler.h"
+#include "llvm/Support/MathExtras.h"
+#include <cassert>
+#include <climits>
+#include <cstring>
+#include <utility>
+
+namespace llvm {
+class FoldingSetNodeID;
+class StringRef;
+class hash_code;
+class raw_ostream;
+
+template <typename T> class SmallVectorImpl;
+template <typename T> class ArrayRef;
+template <typename T> class Optional;
+template <typename T, typename Enable> struct DenseMapInfo;
+
+class APInt;
+
+inline APInt operator-(APInt);
+
+//===----------------------------------------------------------------------===//
+// APInt Class
+//===----------------------------------------------------------------------===//
+
+/// Class for arbitrary precision integers.
+///
+ /// APInt is a functional replacement for common-case unsigned integer types
+ /// like "unsigned", "unsigned long" or "uint64_t", but also allows non-byte-width
+ /// integer sizes and large integer value types such as 3-bit, 15-bit, or more
+ /// than 64-bit precision. APInt provides a variety of arithmetic operators
+/// and methods to manipulate integer values of any bit-width. It supports both
+/// the typical integer arithmetic and comparison operations as well as bitwise
+/// manipulation.
+///
+/// The class has several invariants worth noting:
+/// * All bit, byte, and word positions are zero-based.
+/// * Once the bit width is set, it doesn't change except by the Truncate,
+/// SignExtend, or ZeroExtend operations.
+/// * All binary operators must be on APInt instances of the same bit width.
+/// Attempting to use these operators on instances with different bit
+/// widths will yield an assertion.
+/// * The value is stored canonically as an unsigned value. For operations
+/// where it makes a difference, there are both signed and unsigned variants
+/// of the operation. For example, sdiv and udiv. However, because the bit
+/// widths must be the same, operations such as Mul and Add produce the same
+/// results regardless of whether the values are interpreted as signed or
+/// not.
+/// * In general, the class tries to follow the style of computation that LLVM
+/// uses in its IR. This simplifies its use for LLVM.
+/// * APInt supports zero-bit-width values, but operations that require bits
+/// are not defined on it (e.g. you cannot ask for the sign of a zero-bit
+/// integer). This means that operations like zero extension and logical
+ /// shifts are defined, but sign extension and ashr are not. Zero-bit values
+/// compare and hash equal to themselves, and countLeadingZeros returns 0.
+///
+class LLVM_NODISCARD APInt {
+public:
+ typedef uint64_t WordType;
+
+ /// This enum is used to hold the constants we needed for APInt.
+ enum : unsigned {
+ /// Byte size of a word.
+ APINT_WORD_SIZE = sizeof(WordType),
+ /// Bits in a word.
+ APINT_BITS_PER_WORD = APINT_WORD_SIZE * CHAR_BIT
+ };
+
+ enum class Rounding {
+ DOWN,
+ TOWARD_ZERO,
+ UP,
+ };
+
+ static constexpr WordType WORDTYPE_MAX = ~WordType(0);
+
+ /// \name Constructors
+ /// @{
+
+ /// Create a new APInt of numBits width, initialized as val.
+ ///
+ /// If isSigned is true then val is treated as if it were a signed value
+ /// (i.e. as an int64_t) and the appropriate sign extension to the bit width
+ /// will be done. Otherwise, no sign extension occurs (high order bits beyond
+ /// the range of val are zero filled).
+ ///
+ /// \param numBits the bit width of the constructed APInt
+ /// \param val the initial value of the APInt
+ /// \param isSigned how to treat signedness of val
+ APInt(unsigned numBits, uint64_t val, bool isSigned = false)
+ : BitWidth(numBits) {
+ if (isSingleWord()) {
+ U.VAL = val;
+ clearUnusedBits();
+ } else {
+ initSlowCase(val, isSigned);
+ }
+ }
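+
+ // Illustrative sketch of the isSigned flag (not upstream documentation):
+ //
+ //   APInt A(16, 255);      // 0x00FF: high bits zero filled
+ //   APInt B(16, -1, true); // 0xFFFF: -1 is sign extended to the bit width
+ //   // A != B, and B.isAllOnes() is true.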
+
+ /// Construct an APInt of numBits width, initialized as bigVal[].
+ ///
+ /// Note that bigVal.size() can be smaller or larger than the corresponding
+ /// bit width but any extraneous bits will be dropped.
+ ///
+ /// \param numBits the bit width of the constructed APInt
+ /// \param bigVal a sequence of words to form the initial value of the APInt
+ APInt(unsigned numBits, ArrayRef<uint64_t> bigVal);
+
+ /// Equivalent to APInt(numBits, ArrayRef<uint64_t>(bigVal, numWords)), but
+ /// deprecated because this constructor is prone to ambiguity with the
+ /// APInt(unsigned, uint64_t, bool) constructor.
+ ///
+ /// If this overload is ever deleted, care should be taken to prevent calls
+ /// from being incorrectly captured by the APInt(unsigned, uint64_t, bool)
+ /// constructor.
+ APInt(unsigned numBits, unsigned numWords, const uint64_t bigVal[]);
+
+ /// Construct an APInt from a string representation.
+ ///
+ /// This constructor interprets the string \p str in the given radix. The
+ /// interpretation stops when the first character that is not suitable for the
+ /// radix is encountered, or the end of the string. Acceptable radix values
+ /// are 2, 8, 10, 16, and 36. It is an error for the value implied by the
+ /// string to require more bits than numBits.
+ ///
+ /// \param numBits the bit width of the constructed APInt
+ /// \param str the string to be interpreted
+ /// \param radix the radix to use for the conversion
+ APInt(unsigned numBits, StringRef str, uint8_t radix);
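+
+ // Illustrative usage (a sketch, not upstream documentation):
+ //
+ //   APInt H(32, "DEADBEEF", 16); // hexadecimal
+ //   APInt N(8, "-1", 10);        // decimal; stored as two's complement 0xFF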
+
+ /// Default constructor that creates an APInt with a 1-bit zero value.
+ explicit APInt() : BitWidth(1) { U.VAL = 0; }
+
+ /// Copy Constructor.
+ APInt(const APInt &that) : BitWidth(that.BitWidth) {
+ if (isSingleWord())
+ U.VAL = that.U.VAL;
+ else
+ initSlowCase(that);
+ }
+
+ /// Move Constructor.
+ APInt(APInt &&that) : BitWidth(that.BitWidth) {
+ memcpy(&U, &that.U, sizeof(U));
+ that.BitWidth = 0;
+ }
+
+ /// Destructor.
+ ~APInt() {
+ if (needsCleanup())
+ delete[] U.pVal;
+ }
+
+ /// @}
+ /// \name Value Generators
+ /// @{
+
+ /// Get the '0' value for the specified bit-width.
+ static APInt getZero(unsigned numBits) { return APInt(numBits, 0); }
+
+ /// NOTE: This is soft-deprecated. Please use `getZero()` instead.
+ static APInt getNullValue(unsigned numBits) { return getZero(numBits); }
+
+ /// Return an APInt zero bits wide.
+ static APInt getZeroWidth() { return getZero(0); }
+
+ /// Gets maximum unsigned value of APInt for specific bit width.
+ static APInt getMaxValue(unsigned numBits) { return getAllOnes(numBits); }
+
+ /// Gets maximum signed value of APInt for a specific bit width.
+ static APInt getSignedMaxValue(unsigned numBits) {
+ APInt API = getAllOnes(numBits);
+ API.clearBit(numBits - 1);
+ return API;
+ }
+
+ /// Gets minimum unsigned value of APInt for a specific bit width.
+ static APInt getMinValue(unsigned numBits) { return APInt(numBits, 0); }
+
+ /// Gets minimum signed value of APInt for a specific bit width.
+ static APInt getSignedMinValue(unsigned numBits) {
+ APInt API(numBits, 0);
+ API.setBit(numBits - 1);
+ return API;
+ }
+
+ /// Get the SignMask for a specific bit width.
+ ///
+ /// This is just a wrapper function of getSignedMinValue(), and it helps code
+ /// readability when we want to get a SignMask.
+ static APInt getSignMask(unsigned BitWidth) {
+ return getSignedMinValue(BitWidth);
+ }
+
+ /// Return an APInt of a specified width with all bits set.
+ static APInt getAllOnes(unsigned numBits) {
+ return APInt(numBits, WORDTYPE_MAX, true);
+ }
+
+ /// NOTE: This is soft-deprecated. Please use `getAllOnes()` instead.
+ static APInt getAllOnesValue(unsigned numBits) { return getAllOnes(numBits); }
+
+ /// Return an APInt with exactly one bit set in the result.
+ static APInt getOneBitSet(unsigned numBits, unsigned BitNo) {
+ APInt Res(numBits, 0);
+ Res.setBit(BitNo);
+ return Res;
+ }
+
+ /// Get a value with a block of bits set.
+ ///
+ /// Constructs an APInt value that has a contiguous range of bits set. The
+ /// bits from loBit (inclusive) to hiBit (exclusive) will be set. All other
+ /// bits will be zero. For example, with parameters(32, 0, 16) you would get
+ /// 0x0000FFFF. Please call getBitsSetWithWrap if \p loBit may be greater than
+ /// \p hiBit.
+ ///
+ /// \param numBits the intended bit width of the result
+ /// \param loBit the index of the lowest bit set.
+ /// \param hiBit the index of the highest bit set.
+ ///
+ /// \returns An APInt value with the requested bits set.
+ static APInt getBitsSet(unsigned numBits, unsigned loBit, unsigned hiBit) {
+ APInt Res(numBits, 0);
+ Res.setBits(loBit, hiBit);
+ return Res;
+ }
+
+ /// Wrap version of getBitsSet.
+ /// If \p hiBit is bigger than \p loBit, this is the same as getBitsSet.
+ /// If \p hiBit is not bigger than \p loBit, the set bits "wrap". For example,
+ /// with parameters (32, 28, 4), you would get 0xF000000F.
+ /// If \p hiBit is equal to \p loBit, you would get a result with all bits
+ /// set.
+ static APInt getBitsSetWithWrap(unsigned numBits, unsigned loBit,
+ unsigned hiBit) {
+ APInt Res(numBits, 0);
+ Res.setBitsWithWrap(loBit, hiBit);
+ return Res;
+ }
+
+ /// Constructs an APInt value that has a contiguous range of bits set. The
+ /// bits from loBit (inclusive) to numBits (exclusive) will be set. All other
+ /// bits will be zero. For example, with parameters(32, 12) you would get
+ /// 0xFFFFF000.
+ ///
+ /// \param numBits the intended bit width of the result
+ /// \param loBit the index of the lowest bit to set.
+ ///
+ /// \returns An APInt value with the requested bits set.
+ static APInt getBitsSetFrom(unsigned numBits, unsigned loBit) {
+ APInt Res(numBits, 0);
+ Res.setBitsFrom(loBit);
+ return Res;
+ }
+
+ /// Constructs an APInt value that has the top hiBitsSet bits set.
+ ///
+ /// \param numBits the bitwidth of the result
+ /// \param hiBitsSet the number of high-order bits set in the result.
+ static APInt getHighBitsSet(unsigned numBits, unsigned hiBitsSet) {
+ APInt Res(numBits, 0);
+ Res.setHighBits(hiBitsSet);
+ return Res;
+ }
+
+ /// Constructs an APInt value that has the bottom loBitsSet bits set.
+ ///
+ /// \param numBits the bitwidth of the result
+ /// \param loBitsSet the number of low-order bits set in the result.
+ static APInt getLowBitsSet(unsigned numBits, unsigned loBitsSet) {
+ APInt Res(numBits, 0);
+ Res.setLowBits(loBitsSet);
+ return Res;
+ }
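+
+ // Illustrative values for the mask constructors above (a sketch, not
+ // upstream documentation):
+ //
+ //   APInt::getBitsSet(32, 0, 16);  // 0x0000FFFF
+ //   APInt::getBitsSetFrom(32, 12); // 0xFFFFF000
+ //   APInt::getHighBitsSet(32, 8);  // 0xFF000000
+ //   APInt::getLowBitsSet(32, 8);   // 0x000000FF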
+
+ /// Return a value containing V broadcasted over NewLen bits.
+ static APInt getSplat(unsigned NewLen, const APInt &V);
+
+ /// @}
+ /// \name Value Tests
+ /// @{
+
+ /// Determine if this APInt needs just one word to store its value.
+ ///
+ /// \returns true if the number of bits <= 64, false otherwise.
+ bool isSingleWord() const { return BitWidth <= APINT_BITS_PER_WORD; }
+
+ /// Determine sign of this APInt.
+ ///
+ /// This tests the high bit of this APInt to determine if it is set.
+ ///
+ /// \returns true if this APInt is negative, false otherwise
+ bool isNegative() const { return (*this)[BitWidth - 1]; }
+
+ /// Determine if this APInt Value is non-negative (>= 0).
+ ///
+ /// This tests the high bit of the APInt to determine if it is unset.
+ bool isNonNegative() const { return !isNegative(); }
+
+ /// Determine if sign bit of this APInt is set.
+ ///
+ /// This tests the high bit of this APInt to determine if it is set.
+ ///
+ /// \returns true if this APInt has its sign bit set, false otherwise.
+ bool isSignBitSet() const { return (*this)[BitWidth - 1]; }
+
+ /// Determine if sign bit of this APInt is clear.
+ ///
+ /// This tests the high bit of this APInt to determine if it is clear.
+ ///
+ /// \returns true if this APInt has its sign bit clear, false otherwise.
+ bool isSignBitClear() const { return !isSignBitSet(); }
+
+ /// Determine if this APInt Value is positive.
+ ///
+ /// This tests if the value of this APInt is positive (> 0). Note
+ /// that 0 is not a positive value.
+ ///
+ /// \returns true if this APInt is positive.
+ bool isStrictlyPositive() const { return isNonNegative() && !isZero(); }
+
+ /// Determine if this APInt Value is non-positive (<= 0).
+ ///
+ /// \returns true if this APInt is non-positive.
+ bool isNonPositive() const { return !isStrictlyPositive(); }
+
+ /// Determine if all bits are set. This is true for zero-width values.
+ bool isAllOnes() const {
+ if (BitWidth == 0)
+ return true;
+ if (isSingleWord())
+ return U.VAL == WORDTYPE_MAX >> (APINT_BITS_PER_WORD - BitWidth);
+ return countTrailingOnesSlowCase() == BitWidth;
+ }
+
+ /// NOTE: This is soft-deprecated. Please use `isAllOnes()` instead.
+ bool isAllOnesValue() const { return isAllOnes(); }
+
+ /// Determine if this value is zero, i.e. all bits are clear.
+ bool isZero() const {
+ if (isSingleWord())
+ return U.VAL == 0;
+ return countLeadingZerosSlowCase() == BitWidth;
+ }
+
+ /// NOTE: This is soft-deprecated. Please use `isZero()` instead.
+ bool isNullValue() const { return isZero(); }
+
+ /// Determine if this is a value of 1.
+ ///
+ /// This checks to see if the value of this APInt is one.
+ bool isOne() const {
+ if (isSingleWord())
+ return U.VAL == 1;
+ return countLeadingZerosSlowCase() == BitWidth - 1;
+ }
+
+ /// NOTE: This is soft-deprecated. Please use `isOne()` instead.
+ bool isOneValue() const { return isOne(); }
+
+ /// Determine if this is the largest unsigned value.
+ ///
+ /// This checks to see if the value of this APInt is the maximum unsigned
+ /// value for the APInt's bit width.
+ bool isMaxValue() const { return isAllOnes(); }
+
+ /// Determine if this is the largest signed value.
+ ///
+ /// This checks to see if the value of this APInt is the maximum signed
+ /// value for the APInt's bit width.
+ bool isMaxSignedValue() const {
+ if (isSingleWord()) {
+ assert(BitWidth && "zero width values not allowed");
+ return U.VAL == ((WordType(1) << (BitWidth - 1)) - 1);
+ }
+ return !isNegative() && countTrailingOnesSlowCase() == BitWidth - 1;
+ }
+
+ /// Determine if this is the smallest unsigned value.
+ ///
+ /// This checks to see if the value of this APInt is the minimum unsigned
+ /// value for the APInt's bit width.
+ bool isMinValue() const { return isZero(); }
+
+ /// Determine if this is the smallest signed value.
+ ///
+ /// This checks to see if the value of this APInt is the minimum signed
+ /// value for the APInt's bit width.
+ bool isMinSignedValue() const {
+ if (isSingleWord()) {
+ assert(BitWidth && "zero width values not allowed");
+ return U.VAL == (WordType(1) << (BitWidth - 1));
+ }
+ return isNegative() && countTrailingZerosSlowCase() == BitWidth - 1;
+ }
+
+ /// Check if this APInt has an N-bits unsigned integer value.
+ bool isIntN(unsigned N) const { return getActiveBits() <= N; }
+
+ /// Check if this APInt has an N-bits signed integer value.
+ bool isSignedIntN(unsigned N) const { return getSignificantBits() <= N; }
+
+ /// Check if this APInt's value is a power of two greater than zero.
+ ///
+ /// \returns true if the argument APInt value is a power of two > 0.
+ bool isPowerOf2() const {
+ if (isSingleWord()) {
+ assert(BitWidth && "zero width values not allowed");
+ return isPowerOf2_64(U.VAL);
+ }
+ return countPopulationSlowCase() == 1;
+ }
+
+ /// Check if this APInt's negated value is a power of two greater than zero.
+ bool isNegatedPowerOf2() const {
+ assert(BitWidth && "zero width values not allowed");
+ if (isNonNegative())
+ return false;
+ // A negated power of 2 is a shifted mask in the top bits: a contiguous run
+ // of leading ones followed only by trailing zeros.
+ unsigned LO = countLeadingOnes();
+ unsigned TZ = countTrailingZeros();
+ return (LO + TZ) == BitWidth;
+ }
+
+ /// Check if the APInt's value is returned by getSignMask.
+ ///
+ /// \returns true if this is the value returned by getSignMask.
+ bool isSignMask() const { return isMinSignedValue(); }
+
+ /// Convert APInt to a boolean value.
+ ///
+ /// This converts the APInt to a boolean value as a test against zero.
+ bool getBoolValue() const { return !isZero(); }
+
+ /// If this value is smaller than the specified limit, return it, otherwise
+ /// return the limit value. This causes the value to saturate to the limit.
+ uint64_t getLimitedValue(uint64_t Limit = UINT64_MAX) const {
+ return ugt(Limit) ? Limit : getZExtValue();
+ }
+
+ /// Check if the APInt consists of a repeated bit pattern.
+ ///
+ /// e.g. 0x01010101 satisfies isSplat(8).
+ /// \param SplatSizeInBits The size of the pattern in bits. Must divide bit
+ /// width without remainder.
+ bool isSplat(unsigned SplatSizeInBits) const;
+
+ /// \returns true if this APInt value is a sequence of \p numBits ones
+ /// starting at the least significant bit with the remainder zero.
+ bool isMask(unsigned numBits) const {
+ assert(numBits != 0 && "numBits must be non-zero");
+ assert(numBits <= BitWidth && "numBits out of range");
+ if (isSingleWord())
+ return U.VAL == (WORDTYPE_MAX >> (APINT_BITS_PER_WORD - numBits));
+ unsigned Ones = countTrailingOnesSlowCase();
+ return (numBits == Ones) &&
+ ((Ones + countLeadingZerosSlowCase()) == BitWidth);
+ }
+
+ /// \returns true if this APInt is a non-empty sequence of ones starting at
+ /// the least significant bit with the remainder zero.
+ /// Ex. isMask(0x0000FFFFU) == true.
+ bool isMask() const {
+ if (isSingleWord())
+ return isMask_64(U.VAL);
+ unsigned Ones = countTrailingOnesSlowCase();
+ return (Ones > 0) && ((Ones + countLeadingZerosSlowCase()) == BitWidth);
+ }
+
+ /// Return true if this APInt value contains a contiguous sequence of ones
+ /// with the remainder zero.
+ bool isShiftedMask() const {
+ if (isSingleWord())
+ return isShiftedMask_64(U.VAL);
+ unsigned Ones = countPopulationSlowCase();
+ unsigned LeadZ = countLeadingZerosSlowCase();
+ return (Ones + LeadZ + countTrailingZeros()) == BitWidth;
+ }
+
+ /// Compute an APInt containing the numBits high bits of this APInt.
+ ///
+ /// Get an APInt with the same BitWidth as this APInt, just zero mask the low
+ /// bits and right shift to the least significant bit.
+ ///
+ /// \returns the high "numBits" bits of this APInt.
+ APInt getHiBits(unsigned numBits) const;
+
+ /// Compute an APInt containing the numBits low bits of this APInt.
+ ///
+ /// Get an APInt with the same BitWidth as this APInt, just zero mask the high
+ /// bits.
+ ///
+ /// \returns the low "numBits" bits of this APInt.
+ APInt getLoBits(unsigned numBits) const;
+
+ /// Determine if two APInts have the same value, after zero-extending
+ /// one of them (if needed!) to ensure that the bit-widths match.
+ static bool isSameValue(const APInt &I1, const APInt &I2) {
+ if (I1.getBitWidth() == I2.getBitWidth())
+ return I1 == I2;
+
+ if (I1.getBitWidth() > I2.getBitWidth())
+ return I1 == I2.zext(I1.getBitWidth());
+
+ return I1.zext(I2.getBitWidth()) == I2;
+ }
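+
+ // Illustrative sketch: isSameValue tolerates differing widths, whereas
+ // operator== asserts that the widths match.
+ //
+ //   APInt::isSameValue(APInt(8, 5), APInt(32, 5)); // true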
+
+ /// Overload to compute a hash_code for an APInt value.
+ friend hash_code hash_value(const APInt &Arg);
+
+ /// This function returns a pointer to the internal storage of the APInt.
+ /// This is useful for writing out the APInt in binary form without any
+ /// conversions.
+ const uint64_t *getRawData() const {
+ if (isSingleWord())
+ return &U.VAL;
+ return &U.pVal[0];
+ }
+
+ /// @}
+ /// \name Unary Operators
+ /// @{
+
+ /// Postfix increment operator. Increment *this by 1.
+ ///
+ /// \returns a new APInt value representing the original value of *this.
+ APInt operator++(int) {
+ APInt API(*this);
+ ++(*this);
+ return API;
+ }
+
+ /// Prefix increment operator.
+ ///
+ /// \returns *this incremented by one
+ APInt &operator++();
+
+ /// Postfix decrement operator. Decrement *this by 1.
+ ///
+ /// \returns a new APInt value representing the original value of *this.
+ APInt operator--(int) {
+ APInt API(*this);
+ --(*this);
+ return API;
+ }
+
+ /// Prefix decrement operator.
+ ///
+ /// \returns *this decremented by one.
+ APInt &operator--();
+
+ /// Logical negation operation on this APInt. Returns true if the value is
+ /// zero, as for ordinary integers.
+ bool operator!() const { return isZero(); }
+
+ /// @}
+ /// \name Assignment Operators
+ /// @{
+
+ /// Copy assignment operator.
+ ///
+ /// \returns *this after assignment of RHS.
+ APInt &operator=(const APInt &RHS) {
+ // The common case (both source and dest being inline) doesn't require
+ // allocation or deallocation.
+ if (isSingleWord() && RHS.isSingleWord()) {
+ U.VAL = RHS.U.VAL;
+ BitWidth = RHS.BitWidth;
+ return *this;
+ }
+
+ assignSlowCase(RHS);
+ return *this;
+ }
+
+ /// Move assignment operator.
+ APInt &operator=(APInt &&that) {
+#ifdef EXPENSIVE_CHECKS
+ // Some std::shuffle implementations still do self-assignment.
+ if (this == &that)
+ return *this;
+#endif
+ assert(this != &that && "Self-move not supported");
+ if (!isSingleWord())
+ delete[] U.pVal;
+
+ // Use memcpy so that type based alias analysis sees both VAL and pVal
+ // as modified.
+ memcpy(&U, &that.U, sizeof(U));
+
+ BitWidth = that.BitWidth;
+ that.BitWidth = 0;
+ return *this;
+ }
+
+ /// Assignment operator.
+ ///
+ /// The RHS value is assigned to *this. If the significant bits in RHS exceed
+ /// the bit width, the excess bits are truncated. If the bit width is larger
+ /// than 64, the value is zero filled in the unspecified high order bits.
+ ///
+ /// \returns *this after assignment of RHS value.
+ APInt &operator=(uint64_t RHS) {
+ if (isSingleWord()) {
+ U.VAL = RHS;
+ return clearUnusedBits();
+ }
+ U.pVal[0] = RHS;
+ memset(U.pVal + 1, 0, (getNumWords() - 1) * APINT_WORD_SIZE);
+ return *this;
+ }
+
+ /// Bitwise AND assignment operator.
+ ///
+ /// Performs a bitwise AND operation on this APInt and RHS. The result is
+ /// assigned to *this.
+ ///
+ /// \returns *this after ANDing with RHS.
+ APInt &operator&=(const APInt &RHS) {
+ assert(BitWidth == RHS.BitWidth && "Bit widths must be the same");
+ if (isSingleWord())
+ U.VAL &= RHS.U.VAL;
+ else
+ andAssignSlowCase(RHS);
+ return *this;
+ }
+
+ /// Bitwise AND assignment operator.
+ ///
+ /// Performs a bitwise AND operation on this APInt and RHS. RHS is
+ /// logically zero-extended or truncated to match the bit-width of
+ /// the LHS.
+ APInt &operator&=(uint64_t RHS) {
+ if (isSingleWord()) {
+ U.VAL &= RHS;
+ return *this;
+ }
+ U.pVal[0] &= RHS;
+ memset(U.pVal + 1, 0, (getNumWords() - 1) * APINT_WORD_SIZE);
+ return *this;
+ }
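+
+ // Illustrative sketch of the zero-extension rule above:
+ //
+ //   APInt X = APInt::getAllOnes(128);
+ //   X &= uint64_t(0xFF); // upper words cleared; X is now 0xFF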
+
+ /// Bitwise OR assignment operator.
+ ///
+ /// Performs a bitwise OR operation on this APInt and RHS. The result is
+ /// assigned to *this.
+ ///
+ /// \returns *this after ORing with RHS.
+ APInt &operator|=(const APInt &RHS) {
+ assert(BitWidth == RHS.BitWidth && "Bit widths must be the same");
+ if (isSingleWord())
+ U.VAL |= RHS.U.VAL;
+ else
+ orAssignSlowCase(RHS);
+ return *this;
+ }
+
+ /// Bitwise OR assignment operator.
+ ///
+ /// Performs a bitwise OR operation on this APInt and RHS. RHS is
+ /// logically zero-extended or truncated to match the bit-width of
+ /// the LHS.
+ APInt &operator|=(uint64_t RHS) {
+ if (isSingleWord()) {
+ U.VAL |= RHS;
+ return clearUnusedBits();
+ }
+ U.pVal[0] |= RHS;
+ return *this;
+ }
+
+ /// Bitwise XOR assignment operator.
+ ///
+ /// Performs a bitwise XOR operation on this APInt and RHS. The result is
+ /// assigned to *this.
+ ///
+ /// \returns *this after XORing with RHS.
+ APInt &operator^=(const APInt &RHS) {
+ assert(BitWidth == RHS.BitWidth && "Bit widths must be the same");
+ if (isSingleWord())
+ U.VAL ^= RHS.U.VAL;
+ else
+ xorAssignSlowCase(RHS);
+ return *this;
+ }
+
+ /// Bitwise XOR assignment operator.
+ ///
+ /// Performs a bitwise XOR operation on this APInt and RHS. RHS is
+ /// logically zero-extended or truncated to match the bit-width of
+ /// the LHS.
+ APInt &operator^=(uint64_t RHS) {
+ if (isSingleWord()) {
+ U.VAL ^= RHS;
+ return clearUnusedBits();
+ }
+ U.pVal[0] ^= RHS;
+ return *this;
+ }
+
+ /// Multiplication assignment operator.
+ ///
+ /// Multiplies this APInt by RHS and assigns the result to *this.
+ ///
+ /// \returns *this
+ APInt &operator*=(const APInt &RHS);
+ APInt &operator*=(uint64_t RHS);
+
+ /// Addition assignment operator.
+ ///
+ /// Adds RHS to *this and assigns the result to *this.
+ ///
+ /// \returns *this
+ APInt &operator+=(const APInt &RHS);
+ APInt &operator+=(uint64_t RHS);
+
+ /// Subtraction assignment operator.
+ ///
+ /// Subtracts RHS from *this and assigns the result to *this.
+ ///
+ /// \returns *this
+ APInt &operator-=(const APInt &RHS);
+ APInt &operator-=(uint64_t RHS);
+
+ /// Left-shift assignment function.
+ ///
+ /// Shifts *this left by shiftAmt and assigns the result to *this.
+ ///
+ /// \returns *this after shifting left by ShiftAmt
+ APInt &operator<<=(unsigned ShiftAmt) {
+ assert(ShiftAmt <= BitWidth && "Invalid shift amount");
+ if (isSingleWord()) {
+ if (ShiftAmt == BitWidth)
+ U.VAL = 0;
+ else
+ U.VAL <<= ShiftAmt;
+ return clearUnusedBits();
+ }
+ shlSlowCase(ShiftAmt);
+ return *this;
+ }
+
+ /// Left-shift assignment function.
+ ///
+ /// Shifts *this left by shiftAmt and assigns the result to *this.
+ ///
+ /// \returns *this after shifting left by ShiftAmt
+ APInt &operator<<=(const APInt &ShiftAmt);
+
+ /// @}
+ /// \name Binary Operators
+ /// @{
+
+ /// Multiplication operator.
+ ///
+ /// Multiplies this APInt by RHS and returns the result.
+ APInt operator*(const APInt &RHS) const;
+
+ /// Left logical shift operator.
+ ///
+ /// Shifts this APInt left by \p Bits and returns the result.
+ APInt operator<<(unsigned Bits) const { return shl(Bits); }
+
+ /// Left logical shift operator.
+ ///
+ /// Shifts this APInt left by \p Bits and returns the result.
+ APInt operator<<(const APInt &Bits) const { return shl(Bits); }
+
+ /// Arithmetic right-shift function.
+ ///
+ /// Arithmetic right-shift this APInt by shiftAmt.
+ APInt ashr(unsigned ShiftAmt) const {
+ APInt R(*this);
+ R.ashrInPlace(ShiftAmt);
+ return R;
+ }
+
+ /// Arithmetic right-shift this APInt by ShiftAmt in place.
+ void ashrInPlace(unsigned ShiftAmt) {
+ assert(ShiftAmt <= BitWidth && "Invalid shift amount");
+ if (isSingleWord()) {
+ int64_t SExtVAL = SignExtend64(U.VAL, BitWidth);
+ if (ShiftAmt == BitWidth)
+ U.VAL = SExtVAL >> (APINT_BITS_PER_WORD - 1); // Fill with sign bit.
+ else
+ U.VAL = SExtVAL >> ShiftAmt;
+ clearUnusedBits();
+ return;
+ }
+ ashrSlowCase(ShiftAmt);
+ }
+
+ /// Logical right-shift function.
+ ///
+ /// Logical right-shift this APInt by shiftAmt.
+ APInt lshr(unsigned shiftAmt) const {
+ APInt R(*this);
+ R.lshrInPlace(shiftAmt);
+ return R;
+ }
+
+ /// Logical right-shift this APInt by ShiftAmt in place.
+ void lshrInPlace(unsigned ShiftAmt) {
+ assert(ShiftAmt <= BitWidth && "Invalid shift amount");
+ if (isSingleWord()) {
+ if (ShiftAmt == BitWidth)
+ U.VAL = 0;
+ else
+ U.VAL >>= ShiftAmt;
+ return;
+ }
+ lshrSlowCase(ShiftAmt);
+ }
+
+ /// Left-shift function.
+ ///
+ /// Left-shift this APInt by shiftAmt.
+ APInt shl(unsigned shiftAmt) const {
+ APInt R(*this);
+ R <<= shiftAmt;
+ return R;
+ }
+
+ /// Rotate left by rotateAmt.
+ APInt rotl(unsigned rotateAmt) const;
+
+ /// Rotate right by rotateAmt.
+ APInt rotr(unsigned rotateAmt) const;
+
+ /// Arithmetic right-shift function.
+ ///
+ /// Arithmetic right-shift this APInt by shiftAmt.
+ APInt ashr(const APInt &ShiftAmt) const {
+ APInt R(*this);
+ R.ashrInPlace(ShiftAmt);
+ return R;
+ }
+
+ /// Arithmetic right-shift this APInt by shiftAmt in place.
+ void ashrInPlace(const APInt &shiftAmt);
+
+ /// Logical right-shift function.
+ ///
+ /// Logical right-shift this APInt by shiftAmt.
+ APInt lshr(const APInt &ShiftAmt) const {
+ APInt R(*this);
+ R.lshrInPlace(ShiftAmt);
+ return R;
+ }
+
+ /// Logical right-shift this APInt by ShiftAmt in place.
+ void lshrInPlace(const APInt &ShiftAmt);
+
+ /// Left-shift function.
+ ///
+ /// Left-shift this APInt by shiftAmt.
+ APInt shl(const APInt &ShiftAmt) const {
+ APInt R(*this);
+ R <<= ShiftAmt;
+ return R;
+ }
+
+ /// Rotate left by rotateAmt.
+ APInt rotl(const APInt &rotateAmt) const;
+
+ /// Rotate right by rotateAmt.
+ APInt rotr(const APInt &rotateAmt) const;
+
+ /// Concatenate the bits from "NewLSB" onto the bottom of *this. This is
+ /// equivalent to:
+ /// (this->zext(NewWidth) << NewLSB.getBitWidth()) | NewLSB.zext(NewWidth)
+ APInt concat(const APInt &NewLSB) const {
+ // If the result will be small, then both the merged values are small.
+ unsigned NewWidth = getBitWidth() + NewLSB.getBitWidth();
+ if (NewWidth <= APINT_BITS_PER_WORD)
+ return APInt(NewWidth, (U.VAL << NewLSB.getBitWidth()) | NewLSB.U.VAL);
+ return concatSlowCase(NewLSB);
+ }
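+
+ // Illustrative usage (a sketch, not upstream documentation):
+ //
+ //   APInt Hi(4, 0xA), Lo(4, 0xB);
+ //   Hi.concat(Lo); // an 8-bit APInt with value 0xAB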
+
+ /// Unsigned division operation.
+ ///
+ /// Perform an unsigned divide operation on this APInt by RHS. Both this and
+ /// RHS are treated as unsigned quantities for purposes of this division.
+ ///
+ /// \returns a new APInt value containing the division result, rounded towards
+ /// zero.
+ APInt udiv(const APInt &RHS) const;
+ APInt udiv(uint64_t RHS) const;
+
+ /// Signed division function for APInt.
+ ///
+ /// Signed divide this APInt by APInt RHS.
+ ///
+ /// The result is rounded towards zero.
+ APInt sdiv(const APInt &RHS) const;
+ APInt sdiv(int64_t RHS) const;
+
+ /// Unsigned remainder operation.
+ ///
+ /// Perform an unsigned remainder operation on this APInt with RHS being the
+ /// divisor. Both this and RHS are treated as unsigned quantities for purposes
+ /// of this operation. Note that this is a true remainder operation and not a
+ /// modulo operation because the sign follows the sign of the dividend which
+ /// is *this.
+ ///
+ /// \returns a new APInt value containing the remainder result
+ APInt urem(const APInt &RHS) const;
+ uint64_t urem(uint64_t RHS) const;
+
+ /// Function for signed remainder operation.
+ ///
+ /// Signed remainder operation on APInt.
+ APInt srem(const APInt &RHS) const;
+ int64_t srem(int64_t RHS) const;
+
+ /// Dual division/remainder interface.
+ ///
+ /// Sometimes it is convenient to divide two APInt values and obtain both the
+ /// quotient and remainder. This function does both operations in the same
+ /// computation making it a little more efficient. The pair of input arguments
+ /// may overlap with the pair of output arguments. It is safe to call
+ /// udivrem(X, Y, X, Y), for example.
+ static void udivrem(const APInt &LHS, const APInt &RHS, APInt &Quotient,
+ APInt &Remainder);
+ static void udivrem(const APInt &LHS, uint64_t RHS, APInt &Quotient,
+ uint64_t &Remainder);
+
+ static void sdivrem(const APInt &LHS, const APInt &RHS, APInt &Quotient,
+ APInt &Remainder);
+ static void sdivrem(const APInt &LHS, int64_t RHS, APInt &Quotient,
+ int64_t &Remainder);
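+
+ // Illustrative usage of the dual interface (a sketch, not upstream
+ // documentation):
+ //
+ //   APInt Q(32, 0), R(32, 0);
+ //   APInt::udivrem(APInt(32, 7), APInt(32, 2), Q, R); // Q == 3, R == 1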
+
+ // Operations that return overflow indicators.
+ APInt sadd_ov(const APInt &RHS, bool &Overflow) const;
+ APInt uadd_ov(const APInt &RHS, bool &Overflow) const;
+ APInt ssub_ov(const APInt &RHS, bool &Overflow) const;
+ APInt usub_ov(const APInt &RHS, bool &Overflow) const;
+ APInt sdiv_ov(const APInt &RHS, bool &Overflow) const;
+ APInt smul_ov(const APInt &RHS, bool &Overflow) const;
+ APInt umul_ov(const APInt &RHS, bool &Overflow) const;
+ APInt sshl_ov(const APInt &Amt, bool &Overflow) const;
+ APInt ushl_ov(const APInt &Amt, bool &Overflow) const;
+
+ // Operations that saturate
+ APInt sadd_sat(const APInt &RHS) const;
+ APInt uadd_sat(const APInt &RHS) const;
+ APInt ssub_sat(const APInt &RHS) const;
+ APInt usub_sat(const APInt &RHS) const;
+ APInt smul_sat(const APInt &RHS) const;
+ APInt umul_sat(const APInt &RHS) const;
+ APInt sshl_sat(const APInt &RHS) const;
+ APInt ushl_sat(const APInt &RHS) const;
+
+ /// Array-indexing support.
+ ///
+ /// \returns the bit value at bitPosition
+ bool operator[](unsigned bitPosition) const {
+ assert(bitPosition < getBitWidth() && "Bit position out of bounds!");
+ return (maskBit(bitPosition) & getWord(bitPosition)) != 0;
+ }
+
+ /// @}
+ /// \name Comparison Operators
+ /// @{
+
+ /// Equality operator.
+ ///
+ /// Compares this APInt with RHS for the validity of the equality
+ /// relationship.
+ bool operator==(const APInt &RHS) const {
+ assert(BitWidth == RHS.BitWidth && "Comparison requires equal bit widths");
+ if (isSingleWord())
+ return U.VAL == RHS.U.VAL;
+ return equalSlowCase(RHS);
+ }
+
+ /// Equality operator.
+ ///
+ /// Compares this APInt with a uint64_t for the validity of the equality
+ /// relationship.
+ ///
+ /// \returns true if *this == Val
+ bool operator==(uint64_t Val) const {
+ return (isSingleWord() || getActiveBits() <= 64) && getZExtValue() == Val;
+ }
+
+ /// Equality comparison.
+ ///
+ /// Compares this APInt with RHS for the validity of the equality
+ /// relationship.
+ ///
+ /// \returns true if *this == RHS
+ bool eq(const APInt &RHS) const { return (*this) == RHS; }
+
+ /// Inequality operator.
+ ///
+ /// Compares this APInt with RHS for the validity of the inequality
+ /// relationship.
+ ///
+ /// \returns true if *this != RHS
+ bool operator!=(const APInt &RHS) const { return !((*this) == RHS); }
+
+ /// Inequality operator.
+ ///
+ /// Compares this APInt with a uint64_t for the validity of the inequality
+ /// relationship.
+ ///
+ /// \returns true if *this != Val
+ bool operator!=(uint64_t Val) const { return !((*this) == Val); }
+
+ /// Inequality comparison
+ ///
+ /// Compares this APInt with RHS for the validity of the inequality
+ /// relationship.
+ ///
+ /// \returns true if *this != RHS
+ bool ne(const APInt &RHS) const { return !((*this) == RHS); }
+
+ /// Unsigned less than comparison
+ ///
+ /// Regards both *this and RHS as unsigned quantities and compares them for
+ /// the validity of the less-than relationship.
+ ///
+ /// \returns true if *this < RHS when both are considered unsigned.
+ bool ult(const APInt &RHS) const { return compare(RHS) < 0; }
+
+ /// Unsigned less than comparison
+ ///
+ /// Regards *this as an unsigned quantity and compares it with RHS for
+ /// the validity of the less-than relationship.
+ ///
+ /// \returns true if *this < RHS when considered unsigned.
+ bool ult(uint64_t RHS) const {
+ // Only need to check active bits if not a single word.
+ return (isSingleWord() || getActiveBits() <= 64) && getZExtValue() < RHS;
+ }
+
+ /// Signed less than comparison
+ ///
+ /// Regards both *this and RHS as signed quantities and compares them for
+ /// validity of the less-than relationship.
+ ///
+ /// \returns true if *this < RHS when both are considered signed.
+ bool slt(const APInt &RHS) const { return compareSigned(RHS) < 0; }
+
+ /// Signed less than comparison
+ ///
+ /// Regards *this as a signed quantity and compares it with RHS for
+ /// the validity of the less-than relationship.
+ ///
+ /// \returns true if *this < RHS when considered signed.
+ bool slt(int64_t RHS) const {
+ return (!isSingleWord() && getSignificantBits() > 64)
+ ? isNegative()
+ : getSExtValue() < RHS;
+ }
+
+ /// Unsigned less or equal comparison
+ ///
+ /// Regards both *this and RHS as unsigned quantities and compares them for
+ /// validity of the less-or-equal relationship.
+ ///
+ /// \returns true if *this <= RHS when both are considered unsigned.
+ bool ule(const APInt &RHS) const { return compare(RHS) <= 0; }
+
+ /// Unsigned less or equal comparison
+ ///
+ /// Regards *this as an unsigned quantity and compares it with RHS for
+ /// the validity of the less-or-equal relationship.
+ ///
+ /// \returns true if *this <= RHS when considered unsigned.
+ bool ule(uint64_t RHS) const { return !ugt(RHS); }
+
+ /// Signed less or equal comparison
+ ///
+ /// Regards both *this and RHS as signed quantities and compares them for
+ /// validity of the less-or-equal relationship.
+ ///
+ /// \returns true if *this <= RHS when both are considered signed.
+ bool sle(const APInt &RHS) const { return compareSigned(RHS) <= 0; }
+
+ /// Signed less or equal comparison
+ ///
+ /// Regards *this as a signed quantity and compares it with RHS for the
+ /// validity of the less-or-equal relationship.
+ ///
+ /// \returns true if *this <= RHS when considered signed.
+ bool sle(uint64_t RHS) const { return !sgt(RHS); }
+
+ /// Unsigned greater than comparison
+ ///
+ /// Regards both *this and RHS as unsigned quantities and compares them for
+ /// the validity of the greater-than relationship.
+ ///
+ /// \returns true if *this > RHS when both are considered unsigned.
+ bool ugt(const APInt &RHS) const { return !ule(RHS); }
+
+ /// Unsigned greater than comparison
+ ///
+ /// Regards *this as an unsigned quantity and compares it with RHS for
+ /// the validity of the greater-than relationship.
+ ///
+ /// \returns true if *this > RHS when considered unsigned.
+ bool ugt(uint64_t RHS) const {
+ // Only need to check active bits if not a single word.
+ return (!isSingleWord() && getActiveBits() > 64) || getZExtValue() > RHS;
+ }
+
+ /// Signed greater than comparison
+ ///
+ /// Regards both *this and RHS as signed quantities and compares them for the
+ /// validity of the greater-than relationship.
+ ///
+ /// \returns true if *this > RHS when both are considered signed.
+ bool sgt(const APInt &RHS) const { return !sle(RHS); }
+
+ /// Signed greater than comparison
+ ///
+ /// Regards *this as a signed quantity and compares it with RHS for
+ /// the validity of the greater-than relationship.
+ ///
+ /// \returns true if *this > RHS when considered signed.
+ bool sgt(int64_t RHS) const {
+ return (!isSingleWord() && getSignificantBits() > 64)
+ ? !isNegative()
+ : getSExtValue() > RHS;
+ }
+
+ /// Unsigned greater or equal comparison
+ ///
+ /// Regards both *this and RHS as unsigned quantities and compares them for
+ /// validity of the greater-or-equal relationship.
+ ///
+ /// \returns true if *this >= RHS when both are considered unsigned.
+ bool uge(const APInt &RHS) const { return !ult(RHS); }
+
+ /// Unsigned greater or equal comparison
+ ///
+ /// Regards *this as an unsigned quantity and compares it with RHS for
+ /// the validity of the greater-or-equal relationship.
+ ///
+ /// \returns true if *this >= RHS when considered unsigned.
+ bool uge(uint64_t RHS) const { return !ult(RHS); }
+
+ /// Signed greater or equal comparison
+ ///
+ /// Regards both *this and RHS as signed quantities and compares them for
+ /// validity of the greater-or-equal relationship.
+ ///
+ /// \returns true if *this >= RHS when both are considered signed.
+ bool sge(const APInt &RHS) const { return !slt(RHS); }
+
+ /// Signed greater or equal comparison
+ ///
+ /// Regards *this as a signed quantity and compares it with RHS for
+ /// the validity of the greater-or-equal relationship.
+ ///
+ /// \returns true if *this >= RHS when considered signed.
+ bool sge(int64_t RHS) const { return !slt(RHS); }
+
+ /// This operation tests if there are any pairs of corresponding bits
+ /// between this APInt and RHS that are both set.
+ bool intersects(const APInt &RHS) const {
+ assert(BitWidth == RHS.BitWidth && "Bit widths must be the same");
+ if (isSingleWord())
+ return (U.VAL & RHS.U.VAL) != 0;
+ return intersectsSlowCase(RHS);
+ }
+
+ /// This operation checks that all bits set in this APInt are also set in RHS.
+ bool isSubsetOf(const APInt &RHS) const {
+ assert(BitWidth == RHS.BitWidth && "Bit widths must be the same");
+ if (isSingleWord())
+ return (U.VAL & ~RHS.U.VAL) == 0;
+ return isSubsetOfSlowCase(RHS);
+ }
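+
+ // Illustrative sketch (not upstream documentation):
+ //
+ //   APInt A(8, 0b0110), B(8, 0b0010), C(8, 0b1000);
+ //   B.isSubsetOf(A); // true:  every set bit of B is also set in A
+ //   A.intersects(C); // false: no common set bit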
+
+ /// @}
+ /// \name Resizing Operators
+ /// @{
+
+ /// Truncate to new width.
+ ///
+ /// Truncate the APInt to a specified width. It is an error to specify a width
+ /// that is greater than or equal to the current width.
+ APInt trunc(unsigned width) const;
+
+ /// Truncate to new width with unsigned saturation.
+ ///
+ /// If the APInt, treated as unsigned integer, can be losslessly truncated to
+ /// the new bitwidth, then return truncated APInt. Else, return max value.
+ APInt truncUSat(unsigned width) const;
+
+ /// Truncate to new width with signed saturation.
+ ///
+ /// If this APInt, treated as signed integer, can be losslessly truncated to
+ /// the new bitwidth, then return truncated APInt. Else, return either
+ /// signed min value if the APInt was negative, or signed max value.
+ APInt truncSSat(unsigned width) const;
+
+ /// Sign extend to a new width.
+ ///
+ /// This operation sign extends the APInt to a new width. If the high order
+ /// bit is set, the fill on the left will be done with 1 bits, otherwise zero.
+ /// It is an error to specify a width that is less than or equal to the
+ /// current width.
+ APInt sext(unsigned width) const;
+
+ /// Zero extend to a new width.
+ ///
+ /// This operation zero extends the APInt to a new width. The high order bits
+ /// are filled with 0 bits. It is an error to specify a width that is less
+ /// than or equal to the current width.
+ APInt zext(unsigned width) const;
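+
+ // Illustrative values for the extension operations (a sketch, not upstream
+ // documentation):
+ //
+ //   APInt(8, 0x80).zext(16); // 0x0080: filled with zeros
+ //   APInt(8, 0x80).sext(16); // 0xFF80: the high bit was set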
+
+ /// Sign extend or truncate to width
+ ///
+ /// Make this APInt have the bit width given by \p width. The value is sign
+ /// extended, truncated, or left alone to make it that width.
+ APInt sextOrTrunc(unsigned width) const;
+
+ /// Zero extend or truncate to width
+ ///
+ /// Make this APInt have the bit width given by \p width. The value is zero
+ /// extended, truncated, or left alone to make it that width.
+ APInt zextOrTrunc(unsigned width) const;
+
+ /// Truncate to width
+ ///
+ /// Make this APInt have the bit width given by \p width. The value is
+ /// truncated or left alone to make it that width.
+ APInt truncOrSelf(unsigned width) const;
+
+ /// Sign extend or truncate to width
+ ///
+ /// Make this APInt have the bit width given by \p width. The value is sign
+ /// extended, or left alone to make it that width.
+ APInt sextOrSelf(unsigned width) const;
+
+ /// Zero extend or truncate to width
+ ///
+ /// Make this APInt have the bit width given by \p width. The value is zero
+ /// extended, or left alone to make it that width.
+ APInt zextOrSelf(unsigned width) const;
+
+ /// @}
+ /// \name Bit Manipulation Operators
+ /// @{
+
+ /// Set every bit to 1.
+ void setAllBits() {
+ if (isSingleWord())
+ U.VAL = WORDTYPE_MAX;
+ else
+ // Set all the bits in all the words.
+ memset(U.pVal, -1, getNumWords() * APINT_WORD_SIZE);
+ // Clear the unused ones
+ clearUnusedBits();
+ }
+
+ /// Set the bit at the position given by \p BitPosition to 1.
+ void setBit(unsigned BitPosition) {
+ assert(BitPosition < BitWidth && "BitPosition out of range");
+ WordType Mask = maskBit(BitPosition);
+ if (isSingleWord())
+ U.VAL |= Mask;
+ else
+ U.pVal[whichWord(BitPosition)] |= Mask;
+ }
+
+ /// Set the sign bit to 1.
+ void setSignBit() { setBit(BitWidth - 1); }
+
+ /// Set a given bit to a given value.
+ void setBitVal(unsigned BitPosition, bool BitValue) {
+ if (BitValue)
+ setBit(BitPosition);
+ else
+ clearBit(BitPosition);
+ }
+
+ /// Set the bits from loBit (inclusive) to hiBit (exclusive) to 1.
+ /// This function handles "wrap" case when \p loBit >= \p hiBit, and calls
+ /// setBits when \p loBit < \p hiBit.
+ /// For \p loBit == \p hiBit wrap case, set every bit to 1.
+ void setBitsWithWrap(unsigned loBit, unsigned hiBit) {
+ assert(hiBit <= BitWidth && "hiBit out of range");
+ assert(loBit <= BitWidth && "loBit out of range");
+ if (loBit < hiBit) {
+ setBits(loBit, hiBit);
+ return;
+ }
+ setLowBits(hiBit);
+ setHighBits(BitWidth - loBit);
+ }
+
+ /// Set the bits from loBit (inclusive) to hiBit (exclusive) to 1.
+ /// This function handles case when \p loBit <= \p hiBit.
+ void setBits(unsigned loBit, unsigned hiBit) {
+ assert(hiBit <= BitWidth && "hiBit out of range");
+ assert(loBit <= BitWidth && "loBit out of range");
+ assert(loBit <= hiBit && "loBit greater than hiBit");
+ if (loBit == hiBit)
+ return;
+ if (loBit < APINT_BITS_PER_WORD && hiBit <= APINT_BITS_PER_WORD) {
+ uint64_t mask = WORDTYPE_MAX >> (APINT_BITS_PER_WORD - (hiBit - loBit));
+ mask <<= loBit;
+ if (isSingleWord())
+ U.VAL |= mask;
+ else
+ U.pVal[0] |= mask;
+ } else {
+ setBitsSlowCase(loBit, hiBit);
+ }
+ }
+
+ /// Set the top bits starting from loBit.
+ void setBitsFrom(unsigned loBit) { return setBits(loBit, BitWidth); }
+
+ /// Set the bottom loBits bits.
+ void setLowBits(unsigned loBits) { return setBits(0, loBits); }
+
+ /// Set the top hiBits bits.
+ void setHighBits(unsigned hiBits) {
+ return setBits(BitWidth - hiBits, BitWidth);
+ }
+
+ /// Set every bit to 0.
+ void clearAllBits() {
+ if (isSingleWord())
+ U.VAL = 0;
+ else
+ memset(U.pVal, 0, getNumWords() * APINT_WORD_SIZE);
+ }
+
+ /// Set a given bit to 0.
+ ///
+ /// Set the bit at the position given by \p BitPosition to 0.
+ void clearBit(unsigned BitPosition) {
+ assert(BitPosition < BitWidth && "BitPosition out of range");
+ WordType Mask = ~maskBit(BitPosition);
+ if (isSingleWord())
+ U.VAL &= Mask;
+ else
+ U.pVal[whichWord(BitPosition)] &= Mask;
+ }
+
+ /// Set bottom loBits bits to 0.
+ void clearLowBits(unsigned loBits) {
+ assert(loBits <= BitWidth && "More bits than bitwidth");
+ APInt Keep = getHighBitsSet(BitWidth, BitWidth - loBits);
+ *this &= Keep;
+ }
+
+ /// Set the sign bit to 0.
+ void clearSignBit() { clearBit(BitWidth - 1); }
+
+ /// Toggle every bit to its opposite value.
+ void flipAllBits() {
+ if (isSingleWord()) {
+ U.VAL ^= WORDTYPE_MAX;
+ clearUnusedBits();
+ } else {
+ flipAllBitsSlowCase();
+ }
+ }
+
+ /// Toggles a given bit to its opposite value.
+ ///
+ /// Toggle the bit at the position given by \p bitPosition to its opposite
+ /// value.
+ void flipBit(unsigned bitPosition);
+
+ /// Negate this APInt in place.
+ void negate() {
+ flipAllBits();
+ ++(*this);
+ }
+
+ /// Insert the bits from a smaller APInt starting at bitPosition.
+ void insertBits(const APInt &SubBits, unsigned bitPosition);
+ void insertBits(uint64_t SubBits, unsigned bitPosition, unsigned numBits);
+
+ /// Return an APInt with the extracted bits [bitPosition,bitPosition+numBits).
+ APInt extractBits(unsigned numBits, unsigned bitPosition) const;
+ uint64_t extractBitsAsZExtValue(unsigned numBits, unsigned bitPosition) const;
+
+ /// @}
+ /// \name Value Characterization Functions
+ /// @{
+
+ /// Return the number of bits in the APInt.
+ unsigned getBitWidth() const { return BitWidth; }
+
+ /// Get the number of words.
+ ///
+ /// Here one word's bitwidth equals that of uint64_t.
+ ///
+ /// \returns the number of words to hold the integer value of this APInt.
+ unsigned getNumWords() const { return getNumWords(BitWidth); }
+
+ /// Get the number of words.
+ ///
+ /// *NOTE* Here one word's bitwidth equals that of uint64_t.
+ ///
+ /// \returns the number of words to hold the integer value with a given bit
+ /// width.
+ static unsigned getNumWords(unsigned BitWidth) {
+ return ((uint64_t)BitWidth + APINT_BITS_PER_WORD - 1) / APINT_BITS_PER_WORD;
+ }
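+
+ // Illustrative values (a sketch): one word covers up to 64 bits.
+ //
+ //   APInt::getNumWords(64); // 1
+ //   APInt::getNumWords(65); // 2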
+
+ /// Compute the number of active bits in the value
+ ///
+ /// This function returns the number of active bits which is defined as the
+ /// bit width minus the number of leading zeros. This is used in several
+ /// computations to see how "wide" the value is.
+ unsigned getActiveBits() const { return BitWidth - countLeadingZeros(); }
+
+ /// Compute the number of active words in the value of this APInt.
+ ///
+ /// This is used in conjunction with getRawData to extract the raw value of
+ /// the APInt.
+ unsigned getActiveWords() const {
+ unsigned numActiveBits = getActiveBits();
+ return numActiveBits ? whichWord(numActiveBits - 1) + 1 : 1;
+ }
+
+ /// Get the minimum bit size for this signed APInt
+ ///
+ /// Computes the minimum bit width for this APInt while considering it to be a
+ /// signed (and probably negative) value. If the value is not negative, this
+ /// function returns the same value as getActiveBits()+1. Otherwise, it
+ /// returns the smallest bit width that will retain the negative value. For
+ /// example, -1 can be written as 0b1 or 0xFFFFFFFFFF. 0b1 is shorter and so
+ /// for -1, this function will always return 1.
+ unsigned getSignificantBits() const {
+ return BitWidth - getNumSignBits() + 1;
+ }
+
+ /// NOTE: This is soft-deprecated. Please use `getSignificantBits()` instead.
+ unsigned getMinSignedBits() const { return getSignificantBits(); }
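+ // Illustrative values for a 32-bit APInt:
+ //   APInt(32, 255).getSignificantBits() == 9 // 8 value bits plus a sign bit
+ //   APInt(32, -1, /*isSigned=*/true).getSignificantBits() == 1 // all sign bits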
+
+ /// Get the zero-extended value.
+ ///
+ /// This method attempts to return the value of this APInt as a zero extended
+ /// uint64_t. The bitwidth must be <= 64 or the value must fit within a
+ /// uint64_t. Otherwise an assertion will result.
+ uint64_t getZExtValue() const {
+ if (isSingleWord())
+ return U.VAL;
+ assert(getActiveBits() <= 64 && "Too many bits for uint64_t");
+ return U.pVal[0];
+ }
+
+ /// Get the sign-extended value.
+ ///
+ /// This method attempts to return the value of this APInt as a sign extended
+ /// int64_t. The bit width must be <= 64 or the value must fit within an
+ /// int64_t. Otherwise an assertion will result.
+ int64_t getSExtValue() const {
+ if (isSingleWord())
+ return SignExtend64(U.VAL, BitWidth);
+ assert(getSignificantBits() <= 64 && "Too many bits for int64_t");
+ return int64_t(U.pVal[0]);
+ }
+
+ /// Get bits required for string value.
+ ///
+ /// This method determines how many bits are required to hold the APInt
+ /// equivalent of the string given by \p str.
+ static unsigned getBitsNeeded(StringRef str, uint8_t radix);
+
+ /// The APInt version of the countLeadingZeros functions in
+ /// MathExtras.h.
+ ///
+ /// It counts the number of zeros from the most significant bit to the first
+ /// one bit.
+ ///
+ /// \returns BitWidth if the value is zero, otherwise returns the number of
+ /// zeros from the most significant bit to the first one bit.
+ unsigned countLeadingZeros() const {
+ if (isSingleWord()) {
+ unsigned unusedBits = APINT_BITS_PER_WORD - BitWidth;
+ return llvm::countLeadingZeros(U.VAL) - unusedBits;
+ }
+ return countLeadingZerosSlowCase();
+ }
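+ // Illustrative: in a 16-bit APInt holding 0x00F0 the highest set bit is
+ // bit 7, so countLeadingZeros() == 8; for the value 0 it returns 16.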
+
+ /// Count the number of leading one bits.
+ ///
+ /// This function is an APInt version of the countLeadingOnes
+ /// functions in MathExtras.h. It counts the number of ones from the most
+ /// significant bit to the first zero bit.
+ ///
+ /// \returns 0 if the high order bit is not set, otherwise returns the number
+ /// of 1 bits from the most significant bit to the first zero bit.
+ unsigned countLeadingOnes() const {
+ if (isSingleWord()) {
+ if (LLVM_UNLIKELY(BitWidth == 0))
+ return 0;
+ return llvm::countLeadingOnes(U.VAL << (APINT_BITS_PER_WORD - BitWidth));
+ }
+ return countLeadingOnesSlowCase();
+ }
+
+ /// Computes the number of leading bits of this APInt that are equal to its
+ /// sign bit.
+ unsigned getNumSignBits() const {
+ return isNegative() ? countLeadingOnes() : countLeadingZeros();
+ }
+
+ /// Count the number of trailing zero bits.
+ ///
+ /// This function is an APInt version of the countTrailingZeros
+ /// functions in MathExtras.h. It counts the number of zeros from the least
+ /// significant bit to the first set bit.
+ ///
+ /// \returns BitWidth if the value is zero, otherwise returns the number of
+ /// zeros from the least significant bit to the first one bit.
+ unsigned countTrailingZeros() const {
+ if (isSingleWord()) {
+ unsigned TrailingZeros = llvm::countTrailingZeros(U.VAL);
+ return (TrailingZeros > BitWidth ? BitWidth : TrailingZeros);
+ }
+ return countTrailingZerosSlowCase();
+ }
+
+ /// Count the number of trailing one bits.
+ ///
+ /// This function is an APInt version of the countTrailingOnes
+ /// functions in MathExtras.h. It counts the number of ones from the least
+ /// significant bit to the first zero bit.
+ ///
+ /// \returns BitWidth if the value is all ones, otherwise returns the number
+ /// of ones from the least significant bit to the first zero bit.
+ unsigned countTrailingOnes() const {
+ if (isSingleWord())
+ return llvm::countTrailingOnes(U.VAL);
+ return countTrailingOnesSlowCase();
+ }
+
+ /// Count the number of bits set.
+ ///
+ /// This function is an APInt version of the countPopulation functions
+ /// in MathExtras.h. It counts the number of 1 bits in the APInt value.
+ ///
+ /// \returns 0 if the value is zero, otherwise returns the number of set bits.
+ unsigned countPopulation() const {
+ if (isSingleWord())
+ return llvm::countPopulation(U.VAL);
+ return countPopulationSlowCase();
+ }
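+ // Illustrative: APInt(8, 0xA5) has bits 0, 2, 5 and 7 set, so
+ // countPopulation() == 4.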
+
+ /// @}
+ /// \name Conversion Functions
+ /// @{
+ void print(raw_ostream &OS, bool isSigned) const;
+
+ /// Converts an APInt to a string and appends it to Str. Str is commonly a
+ /// SmallString.
+ void toString(SmallVectorImpl<char> &Str, unsigned Radix, bool Signed,
+ bool formatAsCLiteral = false) const;
+
+ /// Considers the APInt to be unsigned and converts it into a string in the
+ /// radix given. The radix can be 2, 8, 10, 16, or 36.
+ void toStringUnsigned(SmallVectorImpl<char> &Str, unsigned Radix = 10) const {
+ toString(Str, Radix, false, false);
+ }
+
+ /// Considers the APInt to be signed and converts it into a string in the
+ /// radix given. The radix can be 2, 8, 10, 16, or 36.
+ void toStringSigned(SmallVectorImpl<char> &Str, unsigned Radix = 10) const {
+ toString(Str, Radix, true, false);
+ }
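+ // Usage sketch (illustrative; assumes SmallString from
+ // llvm/ADT/SmallString.h):
+ //   SmallString<16> S;
+ //   APInt(8, 0xFF).toStringSigned(S);   // S == "-1"
+ //   APInt(8, 0xFF).toStringUnsigned(S); // appends "255", so S == "-1255"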
+
+ /// \returns a byte-swapped representation of this APInt Value.
+ APInt byteSwap() const;
+
+ /// \returns the value with the bit representation reversed of this APInt
+ /// Value.
+ APInt reverseBits() const;
+
+ /// Converts this APInt to a double value.
+ double roundToDouble(bool isSigned) const;
+
+ /// Converts this unsigned APInt to a double value.
+ double roundToDouble() const { return roundToDouble(false); }
+
+ /// Converts this signed APInt to a double value.
+ double signedRoundToDouble() const { return roundToDouble(true); }
+
+ /// Converts APInt bits to a double.
+ ///
+ /// The conversion does not do a translation from integer to double; it just
+ /// re-interprets the bits as a double. Note that it is valid to do this on
+ /// any bit width. Exactly 64 bits will be translated.
+ double bitsToDouble() const { return BitsToDouble(getWord(0)); }
+
+ /// Converts APInt bits to a float.
+ ///
+ /// The conversion does not do a translation from integer to float; it just
+ /// re-interprets the bits as a float. Note that it is valid to do this on
+ /// any bit width. Exactly 32 bits will be translated.
+ float bitsToFloat() const {
+ return BitsToFloat(static_cast<uint32_t>(getWord(0)));
+ }
+
+ /// Converts a double to APInt bits.
+ ///
+ /// The conversion does not do a translation from double to integer; it just
+ /// re-interprets the bits of the double.
+ static APInt doubleToBits(double V) {
+ return APInt(sizeof(double) * CHAR_BIT, DoubleToBits(V));
+ }
+
+ /// Converts a float to APInt bits.
+ ///
+ /// The conversion does not do a translation from float to integer; it just
+ /// re-interprets the bits of the float.
+ static APInt floatToBits(float V) {
+ return APInt(sizeof(float) * CHAR_BIT, FloatToBits(V));
+ }
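+ // These four are pure bit casts; an illustrative round trip:
+ //   APInt Bits = APInt::doubleToBits(1.0); // 64-bit value 0x3FF0000000000000
+ //   double D = Bits.bitsToDouble();        // D == 1.0 again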
+
+ /// @}
+ /// \name Mathematics Operations
+ /// @{
+
+ /// \returns the floor log base 2 of this APInt.
+ unsigned logBase2() const { return getActiveBits() - 1; }
+
+ /// \returns the ceil log base 2 of this APInt.
+ unsigned ceilLogBase2() const {
+ APInt temp(*this);
+ --temp;
+ return temp.getActiveBits();
+ }
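+ // Illustrative: for APInt(32, 16), logBase2() and ceilLogBase2() are both 4;
+ // for APInt(32, 17), logBase2() == 4 but ceilLogBase2() == 5.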
+
+ /// \returns the nearest log base 2 of this APInt. Ties round up.
+ ///
+ /// NOTE: When we have a BitWidth of 1, we define:
+ ///
+ /// log2(0) = UINT32_MAX
+ /// log2(1) = 0
+ ///
+ /// to get around any mathematical concerns resulting from
+ /// referencing 2 in a space where 2 does not exist.
+ unsigned nearestLogBase2() const;
+
+ /// \returns the log base 2 of this APInt if it is an exact power of two, -1
+ /// otherwise.
+ int32_t exactLogBase2() const {
+ if (!isPowerOf2())
+ return -1;
+ return logBase2();
+ }
+
+ /// Compute the square root.
+ APInt sqrt() const;
+
+ /// Get the absolute value. If *this is < 0 then return -(*this), otherwise
+ /// *this. Note that the "most negative" signed number (e.g. -128 for an
+ /// 8-bit-wide APInt) is unchanged due to how negation works.
+ APInt abs() const {
+ if (isNegative())
+ return -(*this);
+ return *this;
+ }
+
+ /// \returns the multiplicative inverse for a given modulo.
+ APInt multiplicativeInverse(const APInt &modulo) const;
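+ // Illustrative (assuming the usual extended-Euclid semantics, where a
+ // result of 0 means no inverse exists):
+ //   APInt(8, 3).multiplicativeInverse(APInt(8, 7)) // 5: 3 * 5 == 1 (mod 7)
+ //   APInt(8, 2).multiplicativeInverse(APInt(8, 4)) // 0: gcd(2, 4) != 1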
+
+ /// @}
+ /// \name Building-block Operations for APInt and APFloat
+ /// @{
+
+ // These building block operations operate on a representation of arbitrary
+ // precision, two's-complement, bignum integer values. They should be
+ // sufficient to implement APInt and APFloat bignum requirements. Inputs are
+ // generally a pointer to the base of an array of integer parts, representing
+ // an unsigned bignum, and a count of how many parts there are.
+
+ /// Sets the least significant part of a bignum to the input value, and zeroes
+ /// out higher parts.
+ static void tcSet(WordType *, WordType, unsigned);
+
+ /// Assign one bignum to another.
+ static void tcAssign(WordType *, const WordType *, unsigned);
+
+ /// Returns true if a bignum is zero, false otherwise.
+ static bool tcIsZero(const WordType *, unsigned);
+
+ /// Extract the given bit of a bignum; returns 0 or 1. Zero-based.
+ static int tcExtractBit(const WordType *, unsigned bit);
+
+ /// Copy the bit vector of width srcBITS from SRC, starting at bit srcLSB, to
+ /// DST, of dstCOUNT parts, such that the bit srcLSB becomes the least
+ /// significant bit of DST. All high bits above srcBITS in DST are
+ /// zero-filled.
+ static void tcExtract(WordType *, unsigned dstCount, const WordType *,
+ unsigned srcBits, unsigned srcLSB);
+
+ /// Set the given bit of a bignum. Zero-based.
+ static void tcSetBit(WordType *, unsigned bit);
+
+ /// Clear the given bit of a bignum. Zero-based.
+ static void tcClearBit(WordType *, unsigned bit);
+
+ /// Returns the bit number of the least or most significant set bit of a
+ /// number. If the input number has no bits set, -1U is returned.
+ static unsigned tcLSB(const WordType *, unsigned n);
+ static unsigned tcMSB(const WordType *parts, unsigned n);
+
+ /// Negate a bignum in-place.
+ static void tcNegate(WordType *, unsigned);
+
+ /// DST += RHS + CARRY where CARRY is zero or one. Returns the carry flag.
+ static WordType tcAdd(WordType *, const WordType *, WordType carry, unsigned);
+ /// DST += RHS. Returns the carry flag.
+ static WordType tcAddPart(WordType *, WordType, unsigned);
+
+ /// DST -= RHS + CARRY where CARRY is zero or one. Returns the carry flag.
+ static WordType tcSubtract(WordType *, const WordType *, WordType carry,
+ unsigned);
+ /// DST -= RHS. Returns the carry flag.
+ static WordType tcSubtractPart(WordType *, WordType, unsigned);
+
+ /// DST += SRC * MULTIPLIER + PART if add is true
+ /// DST = SRC * MULTIPLIER + PART if add is false
+ ///
+ /// Requires 0 <= DSTPARTS <= SRCPARTS + 1. If DST overlaps SRC they must
+ /// start at the same point, i.e. DST == SRC.
+ ///
+ /// If DSTPARTS == SRCPARTS + 1 no overflow occurs and zero is returned.
+ /// Otherwise DST is filled with the least significant DSTPARTS parts of the
+ /// result, and if all of the omitted higher parts were zero return zero,
+ /// otherwise overflow occurred and return one.
+ static int tcMultiplyPart(WordType *dst, const WordType *src,
+ WordType multiplier, WordType carry,
+ unsigned srcParts, unsigned dstParts, bool add);
+
+ /// DST = LHS * RHS, where DST has the same width as the operands and is
+ /// filled with the least significant parts of the result. Returns one if
+ /// overflow occurred, otherwise zero. DST must be disjoint from both
+ /// operands.
+ static int tcMultiply(WordType *, const WordType *, const WordType *,
+ unsigned);
+
+ /// DST = LHS * RHS, where DST has width the sum of the widths of the
+ /// operands. No overflow occurs. DST must be disjoint from both operands.
+ static void tcFullMultiply(WordType *, const WordType *, const WordType *,
+ unsigned, unsigned);
+
+ /// If RHS is zero LHS and REMAINDER are left unchanged, return one.
+ /// Otherwise set LHS to LHS / RHS with the fractional part discarded, set
+ /// REMAINDER to the remainder, return zero. i.e.
+ ///
+ /// OLD_LHS = RHS * LHS + REMAINDER
+ ///
+ /// SCRATCH is a bignum of the same size as the operands and result for use by
+ /// the routine; its contents need not be initialized and are destroyed. LHS,
+ /// REMAINDER and SCRATCH must be distinct.
+ static int tcDivide(WordType *lhs, const WordType *rhs, WordType *remainder,
+ WordType *scratch, unsigned parts);
+
+ /// Shift a bignum left Count bits. Shifted in bits are zero. There are no
+ /// restrictions on Count.
+ static void tcShiftLeft(WordType *, unsigned Words, unsigned Count);
+
+ /// Shift a bignum right Count bits. Shifted in bits are zero. There are no
+ /// restrictions on Count.
+ static void tcShiftRight(WordType *, unsigned Words, unsigned Count);
+
+ /// Comparison (unsigned) of two bignums.
+ static int tcCompare(const WordType *, const WordType *, unsigned);
+
+ /// Increment a bignum in-place. Return the carry flag.
+ static WordType tcIncrement(WordType *dst, unsigned parts) {
+ return tcAddPart(dst, 1, parts);
+ }
+
+ /// Decrement a bignum in-place. Return the borrow flag.
+ static WordType tcDecrement(WordType *dst, unsigned parts) {
+ return tcSubtractPart(dst, 1, parts);
+ }
+
+ /// Used to insert APInt objects, or objects that contain APInt objects, into
+ /// FoldingSets.
+ void Profile(FoldingSetNodeID &id) const;
+
+ /// Debug method.
+ void dump() const;
+
+ /// Returns whether this instance allocated memory.
+ bool needsCleanup() const { return !isSingleWord(); }
+
+private:
+ /// This union is used to store the integer value. When the
+ /// integer bit-width <= 64, it uses VAL, otherwise it uses pVal.
+ union {
+ uint64_t VAL; ///< Used to store the <= 64 bits integer value.
+ uint64_t *pVal; ///< Used to store the >64 bits integer value.
+ } U;
+
+ unsigned BitWidth; ///< The number of bits in this APInt.
+
+ friend struct DenseMapInfo<APInt, void>;
+ friend class APSInt;
+
+ /// This constructor is used only internally for speed of construction of
+ /// temporaries. It is unsafe since it takes ownership of the pointer, so it
+ /// is not public.
+ APInt(uint64_t *val, unsigned bits) : BitWidth(bits) { U.pVal = val; }
+
+ /// Determine which word a bit is in.
+ ///
+ /// \returns the word position for the specified bit position.
+ static unsigned whichWord(unsigned bitPosition) {
+ return bitPosition / APINT_BITS_PER_WORD;
+ }
+
+ /// Determine which bit in a word the specified bit position is in.
+ static unsigned whichBit(unsigned bitPosition) {
+ return bitPosition % APINT_BITS_PER_WORD;
+ }
+
+ /// Get a single bit mask.
+ ///
+ /// \returns a uint64_t with only bit at "whichBit(bitPosition)" set
+ /// This method generates and returns a uint64_t (word) mask for a single
+ /// bit at a specific bit position. This is used to mask the bit in the
+ /// corresponding word.
+ static uint64_t maskBit(unsigned bitPosition) {
+ return 1ULL << whichBit(bitPosition);
+ }
+
+ /// Clear unused high order bits
+ ///
+ /// This method is used internally to clear the top "N" bits in the high order
+ /// word that are not used by the APInt. This is needed after the most
+ /// significant word is assigned a value to ensure that those bits are
+ /// zeroed out.
+ APInt &clearUnusedBits() {
+ // Compute how many bits are used in the final word.
+ unsigned WordBits = ((BitWidth - 1) % APINT_BITS_PER_WORD) + 1;
+
+ // Mask out the high bits.
+ uint64_t mask = WORDTYPE_MAX >> (APINT_BITS_PER_WORD - WordBits);
+ if (LLVM_UNLIKELY(BitWidth == 0))
+ mask = 0;
+
+ if (isSingleWord())
+ U.VAL &= mask;
+ else
+ U.pVal[getNumWords() - 1] &= mask;
+ return *this;
+ }
+
+ /// Get the word corresponding to a bit position
+ /// \returns the corresponding word for the specified bit position.
+ uint64_t getWord(unsigned bitPosition) const {
+ return isSingleWord() ? U.VAL : U.pVal[whichWord(bitPosition)];
+ }
+
+ /// Utility method to change the bit width of this APInt to a new bit width,
+ /// allocating and/or deallocating as necessary. There is no guarantee on the
+ /// value of any bits upon return. Caller should populate the bits after.
+ void reallocate(unsigned NewBitWidth);
+
+ /// Convert a char array into an APInt
+ ///
+ /// \param radix 2, 8, 10, 16, or 36
+ /// Converts a string into a number. The string must be non-empty
+ /// and well-formed as a number of the given base. The bit-width
+ /// must be sufficient to hold the result.
+ ///
+ /// This is used by the constructors that take string arguments.
+ ///
+ /// StringRef::getAsInteger is superficially similar but (1) does
+ /// not assume that the string is well-formed and (2) grows the
+ /// result to hold the input.
+ void fromString(unsigned numBits, StringRef str, uint8_t radix);
+
+ /// An internal division function for dividing APInts.
+ ///
+ /// This is used by the toString method to divide by the radix. It simply
+ /// provides a more convenient form of divide for internal use since KnuthDiv
+ /// has specific constraints on its inputs. If those constraints are not met
+ /// then it provides a simpler form of divide.
+ static void divide(const WordType *LHS, unsigned lhsWords,
+ const WordType *RHS, unsigned rhsWords, WordType *Quotient,
+ WordType *Remainder);
+
+ /// out-of-line slow case for inline constructor
+ void initSlowCase(uint64_t val, bool isSigned);
+
+ /// shared code between two array constructors
+ void initFromArray(ArrayRef<uint64_t> array);
+
+ /// out-of-line slow case for inline copy constructor
+ void initSlowCase(const APInt &that);
+
+ /// out-of-line slow case for shl
+ void shlSlowCase(unsigned ShiftAmt);
+
+ /// out-of-line slow case for lshr.
+ void lshrSlowCase(unsigned ShiftAmt);
+
+ /// out-of-line slow case for ashr.
+ void ashrSlowCase(unsigned ShiftAmt);
+
+ /// out-of-line slow case for operator=
+ void assignSlowCase(const APInt &RHS);
+
+ /// out-of-line slow case for operator==
+ bool equalSlowCase(const APInt &RHS) const LLVM_READONLY;
+
+ /// out-of-line slow case for countLeadingZeros
+ unsigned countLeadingZerosSlowCase() const LLVM_READONLY;
+
+ /// out-of-line slow case for countLeadingOnes.
+ unsigned countLeadingOnesSlowCase() const LLVM_READONLY;
+
+ /// out-of-line slow case for countTrailingZeros.
+ unsigned countTrailingZerosSlowCase() const LLVM_READONLY;
+
+ /// out-of-line slow case for countTrailingOnes
+ unsigned countTrailingOnesSlowCase() const LLVM_READONLY;
+
+ /// out-of-line slow case for countPopulation
+ unsigned countPopulationSlowCase() const LLVM_READONLY;
+
+ /// out-of-line slow case for intersects.
+ bool intersectsSlowCase(const APInt &RHS) const LLVM_READONLY;
+
+ /// out-of-line slow case for isSubsetOf.
+ bool isSubsetOfSlowCase(const APInt &RHS) const LLVM_READONLY;
+
+ /// out-of-line slow case for setBits.
+ void setBitsSlowCase(unsigned loBit, unsigned hiBit);
+
+ /// out-of-line slow case for flipAllBits.
+ void flipAllBitsSlowCase();
+
+ /// out-of-line slow case for concat.
+ APInt concatSlowCase(const APInt &NewLSB) const;
+
+ /// out-of-line slow case for operator&=.
+ void andAssignSlowCase(const APInt &RHS);
+
+ /// out-of-line slow case for operator|=.
+ void orAssignSlowCase(const APInt &RHS);
+
+ /// out-of-line slow case for operator^=.
+ void xorAssignSlowCase(const APInt &RHS);
+
+ /// Unsigned comparison. Returns -1, 0, or 1 if this APInt is less than, equal
+ /// to, or greater than RHS.
+ int compare(const APInt &RHS) const LLVM_READONLY;
+
+ /// Signed comparison. Returns -1, 0, or 1 if this APInt is less than, equal
+ /// to, or greater than RHS.
+ int compareSigned(const APInt &RHS) const LLVM_READONLY;
+
+ /// @}
+};
+
+inline bool operator==(uint64_t V1, const APInt &V2) { return V2 == V1; }
+
+inline bool operator!=(uint64_t V1, const APInt &V2) { return V2 != V1; }
+
+/// Unary bitwise complement operator.
+///
+/// \returns an APInt that is the bitwise complement of \p v.
+inline APInt operator~(APInt v) {
+ v.flipAllBits();
+ return v;
+}
+
+inline APInt operator&(APInt a, const APInt &b) {
+ a &= b;
+ return a;
+}
+
+inline APInt operator&(const APInt &a, APInt &&b) {
+ b &= a;
+ return std::move(b);
+}
+
+inline APInt operator&(APInt a, uint64_t RHS) {
+ a &= RHS;
+ return a;
+}
+
+inline APInt operator&(uint64_t LHS, APInt b) {
+ b &= LHS;
+ return b;
+}
+
+inline APInt operator|(APInt a, const APInt &b) {
+ a |= b;
+ return a;
+}
+
+inline APInt operator|(const APInt &a, APInt &&b) {
+ b |= a;
+ return std::move(b);
+}
+
+inline APInt operator|(APInt a, uint64_t RHS) {
+ a |= RHS;
+ return a;
+}
+
+inline APInt operator|(uint64_t LHS, APInt b) {
+ b |= LHS;
+ return b;
+}
+
+inline APInt operator^(APInt a, const APInt &b) {
+ a ^= b;
+ return a;
+}
+
+inline APInt operator^(const APInt &a, APInt &&b) {
+ b ^= a;
+ return std::move(b);
+}
+
+inline APInt operator^(APInt a, uint64_t RHS) {
+ a ^= RHS;
+ return a;
+}
+
+inline APInt operator^(uint64_t LHS, APInt b) {
+ b ^= LHS;
+ return b;
+}
+
+inline raw_ostream &operator<<(raw_ostream &OS, const APInt &I) {
+ I.print(OS, true);
+ return OS;
+}
+
+inline APInt operator-(APInt v) {
+ v.negate();
+ return v;
+}
+
+inline APInt operator+(APInt a, const APInt &b) {
+ a += b;
+ return a;
+}
+
+inline APInt operator+(const APInt &a, APInt &&b) {
+ b += a;
+ return std::move(b);
+}
+
+inline APInt operator+(APInt a, uint64_t RHS) {
+ a += RHS;
+ return a;
+}
+
+inline APInt operator+(uint64_t LHS, APInt b) {
+ b += LHS;
+ return b;
+}
+
+inline APInt operator-(APInt a, const APInt &b) {
+ a -= b;
+ return a;
+}
+
+inline APInt operator-(const APInt &a, APInt &&b) {
+ b.negate();
+ b += a;
+ return std::move(b);
+}
+
+inline APInt operator-(APInt a, uint64_t RHS) {
+ a -= RHS;
+ return a;
+}
+
+inline APInt operator-(uint64_t LHS, APInt b) {
+ b.negate();
+ b += LHS;
+ return b;
+}
+
+inline APInt operator*(APInt a, uint64_t RHS) {
+ a *= RHS;
+ return a;
+}
+
+inline APInt operator*(uint64_t LHS, APInt b) {
+ b *= LHS;
+ return b;
+}
+
+namespace APIntOps {
+
+/// Determine the smaller of two APInts considered to be signed.
+inline const APInt &smin(const APInt &A, const APInt &B) {
+ return A.slt(B) ? A : B;
+}
+
+/// Determine the larger of two APInts considered to be signed.
+inline const APInt &smax(const APInt &A, const APInt &B) {
+ return A.sgt(B) ? A : B;
+}
+
+/// Determine the smaller of two APInts considered to be unsigned.
+inline const APInt &umin(const APInt &A, const APInt &B) {
+ return A.ult(B) ? A : B;
+}
+
+/// Determine the larger of two APInts considered to be unsigned.
+inline const APInt &umax(const APInt &A, const APInt &B) {
+ return A.ugt(B) ? A : B;
+}
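+// The same bit pattern can order differently under the two views
+// (illustrative):
+//   APInt A(8, 0x80), B(8, 0x7F); // 128 and 127 unsigned; -128 and 127 signed
+//   umax(A, B) == A  // 128 > 127
+//   smax(A, B) == B  // -128 < 127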
+
+/// Compute GCD of two unsigned APInt values.
+///
+/// This function returns the greatest common divisor of the two APInt values
+/// using Stein's algorithm.
+///
+/// \returns the greatest common divisor of A and B.
+APInt GreatestCommonDivisor(APInt A, APInt B);
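+// Illustrative: GreatestCommonDivisor(APInt(32, 12), APInt(32, 18)) yields 6.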
+
+/// Converts the given APInt to a double value.
+///
+/// Treats the APInt as an unsigned value for conversion purposes.
+inline double RoundAPIntToDouble(const APInt &APIVal) {
+ return APIVal.roundToDouble();
+}
+
+/// Converts the given APInt to a double value.
+///
+/// Treats the APInt as a signed value for conversion purposes.
+inline double RoundSignedAPIntToDouble(const APInt &APIVal) {
+ return APIVal.signedRoundToDouble();
+}
+
+/// Converts the given APInt to a float value.
+inline float RoundAPIntToFloat(const APInt &APIVal) {
+ return float(RoundAPIntToDouble(APIVal));
+}
+
+/// Converts the given APInt to a float value.
+///
+/// Treats the APInt as a signed value for conversion purposes.
+inline float RoundSignedAPIntToFloat(const APInt &APIVal) {
+ return float(APIVal.signedRoundToDouble());
+}
+
+/// Converts the given double value into an APInt.
+///
+/// This function converts a double value to an APInt value.
+APInt RoundDoubleToAPInt(double Double, unsigned width);
+
+/// Converts the given float value into an APInt.
+///
+/// This function converts a float value to an APInt value.
+inline APInt RoundFloatToAPInt(float Float, unsigned width) {
+ return RoundDoubleToAPInt(double(Float), width);
+}
+
+/// Return A unsigned-divided by B, rounded by the given rounding mode.
+APInt RoundingUDiv(const APInt &A, const APInt &B, APInt::Rounding RM);
+
+/// Return A signed-divided by B, rounded by the given rounding mode.
+APInt RoundingSDiv(const APInt &A, const APInt &B, APInt::Rounding RM);
+
+/// Let q(n) = An^2 + Bn + C, and BW = bit width of the value range
+/// (e.g. 32 for i32).
+/// This function finds the smallest number n, such that
+/// (a) n >= 0 and q(n) = 0, or
+/// (b) n >= 1 and q(n-1) and q(n), when evaluated in the set of all
+/// integers, belong to two different intervals [Rk, Rk+R),
+/// where R = 2^BW, and k is an integer.
+/// The idea here is to find when q(n) "overflows" 2^BW, while at the
+/// same time "allowing" subtraction. In unsigned modulo arithmetic a
+/// subtraction (treated as addition of negated numbers) would always
+/// count as an overflow, but here we want to allow values to decrease
+/// and increase as long as they are within the same interval.
+/// Specifically, adding of two negative numbers should not cause an
+/// overflow (as long as the magnitude does not exceed the bit width).
+/// On the other hand, given a positive number, adding a negative
+/// number to it can give a negative result, which would cause the
+/// value to go from [-2^BW, 0) to [0, 2^BW). In that sense, zero is
+/// treated as a special case of an overflow.
+///
+/// This function returns None if after finding k that minimizes the
+/// positive solution to q(n) = kR, both solutions are contained between
+/// two consecutive integers.
+///
+/// There are cases where q(n) > T, and q(n+1) < T (assuming evaluation
+/// in arithmetic modulo 2^BW, and treating the values as signed) by the
+/// virtue of *signed* overflow. This function will *not* find such an n,
+/// however it may find a value of n satisfying the inequalities due to
+/// an *unsigned* overflow (if the values are treated as unsigned).
+/// To find a solution for a signed overflow, treat it as a problem of
+/// finding an unsigned overflow with a range width of BW-1.
+///
+/// The returned value may have a different bit width from the input
+/// coefficients.
+Optional<APInt> SolveQuadraticEquationWrap(APInt A, APInt B, APInt C,
+ unsigned RangeWidth);
+
+/// Compare two values, and if they are different, return the position of the
+/// most significant bit that is different in the values.
+Optional<unsigned> GetMostSignificantDifferentBit(const APInt &A,
+ const APInt &B);
+
+/// Splat/Merge neighboring bits to widen/narrow the bitmask represented
+/// by \p A to \p NewBitWidth bits.
+///
+/// e.g. ScaleBitMask(0b0101, 8) -> 0b00110011
+/// e.g. ScaleBitMask(0b00011011, 4) -> 0b0111
+/// A.getBitWidth() or NewBitWidth must be a whole multiple of the other.
+///
+/// TODO: Do we need a mode where all bits must be set when merging down?
+APInt ScaleBitMask(const APInt &A, unsigned NewBitWidth);
+} // namespace APIntOps
+
+// See friend declaration above. This additional declaration is required in
+// order to compile LLVM with IBM xlC compiler.
+hash_code hash_value(const APInt &Arg);
+
+/// StoreIntToMemory - Fills the StoreBytes bytes of memory starting from Dst
+/// with the integer held in IntVal.
+void StoreIntToMemory(const APInt &IntVal, uint8_t *Dst, unsigned StoreBytes);
+
+/// LoadIntFromMemory - Loads the integer stored in the LoadBytes bytes starting
+/// from Src into IntVal, which is assumed to be wide enough and to hold zero.
+void LoadIntFromMemory(APInt &IntVal, const uint8_t *Src, unsigned LoadBytes);
+
+/// Provide DenseMapInfo for APInt.
+template <> struct DenseMapInfo<APInt, void> {
+ static inline APInt getEmptyKey() {
+ APInt V(nullptr, 0);
+ V.U.VAL = 0;
+ return V;
+ }
+
+ static inline APInt getTombstoneKey() {
+ APInt V(nullptr, 0);
+ V.U.VAL = 1;
+ return V;
+ }
+
+ static unsigned getHashValue(const APInt &Key);
+
+ static bool isEqual(const APInt &LHS, const APInt &RHS) {
+ return LHS.getBitWidth() == RHS.getBitWidth() && LHS == RHS;
+ }
+};
+
+} // namespace llvm
+
+#endif
+
+#ifdef __GNUC__
+#pragma GCC diagnostic pop
+#endif
diff --git a/contrib/libs/llvm14/include/llvm/ADT/APSInt.h b/contrib/libs/llvm14/include/llvm/ADT/APSInt.h
new file mode 100644
index 0000000000..d0b04575a5
--- /dev/null
+++ b/contrib/libs/llvm14/include/llvm/ADT/APSInt.h
@@ -0,0 +1,380 @@
+#pragma once
+
+#ifdef __GNUC__
+#pragma GCC diagnostic push
+#pragma GCC diagnostic ignored "-Wunused-parameter"
+#endif
+
+//===-- llvm/ADT/APSInt.h - Arbitrary Precision Signed Int -----*- C++ -*--===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+///
+/// \file
+/// This file implements the APSInt class, which is a simple class that
+/// represents an arbitrary-sized integer that knows its signedness.
+///
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_ADT_APSINT_H
+#define LLVM_ADT_APSINT_H
+
+#include "llvm/ADT/APInt.h"
+
+namespace llvm {
+
+/// An arbitrary precision integer that knows its signedness.
+class LLVM_NODISCARD APSInt : public APInt {
+ bool IsUnsigned;
+
+public:
+ /// Default constructor that creates an uninitialized APSInt.
+ explicit APSInt() : IsUnsigned(false) {}
+
+ /// Create an APSInt with the specified width, default to unsigned.
+ explicit APSInt(uint32_t BitWidth, bool isUnsigned = true)
+ : APInt(BitWidth, 0), IsUnsigned(isUnsigned) {}
+
+ explicit APSInt(APInt I, bool isUnsigned = true)
+ : APInt(std::move(I)), IsUnsigned(isUnsigned) {}
+
+ /// Construct an APSInt from a string representation.
+ ///
+ /// This constructor interprets the string \p Str using the radix of 10.
+ /// The interpretation stops at the end of the string. The bit width of the
+ /// constructed APSInt is determined automatically.
+ ///
+ /// \param Str the string to be interpreted.
+ explicit APSInt(StringRef Str);
+
+ /// Determine sign of this APSInt.
+ ///
+ /// \returns true if this APSInt is negative, false otherwise
+ bool isNegative() const { return isSigned() && APInt::isNegative(); }
+
+ /// Determine if this APSInt Value is non-negative (>= 0)
+ ///
+ /// \returns true if this APSInt is non-negative, false otherwise
+ bool isNonNegative() const { return !isNegative(); }
+
+ /// Determine if this APSInt Value is positive.
+ ///
+ /// This tests if the value of this APSInt is positive (> 0). Note
+ /// that 0 is not a positive value.
+ ///
+ /// \returns true if this APSInt is positive.
+ bool isStrictlyPositive() const { return isNonNegative() && !isZero(); }
+
+ APSInt &operator=(APInt RHS) {
+ // Retain our current sign.
+ APInt::operator=(std::move(RHS));
+ return *this;
+ }
+
+ APSInt &operator=(uint64_t RHS) {
+ // Retain our current sign.
+ APInt::operator=(RHS);
+ return *this;
+ }
+
+ // Query sign information.
+ bool isSigned() const { return !IsUnsigned; }
+ bool isUnsigned() const { return IsUnsigned; }
+ void setIsUnsigned(bool Val) { IsUnsigned = Val; }
+ void setIsSigned(bool Val) { IsUnsigned = !Val; }
+
+ /// Append this APSInt to the specified SmallString.
+ void toString(SmallVectorImpl<char> &Str, unsigned Radix = 10) const {
+ APInt::toString(Str, Radix, isSigned());
+ }
+ using APInt::toString;
+
+ /// Get the correctly-extended \c int64_t value.
+ int64_t getExtValue() const {
+ assert(getMinSignedBits() <= 64 && "Too many bits for int64_t");
+ return isSigned() ? getSExtValue() : getZExtValue();
+ }
+
+ APSInt trunc(uint32_t width) const {
+ return APSInt(APInt::trunc(width), IsUnsigned);
+ }
+
+ APSInt extend(uint32_t width) const {
+ if (IsUnsigned)
+ return APSInt(zext(width), IsUnsigned);
+ else
+ return APSInt(sext(width), IsUnsigned);
+ }
+
+ APSInt extOrTrunc(uint32_t width) const {
+ if (IsUnsigned)
+ return APSInt(zextOrTrunc(width), IsUnsigned);
+ else
+ return APSInt(sextOrTrunc(width), IsUnsigned);
+ }
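+ // Signedness drives the extension (illustrative values):
+ //   APSInt S(APInt(8, 0xFF), /*isUnsigned=*/false); // signed, value -1
+ //   S.extend(16)   // bits 0xFFFF: sign-extended
+ //   APSInt U(APInt(8, 0xFF), /*isUnsigned=*/true);  // unsigned, value 255
+ //   U.extend(16)   // bits 0x00FF: zero-extended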
+
+ const APSInt &operator%=(const APSInt &RHS) {
+ assert(IsUnsigned == RHS.IsUnsigned && "Signedness mismatch!");
+ if (IsUnsigned)
+ *this = urem(RHS);
+ else
+ *this = srem(RHS);
+ return *this;
+ }
+ const APSInt &operator/=(const APSInt &RHS) {
+ assert(IsUnsigned == RHS.IsUnsigned && "Signedness mismatch!");
+ if (IsUnsigned)
+ *this = udiv(RHS);
+ else
+ *this = sdiv(RHS);
+ return *this;
+ }
+ APSInt operator%(const APSInt &RHS) const {
+ assert(IsUnsigned == RHS.IsUnsigned && "Signedness mismatch!");
+ return IsUnsigned ? APSInt(urem(RHS), true) : APSInt(srem(RHS), false);
+ }
+ APSInt operator/(const APSInt &RHS) const {
+ assert(IsUnsigned == RHS.IsUnsigned && "Signedness mismatch!");
+ return IsUnsigned ? APSInt(udiv(RHS), true) : APSInt(sdiv(RHS), false);
+ }
+
+ APSInt operator>>(unsigned Amt) const {
+ return IsUnsigned ? APSInt(lshr(Amt), true) : APSInt(ashr(Amt), false);
+ }
+ APSInt& operator>>=(unsigned Amt) {
+ if (IsUnsigned)
+ lshrInPlace(Amt);
+ else
+ ashrInPlace(Amt);
+ return *this;
+ }
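+ // The shift picks lshr vs. ashr based on signedness (illustrative):
+ //   APSInt S(APInt(8, 0x80), /*isUnsigned=*/false); // signed, value -128
+ //   S >> 4   // bits 0xF8 (-8): arithmetic shift
+ //   APSInt U(APInt(8, 0x80), /*isUnsigned=*/true);  // unsigned, value 128
+ //   U >> 4   // bits 0x08 (8): logical shift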
+
+ inline bool operator<(const APSInt& RHS) const {
+ assert(IsUnsigned == RHS.IsUnsigned && "Signedness mismatch!");
+ return IsUnsigned ? ult(RHS) : slt(RHS);
+ }
+ inline bool operator>(const APSInt& RHS) const {
+ assert(IsUnsigned == RHS.IsUnsigned && "Signedness mismatch!");
+ return IsUnsigned ? ugt(RHS) : sgt(RHS);
+ }
+ inline bool operator<=(const APSInt& RHS) const {
+ assert(IsUnsigned == RHS.IsUnsigned && "Signedness mismatch!");
+ return IsUnsigned ? ule(RHS) : sle(RHS);
+ }
+ inline bool operator>=(const APSInt& RHS) const {
+ assert(IsUnsigned == RHS.IsUnsigned && "Signedness mismatch!");
+ return IsUnsigned ? uge(RHS) : sge(RHS);
+ }
+ inline bool operator==(const APSInt& RHS) const {
+ assert(IsUnsigned == RHS.IsUnsigned && "Signedness mismatch!");
+ return eq(RHS);
+ }
+ inline bool operator!=(const APSInt& RHS) const {
+ return !((*this) == RHS);
+ }
+
+ bool operator==(int64_t RHS) const {
+ return compareValues(*this, get(RHS)) == 0;
+ }
+ bool operator!=(int64_t RHS) const {
+ return compareValues(*this, get(RHS)) != 0;
+ }
+ bool operator<=(int64_t RHS) const {
+ return compareValues(*this, get(RHS)) <= 0;
+ }
+ bool operator>=(int64_t RHS) const {
+ return compareValues(*this, get(RHS)) >= 0;
+ }
+ bool operator<(int64_t RHS) const {
+ return compareValues(*this, get(RHS)) < 0;
+ }
+ bool operator>(int64_t RHS) const {
+ return compareValues(*this, get(RHS)) > 0;
+ }
+
+ // The remaining operators just wrap the logic of APInt, but retain the
+ // signedness information.
+
+ APSInt operator<<(unsigned Bits) const {
+ return APSInt(static_cast<const APInt&>(*this) << Bits, IsUnsigned);
+ }
+ APSInt& operator<<=(unsigned Amt) {
+ static_cast<APInt&>(*this) <<= Amt;
+ return *this;
+ }
+
+ APSInt& operator++() {
+ ++(static_cast<APInt&>(*this));
+ return *this;
+ }
+ APSInt& operator--() {
+ --(static_cast<APInt&>(*this));
+ return *this;
+ }
+ APSInt operator++(int) {
+ return APSInt(++static_cast<APInt&>(*this), IsUnsigned);
+ }
+ APSInt operator--(int) {
+ return APSInt(--static_cast<APInt&>(*this), IsUnsigned);
+ }
+ APSInt operator-() const {
+ return APSInt(-static_cast<const APInt&>(*this), IsUnsigned);
+ }
+ APSInt& operator+=(const APSInt& RHS) {
+ assert(IsUnsigned == RHS.IsUnsigned && "Signedness mismatch!");
+ static_cast<APInt&>(*this) += RHS;
+ return *this;
+ }
+ APSInt& operator-=(const APSInt& RHS) {
+ assert(IsUnsigned == RHS.IsUnsigned && "Signedness mismatch!");
+ static_cast<APInt&>(*this) -= RHS;
+ return *this;
+ }
+ APSInt& operator*=(const APSInt& RHS) {
+ assert(IsUnsigned == RHS.IsUnsigned && "Signedness mismatch!");
+ static_cast<APInt&>(*this) *= RHS;
+ return *this;
+ }
+ APSInt& operator&=(const APSInt& RHS) {
+ assert(IsUnsigned == RHS.IsUnsigned && "Signedness mismatch!");
+ static_cast<APInt&>(*this) &= RHS;
+ return *this;
+ }
+ APSInt& operator|=(const APSInt& RHS) {
+ assert(IsUnsigned == RHS.IsUnsigned && "Signedness mismatch!");
+ static_cast<APInt&>(*this) |= RHS;
+ return *this;
+ }
+ APSInt& operator^=(const APSInt& RHS) {
+ assert(IsUnsigned == RHS.IsUnsigned && "Signedness mismatch!");
+ static_cast<APInt&>(*this) ^= RHS;
+ return *this;
+ }
+
+ APSInt operator&(const APSInt& RHS) const {
+ assert(IsUnsigned == RHS.IsUnsigned && "Signedness mismatch!");
+ return APSInt(static_cast<const APInt&>(*this) & RHS, IsUnsigned);
+ }
+
+ APSInt operator|(const APSInt& RHS) const {
+ assert(IsUnsigned == RHS.IsUnsigned && "Signedness mismatch!");
+ return APSInt(static_cast<const APInt&>(*this) | RHS, IsUnsigned);
+ }
+
+ APSInt operator^(const APSInt &RHS) const {
+ assert(IsUnsigned == RHS.IsUnsigned && "Signedness mismatch!");
+ return APSInt(static_cast<const APInt&>(*this) ^ RHS, IsUnsigned);
+ }
+
+ APSInt operator*(const APSInt& RHS) const {
+ assert(IsUnsigned == RHS.IsUnsigned && "Signedness mismatch!");
+ return APSInt(static_cast<const APInt&>(*this) * RHS, IsUnsigned);
+ }
+ APSInt operator+(const APSInt& RHS) const {
+ assert(IsUnsigned == RHS.IsUnsigned && "Signedness mismatch!");
+ return APSInt(static_cast<const APInt&>(*this) + RHS, IsUnsigned);
+ }
+ APSInt operator-(const APSInt& RHS) const {
+ assert(IsUnsigned == RHS.IsUnsigned && "Signedness mismatch!");
+ return APSInt(static_cast<const APInt&>(*this) - RHS, IsUnsigned);
+ }
+ APSInt operator~() const {
+ return APSInt(~static_cast<const APInt&>(*this), IsUnsigned);
+ }
+
+ /// Return the APSInt representing the maximum integer value with the given
+ /// bit width and signedness.
+ static APSInt getMaxValue(uint32_t numBits, bool Unsigned) {
+ return APSInt(Unsigned ? APInt::getMaxValue(numBits)
+ : APInt::getSignedMaxValue(numBits), Unsigned);
+ }
+
+ /// Return the APSInt representing the minimum integer value with the given
+ /// bit width and signedness.
+ static APSInt getMinValue(uint32_t numBits, bool Unsigned) {
+ return APSInt(Unsigned ? APInt::getMinValue(numBits)
+ : APInt::getSignedMinValue(numBits), Unsigned);
+ }
+
+ /// Determine if two APSInts have the same value, zero- or
+ /// sign-extending as needed.
+ static bool isSameValue(const APSInt &I1, const APSInt &I2) {
+ return !compareValues(I1, I2);
+ }
+
+ /// Compare underlying values of two numbers.
+ static int compareValues(const APSInt &I1, const APSInt &I2) {
+ if (I1.getBitWidth() == I2.getBitWidth() && I1.isSigned() == I2.isSigned())
+ return I1.IsUnsigned ? I1.compare(I2) : I1.compareSigned(I2);
+
+ // Check for a bit-width mismatch.
+ if (I1.getBitWidth() > I2.getBitWidth())
+ return compareValues(I1, I2.extend(I1.getBitWidth()));
+ if (I2.getBitWidth() > I1.getBitWidth())
+ return compareValues(I1.extend(I2.getBitWidth()), I2);
+
+ // We have a signedness mismatch. Check for negative values and do an
+ // unsigned compare if both are positive.
+ if (I1.isSigned()) {
+ assert(!I2.isSigned() && "Expected signed mismatch");
+ if (I1.isNegative())
+ return -1;
+ } else {
+ assert(I2.isSigned() && "Expected signed mismatch");
+ if (I2.isNegative())
+ return 1;
+ }
+
+ return I1.compare(I2);
+ }
+
+ static APSInt get(int64_t X) { return APSInt(APInt(64, X), false); }
+ static APSInt getUnsigned(uint64_t X) { return APSInt(APInt(64, X), true); }
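+ // Illustrative: isSameValue() tolerates the width and signedness mismatches
+ // that operator== asserts on:
+ //   APSInt::isSameValue(APSInt::get(5), APSInt::getUnsigned(5))      // true
+ //   APSInt::isSameValue(APSInt::get(-1), APSInt::getUnsigned(~0ULL)) // false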
+
+ /// Used to insert APSInt objects, or objects that contain APSInt objects,
+ /// into FoldingSets.
+ void Profile(FoldingSetNodeID& ID) const;
+};
+
+inline bool operator==(int64_t V1, const APSInt &V2) { return V2 == V1; }
+inline bool operator!=(int64_t V1, const APSInt &V2) { return V2 != V1; }
+inline bool operator<=(int64_t V1, const APSInt &V2) { return V2 >= V1; }
+inline bool operator>=(int64_t V1, const APSInt &V2) { return V2 <= V1; }
+inline bool operator<(int64_t V1, const APSInt &V2) { return V2 > V1; }
+inline bool operator>(int64_t V1, const APSInt &V2) { return V2 < V1; }
+
+inline raw_ostream &operator<<(raw_ostream &OS, const APSInt &I) {
+ I.print(OS, I.isSigned());
+ return OS;
+}
+
+/// Provide DenseMapInfo for APSInt, using the DenseMapInfo for APInt.
+template <> struct DenseMapInfo<APSInt, void> {
+ static inline APSInt getEmptyKey() {
+ return APSInt(DenseMapInfo<APInt, void>::getEmptyKey());
+ }
+
+ static inline APSInt getTombstoneKey() {
+ return APSInt(DenseMapInfo<APInt, void>::getTombstoneKey());
+ }
+
+ static unsigned getHashValue(const APSInt &Key) {
+ return DenseMapInfo<APInt, void>::getHashValue(Key);
+ }
+
+ static bool isEqual(const APSInt &LHS, const APSInt &RHS) {
+ return LHS.getBitWidth() == RHS.getBitWidth() &&
+ LHS.isUnsigned() == RHS.isUnsigned() && LHS == RHS;
+ }
+};
+
+} // end namespace llvm
+
+#endif
+
+#ifdef __GNUC__
+#pragma GCC diagnostic pop
+#endif
diff --git a/contrib/libs/llvm14/include/llvm/ADT/AllocatorList.h b/contrib/libs/llvm14/include/llvm/ADT/AllocatorList.h
new file mode 100644
index 0000000000..ce7d47a71d
--- /dev/null
+++ b/contrib/libs/llvm14/include/llvm/ADT/AllocatorList.h
@@ -0,0 +1,243 @@
+#pragma once
+
+#ifdef __GNUC__
+#pragma GCC diagnostic push
+#pragma GCC diagnostic ignored "-Wunused-parameter"
+#endif
+
+//===- llvm/ADT/AllocatorList.h - Custom allocator list ---------*- C++ -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_ADT_ALLOCATORLIST_H
+#define LLVM_ADT_ALLOCATORLIST_H
+
+#include "llvm/ADT/ilist_node.h"
+#include "llvm/ADT/iterator.h"
+#include "llvm/ADT/simple_ilist.h"
+#include "llvm/Support/Allocator.h"
+#include <cassert>
+#include <cstddef>
+#include <iterator>
+#include <type_traits>
+#include <utility>
+
+namespace llvm {
+
+/// A linked-list with a custom, local allocator.
+///
+/// Expose a std::list-like interface that owns and uses a custom LLVM-style
+/// allocator (e.g., BumpPtrAllocator), leveraging \a simple_ilist for the
+/// implementation details.
+///
+/// Because this list owns the allocator, calling \a splice() with a different
+/// list isn't generally safe. As such, \a splice has been left out of the
+/// interface entirely.
+template <class T, class AllocatorT> class AllocatorList : AllocatorT {
+ struct Node : ilist_node<Node> {
+ Node(Node &&) = delete;
+ Node(const Node &) = delete;
+ Node &operator=(Node &&) = delete;
+ Node &operator=(const Node &) = delete;
+
+ Node(T &&V) : V(std::move(V)) {}
+ Node(const T &V) : V(V) {}
+ template <class... Ts> Node(Ts &&... Vs) : V(std::forward<Ts>(Vs)...) {}
+ T V;
+ };
+
+ using list_type = simple_ilist<Node>;
+
+ list_type List;
+
+ AllocatorT &getAlloc() { return *this; }
+ const AllocatorT &getAlloc() const { return *this; }
+
+ template <class... ArgTs> Node *create(ArgTs &&... Args) {
+ return new (getAlloc()) Node(std::forward<ArgTs>(Args)...);
+ }
+
+ struct Cloner {
+ AllocatorList &AL;
+
+ Cloner(AllocatorList &AL) : AL(AL) {}
+
+ Node *operator()(const Node &N) const { return AL.create(N.V); }
+ };
+
+ struct Disposer {
+ AllocatorList &AL;
+
+ Disposer(AllocatorList &AL) : AL(AL) {}
+
+ void operator()(Node *N) const {
+ N->~Node();
+ AL.getAlloc().Deallocate(N);
+ }
+ };
+
+public:
+ using value_type = T;
+ using pointer = T *;
+ using reference = T &;
+ using const_pointer = const T *;
+ using const_reference = const T &;
+ using size_type = typename list_type::size_type;
+ using difference_type = typename list_type::difference_type;
+
+private:
+ template <class ValueT, class IteratorBase>
+ class IteratorImpl
+ : public iterator_adaptor_base<IteratorImpl<ValueT, IteratorBase>,
+ IteratorBase,
+ std::bidirectional_iterator_tag, ValueT> {
+ template <class OtherValueT, class OtherIteratorBase>
+ friend class IteratorImpl;
+ friend AllocatorList;
+
+ using base_type =
+ iterator_adaptor_base<IteratorImpl<ValueT, IteratorBase>, IteratorBase,
+ std::bidirectional_iterator_tag, ValueT>;
+
+ public:
+ using value_type = ValueT;
+ using pointer = ValueT *;
+ using reference = ValueT &;
+
+ IteratorImpl() = default;
+ IteratorImpl(const IteratorImpl &) = default;
+ IteratorImpl &operator=(const IteratorImpl &) = default;
+
+ explicit IteratorImpl(const IteratorBase &I) : base_type(I) {}
+
+ template <class OtherValueT, class OtherIteratorBase>
+ IteratorImpl(const IteratorImpl<OtherValueT, OtherIteratorBase> &X,
+ std::enable_if_t<std::is_convertible<
+ OtherIteratorBase, IteratorBase>::value> * = nullptr)
+ : base_type(X.wrapped()) {}
+
+ ~IteratorImpl() = default;
+
+ reference operator*() const { return base_type::wrapped()->V; }
+ pointer operator->() const { return &operator*(); }
+ };
+
+public:
+ using iterator = IteratorImpl<T, typename list_type::iterator>;
+ using reverse_iterator =
+ IteratorImpl<T, typename list_type::reverse_iterator>;
+ using const_iterator =
+ IteratorImpl<const T, typename list_type::const_iterator>;
+ using const_reverse_iterator =
+ IteratorImpl<const T, typename list_type::const_reverse_iterator>;
+
+ AllocatorList() = default;
+ AllocatorList(AllocatorList &&X)
+ : AllocatorT(std::move(X.getAlloc())), List(std::move(X.List)) {}
+
+ AllocatorList(const AllocatorList &X) {
+ List.cloneFrom(X.List, Cloner(*this), Disposer(*this));
+ }
+
+ AllocatorList &operator=(AllocatorList &&X) {
+ clear(); // Dispose of current nodes explicitly.
+ List = std::move(X.List);
+ getAlloc() = std::move(X.getAlloc());
+ return *this;
+ }
+
+ AllocatorList &operator=(const AllocatorList &X) {
+ List.cloneFrom(X.List, Cloner(*this), Disposer(*this));
+ return *this;
+ }
+
+ ~AllocatorList() { clear(); }
+
+ void swap(AllocatorList &RHS) {
+ List.swap(RHS.List);
+ std::swap(getAlloc(), RHS.getAlloc());
+ }
+
+ bool empty() { return List.empty(); }
+ size_t size() { return List.size(); }
+
+ iterator begin() { return iterator(List.begin()); }
+ iterator end() { return iterator(List.end()); }
+ const_iterator begin() const { return const_iterator(List.begin()); }
+ const_iterator end() const { return const_iterator(List.end()); }
+ reverse_iterator rbegin() { return reverse_iterator(List.rbegin()); }
+ reverse_iterator rend() { return reverse_iterator(List.rend()); }
+ const_reverse_iterator rbegin() const {
+ return const_reverse_iterator(List.rbegin());
+ }
+ const_reverse_iterator rend() const {
+ return const_reverse_iterator(List.rend());
+ }
+
+ T &back() { return List.back().V; }
+ T &front() { return List.front().V; }
+ const T &back() const { return List.back().V; }
+ const T &front() const { return List.front().V; }
+
+ template <class... Ts> iterator emplace(iterator I, Ts &&... Vs) {
+ return iterator(List.insert(I.wrapped(), *create(std::forward<Ts>(Vs)...)));
+ }
+
+ iterator insert(iterator I, T &&V) {
+ return iterator(List.insert(I.wrapped(), *create(std::move(V))));
+ }
+ iterator insert(iterator I, const T &V) {
+ return iterator(List.insert(I.wrapped(), *create(V)));
+ }
+
+ template <class Iterator>
+ void insert(iterator I, Iterator First, Iterator Last) {
+ for (; First != Last; ++First)
+ List.insert(I.wrapped(), *create(*First));
+ }
+
+ iterator erase(iterator I) {
+ return iterator(List.eraseAndDispose(I.wrapped(), Disposer(*this)));
+ }
+
+ iterator erase(iterator First, iterator Last) {
+ return iterator(
+ List.eraseAndDispose(First.wrapped(), Last.wrapped(), Disposer(*this)));
+ }
+
+ void clear() { List.clearAndDispose(Disposer(*this)); }
+ void pop_back() { List.eraseAndDispose(--List.end(), Disposer(*this)); }
+ void pop_front() { List.eraseAndDispose(List.begin(), Disposer(*this)); }
+ void push_back(T &&V) { insert(end(), std::move(V)); }
+ void push_front(T &&V) { insert(begin(), std::move(V)); }
+ void push_back(const T &V) { insert(end(), V); }
+ void push_front(const T &V) { insert(begin(), V); }
+ template <class... Ts> void emplace_back(Ts &&... Vs) {
+ emplace(end(), std::forward<Ts>(Vs)...);
+ }
+ template <class... Ts> void emplace_front(Ts &&... Vs) {
+ emplace(begin(), std::forward<Ts>(Vs)...);
+ }
+
+ /// Reset the underlying allocator.
+ ///
+ /// \pre \c empty()
+ void resetAlloc() {
+ assert(empty() && "Cannot reset allocator if not empty");
+ getAlloc().Reset();
+ }
+};
+
+template <class T> using BumpPtrList = AllocatorList<T, BumpPtrAllocator>;
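+// A usage sketch (illustrative):
+//   BumpPtrList<int> L;
+//   L.push_back(1);
+//   L.emplace_back(2);
+//   for (int V : L) { /* visits 1, then 2 */ }
+// Destroying L (or calling clear()) disposes of every node via the owned
+// BumpPtrAllocator.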
+
+} // end namespace llvm
+
+#endif // LLVM_ADT_ALLOCATORLIST_H
+
+#ifdef __GNUC__
+#pragma GCC diagnostic pop
+#endif
diff --git a/contrib/libs/llvm14/include/llvm/ADT/Any.h b/contrib/libs/llvm14/include/llvm/ADT/Any.h
new file mode 100644
index 0000000000..8f3d2fa72d
--- /dev/null
+++ b/contrib/libs/llvm14/include/llvm/ADT/Any.h
@@ -0,0 +1,168 @@
+#pragma once
+
+#ifdef __GNUC__
+#pragma GCC diagnostic push
+#pragma GCC diagnostic ignored "-Wunused-parameter"
+#endif
+
+//===- Any.h - Generic type erased holder of any type -----------*- C++ -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+///
+/// \file
+/// This file provides Any, a non-template class modeled in the spirit of
+/// std::any. The idea is to provide a type-safe replacement for C's void*.
+/// It can hold a value of any copy-constructible, copy-assignable type.
+///
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_ADT_ANY_H
+#define LLVM_ADT_ANY_H
+
+#include "llvm/ADT/STLForwardCompat.h"
+#include "llvm/Support/Compiler.h"
+
+#include <cassert>
+#include <memory>
+#include <type_traits>
+
+namespace llvm {
+
+class LLVM_EXTERNAL_VISIBILITY Any {
+
+ // The `TypeId<T>::Id` static data member below is a globally unique
+ // identifier for the type `T`. It is explicitly marked with default
+ // visibility so that when `-fvisibility=hidden` is used, the loader still
+ // merges duplicate definitions across DSO boundaries.
+ template <typename T> struct TypeId { static const char Id; };
+
+ struct StorageBase {
+ virtual ~StorageBase() = default;
+ virtual std::unique_ptr<StorageBase> clone() const = 0;
+ virtual const void *id() const = 0;
+ };
+
+ template <typename T> struct StorageImpl : public StorageBase {
+ explicit StorageImpl(const T &Value) : Value(Value) {}
+
+ explicit StorageImpl(T &&Value) : Value(std::move(Value)) {}
+
+ std::unique_ptr<StorageBase> clone() const override {
+ return std::make_unique<StorageImpl<T>>(Value);
+ }
+
+ const void *id() const override { return &TypeId<T>::Id; }
+
+ T Value;
+
+ private:
+ StorageImpl &operator=(const StorageImpl &Other) = delete;
+ StorageImpl(const StorageImpl &Other) = delete;
+ };
+
+public:
+ Any() = default;
+
+ Any(const Any &Other)
+ : Storage(Other.Storage ? Other.Storage->clone() : nullptr) {}
+
+ // When T is Any or T is not copy-constructible we need to explicitly disable
+ // the forwarding constructor so that the copy constructor gets selected
+ // instead.
+ template <typename T,
+ std::enable_if_t<
+ llvm::conjunction<
+ llvm::negation<std::is_same<std::decay_t<T>, Any>>,
+ // We also disable this overload when an `Any` object can be
+ // converted to the parameter type because in that case,
+ // this constructor may combine with that conversion during
+ // overload resolution for determining copy
+ // constructibility, and then when we try to determine copy
+ // constructibility below we may infinitely recurse. This is
+ // being evaluated by the standards committee as a potential
+ // DR in `std::any` as well, but we're going ahead and
+ // adopting it to work around usage of `Any` with types that
+ // need to be implicitly convertible from an `Any`.
+ llvm::negation<std::is_convertible<Any, std::decay_t<T>>>,
+ std::is_copy_constructible<std::decay_t<T>>>::value,
+ int> = 0>
+ Any(T &&Value) {
+ Storage =
+ std::make_unique<StorageImpl<std::decay_t<T>>>(std::forward<T>(Value));
+ }
+
+ Any(Any &&Other) : Storage(std::move(Other.Storage)) {}
+
+ Any &swap(Any &Other) {
+ std::swap(Storage, Other.Storage);
+ return *this;
+ }
+
+ Any &operator=(Any Other) {
+ Storage = std::move(Other.Storage);
+ return *this;
+ }
+
+ bool hasValue() const { return !!Storage; }
+
+ void reset() { Storage.reset(); }
+
+private:
+ template <class T> friend T any_cast(const Any &Value);
+ template <class T> friend T any_cast(Any &Value);
+ template <class T> friend T any_cast(Any &&Value);
+ template <class T> friend const T *any_cast(const Any *Value);
+ template <class T> friend T *any_cast(Any *Value);
+ template <typename T> friend bool any_isa(const Any &Value);
+
+ std::unique_ptr<StorageBase> Storage;
+};
+
+template <typename T> const char Any::TypeId<T>::Id = 0;
+
+
+template <typename T> bool any_isa(const Any &Value) {
+ if (!Value.Storage)
+ return false;
+ return Value.Storage->id() == &Any::TypeId<remove_cvref_t<T>>::Id;
+}
+
+template <class T> T any_cast(const Any &Value) {
+ return static_cast<T>(*any_cast<remove_cvref_t<T>>(&Value));
+}
+
+template <class T> T any_cast(Any &Value) {
+ return static_cast<T>(*any_cast<remove_cvref_t<T>>(&Value));
+}
+
+template <class T> T any_cast(Any &&Value) {
+ return static_cast<T>(std::move(*any_cast<remove_cvref_t<T>>(&Value)));
+}
+
+template <class T> const T *any_cast(const Any *Value) {
+ using U = remove_cvref_t<T>;
+ assert(Value && any_isa<T>(*Value) && "Bad any cast!");
+ if (!Value || !any_isa<U>(*Value))
+ return nullptr;
+ return &static_cast<Any::StorageImpl<U> &>(*Value->Storage).Value;
+}
+
+template <class T> T *any_cast(Any *Value) {
+ using U = std::decay_t<T>;
+ assert(Value && any_isa<U>(*Value) && "Bad any cast!");
+ if (!Value || !any_isa<U>(*Value))
+ return nullptr;
+ return &static_cast<Any::StorageImpl<U> &>(*Value->Storage).Value;
+}
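+// A usage sketch (illustrative):
+//   Any A = 7;                    // wraps an int
+//   bool IsInt = any_isa<int>(A); // true
+//   int V = any_cast<int>(A);     // V == 7; asserts on a type mismatch
+//   int *P = any_cast<int>(&A);   // pointer form, non-null here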
+
+} // end namespace llvm
+
+#endif // LLVM_ADT_ANY_H
+
+#ifdef __GNUC__
+#pragma GCC diagnostic pop
+#endif
diff --git a/contrib/libs/llvm14/include/llvm/ADT/ArrayRef.h b/contrib/libs/llvm14/include/llvm/ADT/ArrayRef.h
new file mode 100644
index 0000000000..bbadf5469c
--- /dev/null
+++ b/contrib/libs/llvm14/include/llvm/ADT/ArrayRef.h
@@ -0,0 +1,614 @@
+#pragma once
+
+#ifdef __GNUC__
+#pragma GCC diagnostic push
+#pragma GCC diagnostic ignored "-Wunused-parameter"
+#endif
+
+//===- ArrayRef.h - Array Reference Wrapper ---------------------*- C++ -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_ADT_ARRAYREF_H
+#define LLVM_ADT_ARRAYREF_H
+
+#include "llvm/ADT/Hashing.h"
+#include "llvm/ADT/None.h"
+#include "llvm/ADT/SmallVector.h"
+#include "llvm/ADT/STLExtras.h"
+#include "llvm/Support/Compiler.h"
+#include <algorithm>
+#include <array>
+#include <cassert>
+#include <cstddef>
+#include <initializer_list>
+#include <iterator>
+#include <memory>
+#include <type_traits>
+#include <vector>
+
+namespace llvm {
+
+ /// ArrayRef - Represent a constant reference to an array (0 or more elements
+ /// consecutively in memory), i.e. a start pointer and a length. It allows
+ /// various APIs to take consecutive elements easily and conveniently.
+ ///
+ /// This class does not own the underlying data, it is expected to be used in
+ /// situations where the data resides in some other buffer, whose lifetime
+ /// extends past that of the ArrayRef. For this reason, it is not in general
+ /// safe to store an ArrayRef.
+ ///
+ /// This is intended to be trivially copyable, so it should be passed by
+ /// value.
+ template<typename T>
+ class LLVM_GSL_POINTER LLVM_NODISCARD ArrayRef {
+ public:
+ using value_type = T;
+ using pointer = value_type *;
+ using const_pointer = const value_type *;
+ using reference = value_type &;
+ using const_reference = const value_type &;
+ using iterator = const_pointer;
+ using const_iterator = const_pointer;
+ using reverse_iterator = std::reverse_iterator<iterator>;
+ using const_reverse_iterator = std::reverse_iterator<const_iterator>;
+ using size_type = size_t;
+ using difference_type = ptrdiff_t;
+
+ private:
+ /// The start of the array, in an external buffer.
+ const T *Data = nullptr;
+
+ /// The number of elements.
+ size_type Length = 0;
+
+ public:
+ /// @name Constructors
+ /// @{
+
+ /// Construct an empty ArrayRef.
+ /*implicit*/ ArrayRef() = default;
+
+ /// Construct an empty ArrayRef from None.
+ /*implicit*/ ArrayRef(NoneType) {}
+
+ /// Construct an ArrayRef from a single element.
+ /*implicit*/ ArrayRef(const T &OneElt)
+ : Data(&OneElt), Length(1) {}
+
+ /// Construct an ArrayRef from a pointer and length.
+ /*implicit*/ ArrayRef(const T *data, size_t length)
+ : Data(data), Length(length) {}
+
+ /// Construct an ArrayRef from a range.
+ ArrayRef(const T *begin, const T *end)
+ : Data(begin), Length(end - begin) {}
+
+ /// Construct an ArrayRef from a SmallVector. This is templated in order to
+ /// avoid instantiating SmallVectorTemplateCommon<T> whenever we
+ /// copy-construct an ArrayRef.
+ template<typename U>
+ /*implicit*/ ArrayRef(const SmallVectorTemplateCommon<T, U> &Vec)
+ : Data(Vec.data()), Length(Vec.size()) {
+ }
+
+ /// Construct an ArrayRef from a std::vector.
+ template<typename A>
+ /*implicit*/ ArrayRef(const std::vector<T, A> &Vec)
+ : Data(Vec.data()), Length(Vec.size()) {}
+
+ /// Construct an ArrayRef from a std::array
+ template <size_t N>
+ /*implicit*/ constexpr ArrayRef(const std::array<T, N> &Arr)
+ : Data(Arr.data()), Length(N) {}
+
+ /// Construct an ArrayRef from a C array.
+ template <size_t N>
+ /*implicit*/ constexpr ArrayRef(const T (&Arr)[N]) : Data(Arr), Length(N) {}
+
+ /// Construct an ArrayRef from a std::initializer_list.
+#if LLVM_GNUC_PREREQ(9, 0, 0)
+// Disable gcc's warning in this constructor as it generates an enormous amount
+// of messages. Anyone using ArrayRef should already be aware of the fact that
+// it does not do lifetime extension.
+#pragma GCC diagnostic push
+#pragma GCC diagnostic ignored "-Winit-list-lifetime"
+#endif
+ /*implicit*/ ArrayRef(const std::initializer_list<T> &Vec)
+ : Data(Vec.begin() == Vec.end() ? (T*)nullptr : Vec.begin()),
+ Length(Vec.size()) {}
+#if LLVM_GNUC_PREREQ(9, 0, 0)
+#pragma GCC diagnostic pop
+#endif
+
+ /// Construct an ArrayRef<const T*> from ArrayRef<T*>. This uses SFINAE to
+ /// ensure that only ArrayRefs of pointers can be converted.
+ template <typename U>
+ ArrayRef(const ArrayRef<U *> &A,
+ std::enable_if_t<std::is_convertible<U *const *, T const *>::value>
+ * = nullptr)
+ : Data(A.data()), Length(A.size()) {}
+
+ /// Construct an ArrayRef<const T*> from a SmallVector<T*>. This is
+ /// templated in order to avoid instantiating SmallVectorTemplateCommon<T>
+ /// whenever we copy-construct an ArrayRef.
+ template <typename U, typename DummyT>
+ /*implicit*/ ArrayRef(
+ const SmallVectorTemplateCommon<U *, DummyT> &Vec,
+ std::enable_if_t<std::is_convertible<U *const *, T const *>::value> * =
+ nullptr)
+ : Data(Vec.data()), Length(Vec.size()) {}
+
+ /// Construct an ArrayRef<const T*> from std::vector<T*>. This uses SFINAE
+ /// to ensure that only vectors of pointers can be converted.
+ template <typename U, typename A>
+ ArrayRef(const std::vector<U *, A> &Vec,
+ std::enable_if_t<std::is_convertible<U *const *, T const *>::value>
+ * = nullptr)
+ : Data(Vec.data()), Length(Vec.size()) {}
+
+ /// @}
+ /// @name Simple Operations
+ /// @{
+
+ iterator begin() const { return Data; }
+ iterator end() const { return Data + Length; }
+
+ reverse_iterator rbegin() const { return reverse_iterator(end()); }
+ reverse_iterator rend() const { return reverse_iterator(begin()); }
+
+ /// empty - Check if the array is empty.
+ bool empty() const { return Length == 0; }
+
+ const T *data() const { return Data; }
+
+ /// size - Get the array size.
+ size_t size() const { return Length; }
+
+ /// front - Get the first element.
+ const T &front() const {
+ assert(!empty());
+ return Data[0];
+ }
+
+ /// back - Get the last element.
+ const T &back() const {
+ assert(!empty());
+ return Data[Length-1];
+ }
+
+ /// copy - Allocate copy in Allocator and return ArrayRef<T> to it.
+ template <typename Allocator> ArrayRef<T> copy(Allocator &A) {
+ T *Buff = A.template Allocate<T>(Length);
+ std::uninitialized_copy(begin(), end(), Buff);
+ return ArrayRef<T>(Buff, Length);
+ }
+
+ /// equals - Check for element-wise equality.
+ bool equals(ArrayRef RHS) const {
+ if (Length != RHS.Length)
+ return false;
+ return std::equal(begin(), end(), RHS.begin());
+ }
+
+ /// slice(n, m) - Chop off the first N elements of the array, and keep M
+ /// elements in the array.
+ ArrayRef<T> slice(size_t N, size_t M) const {
+ assert(N+M <= size() && "Invalid specifier");
+ return ArrayRef<T>(data()+N, M);
+ }
+
+ /// slice(n) - Chop off the first N elements of the array.
+ ArrayRef<T> slice(size_t N) const { return slice(N, size() - N); }
+
+ /// Drop the first \p N elements of the array.
+ ArrayRef<T> drop_front(size_t N = 1) const {
+ assert(size() >= N && "Dropping more elements than exist");
+ return slice(N, size() - N);
+ }
+
+ /// Drop the last \p N elements of the array.
+ ArrayRef<T> drop_back(size_t N = 1) const {
+ assert(size() >= N && "Dropping more elements than exist");
+ return slice(0, size() - N);
+ }
+
+ /// Return a copy of *this with the leading elements that satisfy the
+ /// given predicate removed.
+ template <class PredicateT> ArrayRef<T> drop_while(PredicateT Pred) const {
+ return ArrayRef<T>(find_if_not(*this, Pred), end());
+ }
+
+ /// Return a copy of *this with the leading elements that do not satisfy
+ /// the given predicate removed.
+ template <class PredicateT> ArrayRef<T> drop_until(PredicateT Pred) const {
+ return ArrayRef<T>(find_if(*this, Pred), end());
+ }
+
+ /// Return a copy of *this with only the first \p N elements.
+ ArrayRef<T> take_front(size_t N = 1) const {
+ if (N >= size())
+ return *this;
+ return drop_back(size() - N);
+ }
+
+ /// Return a copy of *this with only the last \p N elements.
+ ArrayRef<T> take_back(size_t N = 1) const {
+ if (N >= size())
+ return *this;
+ return drop_front(size() - N);
+ }
+
+ /// Return the leading elements of this Array that satisfy the given
+ /// predicate.
+ template <class PredicateT> ArrayRef<T> take_while(PredicateT Pred) const {
+ return ArrayRef<T>(begin(), find_if_not(*this, Pred));
+ }
+
+ /// Return the leading elements of this Array that don't satisfy the
+ /// given predicate.
+ template <class PredicateT> ArrayRef<T> take_until(PredicateT Pred) const {
+ return ArrayRef<T>(begin(), find_if(*this, Pred));
+ }
+
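+ /// A sketch of the predicate-based helpers (illustrative):
+ /// \code{.cpp}
+ ///   int Vals[] = {1, 2, 5, 7};
+ ///   ArrayRef<int> R(Vals);
+ ///   auto LessThan5 = [](int X) { return X < 5; };
+ ///   ArrayRef<int> Head = R.take_while(LessThan5); // {1, 2}
+ ///   ArrayRef<int> Tail = R.drop_while(LessThan5); // {5, 7}
+ /// \endcode
+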
+ /// @}
+ /// @name Operator Overloads
+ /// @{
+ const T &operator[](size_t Index) const {
+ assert(Index < Length && "Invalid index!");
+ return Data[Index];
+ }
+
+ /// Disallow accidental assignment from a temporary.
+ ///
+ /// The declaration here is extra complicated so that "arrayRef = {}"
+ /// continues to select the move assignment operator.
+ template <typename U>
+ std::enable_if_t<std::is_same<U, T>::value, ArrayRef<T>> &
+ operator=(U &&Temporary) = delete;
+
+ /// Disallow accidental assignment from a temporary.
+ ///
+ /// The declaration here is extra complicated so that "arrayRef = {}"
+ /// continues to select the move assignment operator.
+ template <typename U>
+ std::enable_if_t<std::is_same<U, T>::value, ArrayRef<T>> &
+ operator=(std::initializer_list<U>) = delete;
+
+ /// @}
+ /// @name Expensive Operations
+ /// @{
+ std::vector<T> vec() const {
+ return std::vector<T>(Data, Data+Length);
+ }
+
+ /// @}
+ /// @name Conversion operators
+ /// @{
+ operator std::vector<T>() const {
+ return std::vector<T>(Data, Data+Length);
+ }
+
+ /// @}
+ };
+
+ /// MutableArrayRef - Represent a mutable reference to an array (0 or more
+ /// elements consecutively in memory), i.e. a start pointer and a length. It
+ /// allows various APIs to take and modify consecutive elements easily and
+ /// conveniently.
+ ///
+ /// This class does not own the underlying data, it is expected to be used in
+ /// situations where the data resides in some other buffer, whose lifetime
+ /// extends past that of the MutableArrayRef. For this reason, it is not in
+ /// general safe to store a MutableArrayRef.
+ ///
+ /// This is intended to be trivially copyable, so it should be passed by
+ /// value.
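+ ///
+ /// A short mutation sketch (illustrative; Buf must outlive M):
+ /// \code{.cpp}
+ ///   int Buf[] = {1, 2, 3};
+ ///   MutableArrayRef<int> M(Buf);
+ ///   M[0] = 42;             // writes through to Buf[0]
+ ///   M.drop_front()[0] = 7; // writes through to Buf[1]
+ /// \endcode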
+ template<typename T>
+ class LLVM_NODISCARD MutableArrayRef : public ArrayRef<T> {
+ public:
+ using value_type = T;
+ using pointer = value_type *;
+ using const_pointer = const value_type *;
+ using reference = value_type &;
+ using const_reference = const value_type &;
+ using iterator = pointer;
+ using const_iterator = const_pointer;
+ using reverse_iterator = std::reverse_iterator<iterator>;
+ using const_reverse_iterator = std::reverse_iterator<const_iterator>;
+ using size_type = size_t;
+ using difference_type = ptrdiff_t;
+
+ /// Construct an empty MutableArrayRef.
+ /*implicit*/ MutableArrayRef() = default;
+
+ /// Construct an empty MutableArrayRef from None.
+ /*implicit*/ MutableArrayRef(NoneType) : ArrayRef<T>() {}
+
+ /// Construct a MutableArrayRef from a single element.
+ /*implicit*/ MutableArrayRef(T &OneElt) : ArrayRef<T>(OneElt) {}
+
+ /// Construct a MutableArrayRef from a pointer and length.
+ /*implicit*/ MutableArrayRef(T *data, size_t length)
+ : ArrayRef<T>(data, length) {}
+
+ /// Construct a MutableArrayRef from a range.
+ MutableArrayRef(T *begin, T *end) : ArrayRef<T>(begin, end) {}
+
+ /// Construct a MutableArrayRef from a SmallVector.
+ /*implicit*/ MutableArrayRef(SmallVectorImpl<T> &Vec)
+ : ArrayRef<T>(Vec) {}
+
+ /// Construct a MutableArrayRef from a std::vector.
+ /*implicit*/ MutableArrayRef(std::vector<T> &Vec)
+ : ArrayRef<T>(Vec) {}
+
+ /// Construct a MutableArrayRef from a std::array
+ template <size_t N>
+ /*implicit*/ constexpr MutableArrayRef(std::array<T, N> &Arr)
+ : ArrayRef<T>(Arr) {}
+
+ /// Construct a MutableArrayRef from a C array.
+ template <size_t N>
+ /*implicit*/ constexpr MutableArrayRef(T (&Arr)[N]) : ArrayRef<T>(Arr) {}
+
+ T *data() const { return const_cast<T*>(ArrayRef<T>::data()); }
+
+ iterator begin() const { return data(); }
+ iterator end() const { return data() + this->size(); }
+
+ reverse_iterator rbegin() const { return reverse_iterator(end()); }
+ reverse_iterator rend() const { return reverse_iterator(begin()); }
+
+ /// front - Get the first element.
+ T &front() const {
+ assert(!this->empty());
+ return data()[0];
+ }
+
+ /// back - Get the last element.
+ T &back() const {
+ assert(!this->empty());
+ return data()[this->size()-1];
+ }
+
+ /// slice(n, m) - Chop off the first N elements of the array, and keep M
+ /// elements in the array.
+ MutableArrayRef<T> slice(size_t N, size_t M) const {
+ assert(N + M <= this->size() && "Invalid specifier");
+ return MutableArrayRef<T>(this->data() + N, M);
+ }
+
+ /// slice(n) - Chop off the first N elements of the array.
+ MutableArrayRef<T> slice(size_t N) const {
+ return slice(N, this->size() - N);
+ }
+
+ /// Drop the first \p N elements of the array.
+ MutableArrayRef<T> drop_front(size_t N = 1) const {
+ assert(this->size() >= N && "Dropping more elements than exist");
+ return slice(N, this->size() - N);
+ }
+
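+ /// Drop the last \p N elements of the array.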
+ MutableArrayRef<T> drop_back(size_t N = 1) const {
+ assert(this->size() >= N && "Dropping more elements than exist");
+ return slice(0, this->size() - N);
+ }
+
+ /// Return a copy of *this with the leading elements that satisfy the
+ /// given predicate removed.
+ template <class PredicateT>
+ MutableArrayRef<T> drop_while(PredicateT Pred) const {
+ return MutableArrayRef<T>(find_if_not(*this, Pred), end());
+ }
+
+ /// Return a copy of *this with the leading elements that do not satisfy
+ /// the given predicate removed.
+ template <class PredicateT>
+ MutableArrayRef<T> drop_until(PredicateT Pred) const {
+ return MutableArrayRef<T>(find_if(*this, Pred), end());
+ }
+
+ /// Return a copy of *this with only the first \p N elements.
+ MutableArrayRef<T> take_front(size_t N = 1) const {
+ if (N >= this->size())
+ return *this;
+ return drop_back(this->size() - N);
+ }
+
+ /// Return a copy of *this with only the last \p N elements.
+ MutableArrayRef<T> take_back(size_t N = 1) const {
+ if (N >= this->size())
+ return *this;
+ return drop_front(this->size() - N);
+ }
+
+ /// Return the leading elements of this Array that satisfy the given
+ /// predicate.
+ template <class PredicateT>
+ MutableArrayRef<T> take_while(PredicateT Pred) const {
+ return MutableArrayRef<T>(begin(), find_if_not(*this, Pred));
+ }
+
+ /// Return the leading elements of this Array that don't satisfy the
+ /// given predicate.
+ template <class PredicateT>
+ MutableArrayRef<T> take_until(PredicateT Pred) const {
+ return MutableArrayRef<T>(begin(), find_if(*this, Pred));
+ }
+
+ /// @}
+ /// @name Operator Overloads
+ /// @{
+ T &operator[](size_t Index) const {
+ assert(Index < this->size() && "Invalid index!");
+ return data()[Index];
+ }
+ };
+
+ /// This is a MutableArrayRef that owns its array.
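+ ///
+ /// For example (illustrative):
+ /// \code{.cpp}
+ ///   OwningArrayRef<int> A(4); // heap-allocates 4 ints; freed on destruction
+ ///   A[0] = 1;                 // mutable access, as with MutableArrayRef
+ /// \endcode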
+ template <typename T> class OwningArrayRef : public MutableArrayRef<T> {
+ public:
+ OwningArrayRef() = default;
+ OwningArrayRef(size_t Size) : MutableArrayRef<T>(new T[Size], Size) {}
+
+ OwningArrayRef(ArrayRef<T> Data)
+ : MutableArrayRef<T>(new T[Data.size()], Data.size()) {
+ std::copy(Data.begin(), Data.end(), this->begin());
+ }
+
+ OwningArrayRef(OwningArrayRef &&Other) { *this = std::move(Other); }
+
+ OwningArrayRef &operator=(OwningArrayRef &&Other) {
+ delete[] this->data();
+ this->MutableArrayRef<T>::operator=(Other);
+ Other.MutableArrayRef<T>::operator=(MutableArrayRef<T>());
+ return *this;
+ }
+
+ ~OwningArrayRef() { delete[] this->data(); }
+ };
+
+ /// @name ArrayRef Convenience constructors
+ /// @{
+
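+ /// These helpers deduce the element type, e.g. (illustrative):
+ /// \code{.cpp}
+ ///   int Buf[] = {1, 2, 3};
+ ///   auto R = makeArrayRef(Buf);    // ArrayRef<int> of size 3
+ ///   auto S = makeArrayRef(Buf, 2); // ArrayRef<int> of size 2
+ /// \endcode
+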
+ /// Construct an ArrayRef from a single element.
+ template<typename T>
+ ArrayRef<T> makeArrayRef(const T &OneElt) {
+ return OneElt;
+ }
+
+ /// Construct an ArrayRef from a pointer and length.
+ template<typename T>
+ ArrayRef<T> makeArrayRef(const T *data, size_t length) {
+ return ArrayRef<T>(data, length);
+ }
+
+ /// Construct an ArrayRef from a range.
+ template<typename T>
+ ArrayRef<T> makeArrayRef(const T *begin, const T *end) {
+ return ArrayRef<T>(begin, end);
+ }
+
+ /// Construct an ArrayRef from a SmallVector.
+ template <typename T>
+ ArrayRef<T> makeArrayRef(const SmallVectorImpl<T> &Vec) {
+ return Vec;
+ }
+
+ /// Construct an ArrayRef from a SmallVector.
+ template <typename T, unsigned N>
+ ArrayRef<T> makeArrayRef(const SmallVector<T, N> &Vec) {
+ return Vec;
+ }
+
+ /// Construct an ArrayRef from a std::vector.
+ template<typename T>
+ ArrayRef<T> makeArrayRef(const std::vector<T> &Vec) {
+ return Vec;
+ }
+
+ /// Construct an ArrayRef from a std::array.
+ template <typename T, std::size_t N>
+ ArrayRef<T> makeArrayRef(const std::array<T, N> &Arr) {
+ return Arr;
+ }
+
+ /// Construct an ArrayRef from an ArrayRef (no-op) (const)
+ template <typename T> ArrayRef<T> makeArrayRef(const ArrayRef<T> &Vec) {
+ return Vec;
+ }
+
+ /// Construct an ArrayRef from an ArrayRef (no-op)
+ template <typename T> ArrayRef<T> &makeArrayRef(ArrayRef<T> &Vec) {
+ return Vec;
+ }
+
+ /// Construct an ArrayRef from a C array.
+ template<typename T, size_t N>
+ ArrayRef<T> makeArrayRef(const T (&Arr)[N]) {
+ return ArrayRef<T>(Arr);
+ }
+
+ /// Construct a MutableArrayRef from a single element.
+ template<typename T>
+ MutableArrayRef<T> makeMutableArrayRef(T &OneElt) {
+ return OneElt;
+ }
+
+ /// Construct a MutableArrayRef from a pointer and length.
+ template<typename T>
+ MutableArrayRef<T> makeMutableArrayRef(T *data, size_t length) {
+ return MutableArrayRef<T>(data, length);
+ }
+
+ /// @}
+ /// @name ArrayRef Comparison Operators
+ /// @{
+
+ template<typename T>
+ inline bool operator==(ArrayRef<T> LHS, ArrayRef<T> RHS) {
+ return LHS.equals(RHS);
+ }
+
+ template <typename T>
+ inline bool operator==(SmallVectorImpl<T> &LHS, ArrayRef<T> RHS) {
+ return ArrayRef<T>(LHS).equals(RHS);
+ }
+
+ template <typename T>
+ inline bool operator!=(ArrayRef<T> LHS, ArrayRef<T> RHS) {
+ return !(LHS == RHS);
+ }
+
+ template <typename T>
+ inline bool operator!=(SmallVectorImpl<T> &LHS, ArrayRef<T> RHS) {
+ return !(LHS == RHS);
+ }
+
+ /// @}
+
+ template <typename T> hash_code hash_value(ArrayRef<T> S) {
+ return hash_combine_range(S.begin(), S.end());
+ }
+
+ // Provide DenseMapInfo for ArrayRefs.
+ template <typename T> struct DenseMapInfo<ArrayRef<T>, void> {
+ static inline ArrayRef<T> getEmptyKey() {
+ return ArrayRef<T>(
+ reinterpret_cast<const T *>(~static_cast<uintptr_t>(0)), size_t(0));
+ }
+
+ static inline ArrayRef<T> getTombstoneKey() {
+ return ArrayRef<T>(
+ reinterpret_cast<const T *>(~static_cast<uintptr_t>(1)), size_t(0));
+ }
+
+ static unsigned getHashValue(ArrayRef<T> Val) {
+ assert(Val.data() != getEmptyKey().data() &&
+ "Cannot hash the empty key!");
+ assert(Val.data() != getTombstoneKey().data() &&
+ "Cannot hash the tombstone key!");
+ return (unsigned)(hash_value(Val));
+ }
+
+ static bool isEqual(ArrayRef<T> LHS, ArrayRef<T> RHS) {
+ if (RHS.data() == getEmptyKey().data())
+ return LHS.data() == getEmptyKey().data();
+ if (RHS.data() == getTombstoneKey().data())
+ return LHS.data() == getTombstoneKey().data();
+ return LHS == RHS;
+ }
+ };
+
+} // end namespace llvm
+
+#endif // LLVM_ADT_ARRAYREF_H
+
+#ifdef __GNUC__
+#pragma GCC diagnostic pop
+#endif
diff --git a/contrib/libs/llvm14/include/llvm/ADT/BitVector.h b/contrib/libs/llvm14/include/llvm/ADT/BitVector.h
new file mode 100644
index 0000000000..936a2b2073
--- /dev/null
+++ b/contrib/libs/llvm14/include/llvm/ADT/BitVector.h
@@ -0,0 +1,867 @@
+#pragma once
+
+#ifdef __GNUC__
+#pragma GCC diagnostic push
+#pragma GCC diagnostic ignored "-Wunused-parameter"
+#endif
+
+//===- llvm/ADT/BitVector.h - Bit vectors -----------------------*- C++ -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+///
+/// \file
+/// This file implements the BitVector class.
+///
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_ADT_BITVECTOR_H
+#define LLVM_ADT_BITVECTOR_H
+
+#include "llvm/ADT/ArrayRef.h"
+#include "llvm/ADT/DenseMapInfo.h"
+#include "llvm/ADT/iterator_range.h"
+#include "llvm/Support/MathExtras.h"
+#include <algorithm>
+#include <cassert>
+#include <climits>
+#include <cstdint>
+#include <cstdlib>
+#include <cstring>
+#include <utility>
+
+namespace llvm {
+
+/// ForwardIterator for the bits that are set.
+/// Iterators get invalidated when resize / reserve is called.
+template <typename BitVectorT> class const_set_bits_iterator_impl {
+ const BitVectorT &Parent;
+ int Current = 0;
+
+ void advance() {
+ assert(Current != -1 && "Trying to advance past end.");
+ Current = Parent.find_next(Current);
+ }
+
+public:
+ const_set_bits_iterator_impl(const BitVectorT &Parent, int Current)
+ : Parent(Parent), Current(Current) {}
+ explicit const_set_bits_iterator_impl(const BitVectorT &Parent)
+ : const_set_bits_iterator_impl(Parent, Parent.find_first()) {}
+ const_set_bits_iterator_impl(const const_set_bits_iterator_impl &) = default;
+
+ const_set_bits_iterator_impl operator++(int) {
+ auto Prev = *this;
+ advance();
+ return Prev;
+ }
+
+ const_set_bits_iterator_impl &operator++() {
+ advance();
+ return *this;
+ }
+
+ unsigned operator*() const { return Current; }
+
+ bool operator==(const const_set_bits_iterator_impl &Other) const {
+ assert(&Parent == &Other.Parent &&
+ "Comparing iterators from different BitVectors");
+ return Current == Other.Current;
+ }
+
+ bool operator!=(const const_set_bits_iterator_impl &Other) const {
+ assert(&Parent == &Other.Parent &&
+ "Comparing iterators from different BitVectors");
+ return Current != Other.Current;
+ }
+};
+
+class BitVector {
+ typedef uintptr_t BitWord;
+
+ enum { BITWORD_SIZE = (unsigned)sizeof(BitWord) * CHAR_BIT };
+
+ static_assert(BITWORD_SIZE == 64 || BITWORD_SIZE == 32,
+ "Unsupported word size");
+
+ using Storage = SmallVector<BitWord>;
+
+ Storage Bits; // Actual bits.
+ unsigned Size; // Size of bitvector in bits.
+
+public:
+ using size_type = unsigned;
+
+ // Encapsulation of a single bit.
+ class reference {
+
+ BitWord *WordRef;
+ unsigned BitPos;
+
+ public:
+ reference(BitVector &b, unsigned Idx) {
+ WordRef = &b.Bits[Idx / BITWORD_SIZE];
+ BitPos = Idx % BITWORD_SIZE;
+ }
+
+ reference() = delete;
+ reference(const reference&) = default;
+
+ reference &operator=(reference t) {
+ *this = bool(t);
+ return *this;
+ }
+
+ reference& operator=(bool t) {
+ if (t)
+ *WordRef |= BitWord(1) << BitPos;
+ else
+ *WordRef &= ~(BitWord(1) << BitPos);
+ return *this;
+ }
+
+ operator bool() const {
+ return ((*WordRef) & (BitWord(1) << BitPos)) != 0;
+ }
+ };
+
+ typedef const_set_bits_iterator_impl<BitVector> const_set_bits_iterator;
+ typedef const_set_bits_iterator set_iterator;
+
+ const_set_bits_iterator set_bits_begin() const {
+ return const_set_bits_iterator(*this);
+ }
+ const_set_bits_iterator set_bits_end() const {
+ return const_set_bits_iterator(*this, -1);
+ }
+ iterator_range<const_set_bits_iterator> set_bits() const {
+ return make_range(set_bits_begin(), set_bits_end());
+ }
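+
+ /// Iterating the set bits, as a sketch (illustrative):
+ /// \code{.cpp}
+ ///   BitVector BV(8);
+ ///   BV.set(1);
+ ///   BV.set(4);
+ ///   unsigned Sum = 0;
+ ///   for (unsigned Idx : BV.set_bits())
+ ///     Sum += Idx; // visits 1 then 4; Sum == 5
+ /// \endcode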
+
+ /// BitVector default ctor - Creates an empty bitvector.
+ BitVector() : Size(0) {}
+
+ /// BitVector ctor - Creates a bitvector of specified number of bits. All
+ /// bits are initialized to the specified value.
+ explicit BitVector(unsigned s, bool t = false)
+ : Bits(NumBitWords(s), 0 - (BitWord)t), Size(s) {
+ if (t)
+ clear_unused_bits();
+ }
+
+ /// empty - Tests whether there are no bits in this bitvector.
+ bool empty() const { return Size == 0; }
+
+ /// size - Returns the number of bits in this bitvector.
+ size_type size() const { return Size; }
+
+ /// count - Returns the number of bits which are set.
+ size_type count() const {
+ unsigned NumBits = 0;
+ for (auto Bit : Bits)
+ NumBits += countPopulation(Bit);
+ return NumBits;
+ }
+
+ /// any - Returns true if any bit is set.
+ bool any() const {
+ return any_of(Bits, [](BitWord Bit) { return Bit != 0; });
+ }
+
+ /// all - Returns true if all bits are set.
+ bool all() const {
+ for (unsigned i = 0; i < Size / BITWORD_SIZE; ++i)
+ if (Bits[i] != ~BitWord(0))
+ return false;
+
+ // If bits remain check that they are ones. The unused bits are always zero.
+ if (unsigned Remainder = Size % BITWORD_SIZE)
+ return Bits[Size / BITWORD_SIZE] == (BitWord(1) << Remainder) - 1;
+
+ return true;
+ }
+
+ /// none - Returns true if none of the bits are set.
+ bool none() const {
+ return !any();
+ }
+
+ /// find_first_in - Returns the index of the first set / unset bit,
+ /// depending on \p Set, in the range [Begin, End).
+ /// Returns -1 if all bits in the range are unset / set.
+ int find_first_in(unsigned Begin, unsigned End, bool Set = true) const {
+ assert(Begin <= End && End <= Size);
+ if (Begin == End)
+ return -1;
+
+ unsigned FirstWord = Begin / BITWORD_SIZE;
+ unsigned LastWord = (End - 1) / BITWORD_SIZE;
+
+ // Scan every word that overlaps the range [Begin, End).
+ // The code below is based on search for the first _set_ bit. If
+ // we're searching for the first _unset_, we just take the
+ // complement of each word before we use it and apply
+ // the same method.
+ for (unsigned i = FirstWord; i <= LastWord; ++i) {
+ BitWord Copy = Bits[i];
+ if (!Set)
+ Copy = ~Copy;
+
+ if (i == FirstWord) {
+ unsigned FirstBit = Begin % BITWORD_SIZE;
+ Copy &= maskTrailingZeros<BitWord>(FirstBit);
+ }
+
+ if (i == LastWord) {
+ unsigned LastBit = (End - 1) % BITWORD_SIZE;
+ Copy &= maskTrailingOnes<BitWord>(LastBit + 1);
+ }
+ if (Copy != 0)
+ return i * BITWORD_SIZE + countTrailingZeros(Copy);
+ }
+ return -1;
+ }
+
+ /// find_last_in - Returns the index of the last set bit in the range
+ /// [Begin, End). Returns -1 if all bits in the range are unset.
+ int find_last_in(unsigned Begin, unsigned End) const {
+ assert(Begin <= End && End <= Size);
+ if (Begin == End)
+ return -1;
+
+ unsigned LastWord = (End - 1) / BITWORD_SIZE;
+ unsigned FirstWord = Begin / BITWORD_SIZE;
+
+ for (unsigned i = LastWord + 1; i >= FirstWord + 1; --i) {
+ unsigned CurrentWord = i - 1;
+
+ BitWord Copy = Bits[CurrentWord];
+ if (CurrentWord == LastWord) {
+ unsigned LastBit = (End - 1) % BITWORD_SIZE;
+ Copy &= maskTrailingOnes<BitWord>(LastBit + 1);
+ }
+
+ if (CurrentWord == FirstWord) {
+ unsigned FirstBit = Begin % BITWORD_SIZE;
+ Copy &= maskTrailingZeros<BitWord>(FirstBit);
+ }
+
+ if (Copy != 0)
+ return (CurrentWord + 1) * BITWORD_SIZE - countLeadingZeros(Copy) - 1;
+ }
+
+ return -1;
+ }
+
+ /// find_first_unset_in - Returns the index of the first unset bit in the
+ /// range [Begin, End). Returns -1 if all bits in the range are set.
+ int find_first_unset_in(unsigned Begin, unsigned End) const {
+ return find_first_in(Begin, End, /* Set = */ false);
+ }
+
+ /// find_last_unset_in - Returns the index of the last unset bit in the
+ /// range [Begin, End). Returns -1 if all bits in the range are set.
+ int find_last_unset_in(unsigned Begin, unsigned End) const {
+ assert(Begin <= End && End <= Size);
+ if (Begin == End)
+ return -1;
+
+ unsigned LastWord = (End - 1) / BITWORD_SIZE;
+ unsigned FirstWord = Begin / BITWORD_SIZE;
+
+ for (unsigned i = LastWord + 1; i >= FirstWord + 1; --i) {
+ unsigned CurrentWord = i - 1;
+
+ BitWord Copy = Bits[CurrentWord];
+ if (CurrentWord == LastWord) {
+ unsigned LastBit = (End - 1) % BITWORD_SIZE;
+ Copy |= maskTrailingZeros<BitWord>(LastBit + 1);
+ }
+
+ if (CurrentWord == FirstWord) {
+ unsigned FirstBit = Begin % BITWORD_SIZE;
+ Copy |= maskTrailingOnes<BitWord>(FirstBit);
+ }
+
+ if (Copy != ~BitWord(0)) {
+ unsigned Result =
+ (CurrentWord + 1) * BITWORD_SIZE - countLeadingOnes(Copy) - 1;
+ return Result < Size ? Result : -1;
+ }
+ }
+ return -1;
+ }
+
+ /// find_first - Returns the index of the first set bit, -1 if none
+ /// of the bits are set.
+ int find_first() const { return find_first_in(0, Size); }
+
+ /// find_last - Returns the index of the last set bit, -1 if none of the bits
+ /// are set.
+ int find_last() const { return find_last_in(0, Size); }
+
+ /// find_next - Returns the index of the next set bit following the
+ /// "Prev" bit. Returns -1 if the next set bit is not found.
+ int find_next(unsigned Prev) const { return find_first_in(Prev + 1, Size); }
+
+ /// find_prev - Returns the index of the first set bit that precedes the
+ /// bit at \p PriorTo. Returns -1 if all previous bits are unset.
+ int find_prev(unsigned PriorTo) const { return find_last_in(0, PriorTo); }
+
+ /// find_first_unset - Returns the index of the first unset bit, -1 if all
+ /// of the bits are set.
+ int find_first_unset() const { return find_first_unset_in(0, Size); }
+
+ /// find_next_unset - Returns the index of the next unset bit following the
+ /// "Prev" bit. Returns -1 if all remaining bits are set.
+ int find_next_unset(unsigned Prev) const {
+ return find_first_unset_in(Prev + 1, Size);
+ }
+
+ /// find_last_unset - Returns the index of the last unset bit, -1 if all of
+ /// the bits are set.
+ int find_last_unset() const { return find_last_unset_in(0, Size); }
+
+ /// find_prev_unset - Returns the index of the first unset bit that precedes
+ /// the bit at \p PriorTo. Returns -1 if all previous bits are set.
+ int find_prev_unset(unsigned PriorTo) {
+ return find_last_unset_in(0, PriorTo);
+ }
+
+ /// clear - Removes all bits from the bitvector.
+ void clear() {
+ Size = 0;
+ Bits.clear();
+ }
+
+ /// resize - Grow or shrink the bitvector.
+ void resize(unsigned N, bool t = false) {
+ set_unused_bits(t);
+ Size = N;
+ Bits.resize(NumBitWords(N), 0 - BitWord(t));
+ clear_unused_bits();
+ }
+
+ void reserve(unsigned N) { Bits.reserve(NumBitWords(N)); }
+
+ // Set, reset, flip
+ BitVector &set() {
+ init_words(true);
+ clear_unused_bits();
+ return *this;
+ }
+
+ BitVector &set(unsigned Idx) {
+ assert(Idx < Size && "access in bound");
+ Bits[Idx / BITWORD_SIZE] |= BitWord(1) << (Idx % BITWORD_SIZE);
+ return *this;
+ }
+
+ /// set - Efficiently set a range of bits in [I, E)
+ BitVector &set(unsigned I, unsigned E) {
+ assert(I <= E && "Attempted to set backwards range!");
+ assert(E <= size() && "Attempted to set out-of-bounds range!");
+
+ if (I == E) return *this;
+
+ if (I / BITWORD_SIZE == E / BITWORD_SIZE) {
+ BitWord EMask = BitWord(1) << (E % BITWORD_SIZE);
+ BitWord IMask = BitWord(1) << (I % BITWORD_SIZE);
+ BitWord Mask = EMask - IMask;
+ Bits[I / BITWORD_SIZE] |= Mask;
+ return *this;
+ }
+
+ BitWord PrefixMask = ~BitWord(0) << (I % BITWORD_SIZE);
+ Bits[I / BITWORD_SIZE] |= PrefixMask;
+ I = alignTo(I, BITWORD_SIZE);
+
+ for (; I + BITWORD_SIZE <= E; I += BITWORD_SIZE)
+ Bits[I / BITWORD_SIZE] = ~BitWord(0);
+
+ BitWord PostfixMask = (BitWord(1) << (E % BITWORD_SIZE)) - 1;
+ if (I < E)
+ Bits[I / BITWORD_SIZE] |= PostfixMask;
+
+ return *this;
+ }
+
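+ /// Half-open range semantics, as a sketch (illustrative):
+ /// \code{.cpp}
+ ///   BitVector BV(8);
+ ///   BV.set(2, 5); // sets bits 2, 3 and 4; bit 5 stays clear
+ /// \endcode
+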
+ BitVector &reset() {
+ init_words(false);
+ return *this;
+ }
+
+ BitVector &reset(unsigned Idx) {
+ Bits[Idx / BITWORD_SIZE] &= ~(BitWord(1) << (Idx % BITWORD_SIZE));
+ return *this;
+ }
+
+ /// reset - Efficiently reset a range of bits in [I, E)
+ BitVector &reset(unsigned I, unsigned E) {
+ assert(I <= E && "Attempted to reset backwards range!");
+ assert(E <= size() && "Attempted to reset out-of-bounds range!");
+
+ if (I == E) return *this;
+
+ if (I / BITWORD_SIZE == E / BITWORD_SIZE) {
+ BitWord EMask = BitWord(1) << (E % BITWORD_SIZE);
+ BitWord IMask = BitWord(1) << (I % BITWORD_SIZE);
+ BitWord Mask = EMask - IMask;
+ Bits[I / BITWORD_SIZE] &= ~Mask;
+ return *this;
+ }
+
+ BitWord PrefixMask = ~BitWord(0) << (I % BITWORD_SIZE);
+ Bits[I / BITWORD_SIZE] &= ~PrefixMask;
+ I = alignTo(I, BITWORD_SIZE);
+
+ for (; I + BITWORD_SIZE <= E; I += BITWORD_SIZE)
+ Bits[I / BITWORD_SIZE] = BitWord(0);
+
+ BitWord PostfixMask = (BitWord(1) << (E % BITWORD_SIZE)) - 1;
+ if (I < E)
+ Bits[I / BITWORD_SIZE] &= ~PostfixMask;
+
+ return *this;
+ }
+
+ BitVector &flip() {
+ for (auto &Bit : Bits)
+ Bit = ~Bit;
+ clear_unused_bits();
+ return *this;
+ }
+
+ BitVector &flip(unsigned Idx) {
+ Bits[Idx / BITWORD_SIZE] ^= BitWord(1) << (Idx % BITWORD_SIZE);
+ return *this;
+ }
+
+ // Indexing.
+ reference operator[](unsigned Idx) {
+ assert (Idx < Size && "Out-of-bounds Bit access.");
+ return reference(*this, Idx);
+ }
+
+ bool operator[](unsigned Idx) const {
+ assert (Idx < Size && "Out-of-bounds Bit access.");
+ BitWord Mask = BitWord(1) << (Idx % BITWORD_SIZE);
+ return (Bits[Idx / BITWORD_SIZE] & Mask) != 0;
+ }
+
+ /// Return the last element in the vector.
+ bool back() const {
+ assert(!empty() && "Getting last element of empty vector.");
+ return (*this)[size() - 1];
+ }
+
+ bool test(unsigned Idx) const {
+ return (*this)[Idx];
+ }
+
+ // Push single bit to end of vector.
+ void push_back(bool Val) {
+ unsigned OldSize = Size;
+ unsigned NewSize = Size + 1;
+
+ // Resize, which will insert zeros.
+ // If we already fit then the unused bits will be already zero.
+ if (NewSize > getBitCapacity())
+ resize(NewSize, false);
+ else
+ Size = NewSize;
+
+ // If true, set single bit.
+ if (Val)
+ set(OldSize);
+ }
+
+ /// Pop one bit from the end of the vector.
+ void pop_back() {
+ assert(!empty() && "Empty vector has no element to pop.");
+ resize(size() - 1);
+ }
+
+ /// Test if any common bits are set.
+ bool anyCommon(const BitVector &RHS) const {
+ unsigned ThisWords = Bits.size();
+ unsigned RHSWords = RHS.Bits.size();
+ for (unsigned i = 0, e = std::min(ThisWords, RHSWords); i != e; ++i)
+ if (Bits[i] & RHS.Bits[i])
+ return true;
+ return false;
+ }
+
+ // Comparison operators.
+ bool operator==(const BitVector &RHS) const {
+ if (size() != RHS.size())
+ return false;
+ unsigned NumWords = Bits.size();
+ return std::equal(Bits.begin(), Bits.begin() + NumWords, RHS.Bits.begin());
+ }
+
+ bool operator!=(const BitVector &RHS) const { return !(*this == RHS); }
+
+ /// Intersection, union, disjoint union.
+ BitVector &operator&=(const BitVector &RHS) {
+ unsigned ThisWords = Bits.size();
+ unsigned RHSWords = RHS.Bits.size();
+ unsigned i;
+ for (i = 0; i != std::min(ThisWords, RHSWords); ++i)
+ Bits[i] &= RHS.Bits[i];
+
+ // Any bits that are just in this bitvector become zero, because they aren't
+ // in the RHS bit vector. Any words only in RHS are ignored because they
+ // are already zero in the LHS.
+ for (; i != ThisWords; ++i)
+ Bits[i] = 0;
+
+ return *this;
+ }
+
+ /// reset - Reset bits that are set in RHS. Same as *this &= ~RHS.
+ BitVector &reset(const BitVector &RHS) {
+ unsigned ThisWords = Bits.size();
+ unsigned RHSWords = RHS.Bits.size();
+ for (unsigned i = 0; i != std::min(ThisWords, RHSWords); ++i)
+ Bits[i] &= ~RHS.Bits[i];
+ return *this;
+ }
+
+ /// test - Check if (This - RHS) is non-zero.
+ /// This is the same as copying *this, then calling reset(RHS) and any().
+ bool test(const BitVector &RHS) const {
+ unsigned ThisWords = Bits.size();
+ unsigned RHSWords = RHS.Bits.size();
+ unsigned i;
+ for (i = 0; i != std::min(ThisWords, RHSWords); ++i)
+ if ((Bits[i] & ~RHS.Bits[i]) != 0)
+ return true;
+
+ for (; i != ThisWords ; ++i)
+ if (Bits[i] != 0)
+ return true;
+
+ return false;
+ }
+
+ template <class F, class... ArgTys>
+ static BitVector &apply(F &&f, BitVector &Out, BitVector const &Arg,
+ ArgTys const &...Args) {
+ assert(llvm::all_of(
+ std::initializer_list<unsigned>{Args.size()...},
+ [&Arg](auto const &BV) { return Arg.size() == BV; }) &&
+ "consistent sizes");
+ Out.resize(Arg.size());
+ for (size_type I = 0, E = Arg.Bits.size(); I != E; ++I)
+ Out.Bits[I] = f(Arg.Bits[I], Args.Bits[I]...);
+ Out.clear_unused_bits();
+ return Out;
+ }
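+
+ /// A sketch of apply as a word-parallel set difference (illustrative;
+ /// X and Y are assumed to have equal sizes):
+ /// \code{.cpp}
+ ///   BitVector Out, X, Y;
+ ///   BitVector::apply([](auto W1, auto W2) { return W1 & ~W2; }, Out, X, Y);
+ /// \endcode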
+
+ BitVector &operator|=(const BitVector &RHS) {
+ if (size() < RHS.size())
+ resize(RHS.size());
+ for (size_type I = 0, E = RHS.Bits.size(); I != E; ++I)
+ Bits[I] |= RHS.Bits[I];
+ return *this;
+ }
+
+ BitVector &operator^=(const BitVector &RHS) {
+ if (size() < RHS.size())
+ resize(RHS.size());
+ for (size_type I = 0, E = RHS.Bits.size(); I != E; ++I)
+ Bits[I] ^= RHS.Bits[I];
+ return *this;
+ }
+
+ BitVector &operator>>=(unsigned N) {
+ assert(N <= Size);
+ if (LLVM_UNLIKELY(empty() || N == 0))
+ return *this;
+
+ unsigned NumWords = Bits.size();
+ assert(NumWords >= 1);
+
+ wordShr(N / BITWORD_SIZE);
+
+ unsigned BitDistance = N % BITWORD_SIZE;
+ if (BitDistance == 0)
+ return *this;
+
+ // When the shift size is not a multiple of the word size, then we have
+ // a tricky situation where each word in succession needs to extract some
+ // of the bits from the next word and or them into this word while
+ // shifting this word to make room for the new bits. This has to be done
+ // for every word in the array.
+
+ // Since we're shifting each word right, some bits will fall off the end
+ // of each word to the right, and empty space will be created on the left.
+ // The final word in the array will lose bits permanently, so starting at
+ // the beginning, work forwards shifting each word to the right, and
+ // OR'ing the low bits of the next word into the high bits of the
+ // current word.
+
+ // Example:
+ // Starting with {0xAABBCCDD, 0xEEFF0011, 0x22334455} and shifting right
+ // by 4 bits.
+ // Step 1: Word[0] >>= 4 ; 0x0ABBCCDD
+ // Step 2: Word[0] |= 0x10000000 ; 0x1ABBCCDD
+ // Step 3: Word[1] >>= 4 ; 0x0EEFF001
+ // Step 4: Word[1] |= 0x50000000 ; 0x5EEFF001
+ // Step 5: Word[2] >>= 4 ; 0x02334455
+ // Result: { 0x1ABBCCDD, 0x5EEFF001, 0x02334455 }
+ const BitWord Mask = maskTrailingOnes<BitWord>(BitDistance);
+ const unsigned LSH = BITWORD_SIZE - BitDistance;
+
+ for (unsigned I = 0; I < NumWords - 1; ++I) {
+ Bits[I] >>= BitDistance;
+ Bits[I] |= (Bits[I + 1] & Mask) << LSH;
+ }
+
+ Bits[NumWords - 1] >>= BitDistance;
+
+ return *this;
+ }
+
+ BitVector &operator<<=(unsigned N) {
+ assert(N <= Size);
+ if (LLVM_UNLIKELY(empty() || N == 0))
+ return *this;
+
+ unsigned NumWords = Bits.size();
+ assert(NumWords >= 1);
+
+ wordShl(N / BITWORD_SIZE);
+
+ unsigned BitDistance = N % BITWORD_SIZE;
+ if (BitDistance == 0)
+ return *this;
+
+ // When the shift size is not a multiple of the word size, then we have
+ // a tricky situation where each word in succession needs to extract some
+ // of the bits from the previous word and or them into this word while
+ // shifting this word to make room for the new bits. This has to be done
+ // for every word in the array. This is similar to the algorithm outlined
+ // in operator>>=, but backwards.
+
+ // Since we're shifting each word left, some bits will fall off the end
+ // of each word to the left, and empty space will be created on the right.
+ // The first word in the array will lose bits permanently, so starting at
+ // the end, work backwards shifting each word to the left, and OR'ing
+ // the high bits of the previous word into the low bits of the current
+ // word.
+
+ // Example:
+ // Starting with {0xAABBCCDD, 0xEEFF0011, 0x22334455} and shifting left
+ // by 4 bits.
+ // Step 1: Word[2] <<= 4 ; 0x23344550
+ // Step 2: Word[2] |= 0x0000000E ; 0x2334455E
+ // Step 3: Word[1] <<= 4 ; 0xEFF00110
+ // Step 4: Word[1] |= 0x0000000A ; 0xEFF0011A
+ // Step 5: Word[0] <<= 4 ; 0xABBCCDD0
+ // Result: { 0xABBCCDD0, 0xEFF0011A, 0x2334455E }
+ const BitWord Mask = maskLeadingOnes<BitWord>(BitDistance);
+ const unsigned RSH = BITWORD_SIZE - BitDistance;
+
+ for (int I = NumWords - 1; I > 0; --I) {
+ Bits[I] <<= BitDistance;
+ Bits[I] |= (Bits[I - 1] & Mask) >> RSH;
+ }
+ Bits[0] <<= BitDistance;
+ clear_unused_bits();
+
+ return *this;
+ }
+
+ void swap(BitVector &RHS) {
+ std::swap(Bits, RHS.Bits);
+ std::swap(Size, RHS.Size);
+ }
+
+ void invalid() {
+ assert(!Size && Bits.empty());
+ Size = (unsigned)-1;
+ }
+ bool isInvalid() const { return Size == (unsigned)-1; }
+
+ ArrayRef<BitWord> getData() const { return {&Bits[0], Bits.size()}; }
+
+ //===--------------------------------------------------------------------===//
+ // Portable bit mask operations.
+ //===--------------------------------------------------------------------===//
+ //
+ // These methods all operate on arrays of uint32_t, each holding 32 bits. The
+ // fixed word size makes it easier to work with literal bit vector constants
+ // in portable code.
+ //
+ // The LSB in each word is the lowest numbered bit. The size of a portable
+ // bit mask is always a whole multiple of 32 bits. If no bit mask size is
+ // given, the bit mask is assumed to cover the entire BitVector.
+
+ /// setBitsInMask - Add '1' bits from Mask to this vector. Don't resize.
+ /// This computes "*this |= Mask".
+ void setBitsInMask(const uint32_t *Mask, unsigned MaskWords = ~0u) {
+ applyMask<true, false>(Mask, MaskWords);
+ }
+
+ /// clearBitsInMask - Clear any bits in this vector that are set in Mask.
+ /// Don't resize. This computes "*this &= ~Mask".
+ void clearBitsInMask(const uint32_t *Mask, unsigned MaskWords = ~0u) {
+ applyMask<false, false>(Mask, MaskWords);
+ }
+
+ /// setBitsNotInMask - Add a bit to this vector for every '0' bit in Mask.
+ /// Don't resize. This computes "*this |= ~Mask".
+ void setBitsNotInMask(const uint32_t *Mask, unsigned MaskWords = ~0u) {
+ applyMask<true, true>(Mask, MaskWords);
+ }
+
+ /// clearBitsNotInMask - Clear a bit in this vector for every '0' bit in Mask.
+ /// Don't resize. This computes "*this &= Mask".
+ void clearBitsNotInMask(const uint32_t *Mask, unsigned MaskWords = ~0u) {
+ applyMask<false, true>(Mask, MaskWords);
+ }
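+
+ /// Mask application, as a sketch (illustrative):
+ /// \code{.cpp}
+ ///   BitVector BV(64);
+ ///   const uint32_t Mask[2] = {0x0000000F, 0x80000000};
+ ///   BV.setBitsInMask(Mask, 2); // sets bits 0-3 and bit 63
+ /// \endcode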
+
+private:
+ /// Perform a logical left shift of \p Count words by moving everything
+ /// \p Count words to the right in memory.
+ ///
+ /// While confusing, words are stored from least significant at Bits[0] to
+ /// most significant at Bits[NumWords-1]. A logical shift left, however,
+ /// moves the current least significant bit to a higher logical index, and
+ /// fills the previous least significant bits with 0. Thus, we actually
+ /// need to move the bytes of the memory to the right, not to the left.
+ /// Example:
+ /// Words = [0xBBBBAAAA, 0xDDDDFFFF, 0x00000000, 0xDDDD0000]
+ /// represents a BitVector where 0xBBBBAAAA contains the least significant
+ /// bits. So if we want to shift the BitVector left by 2 words, we need
+ /// to turn this into 0x00000000 0x00000000 0xBBBBAAAA 0xDDDDFFFF by using a
+ /// memmove which moves right, not left.
+ void wordShl(uint32_t Count) {
+ if (Count == 0)
+ return;
+
+ uint32_t NumWords = Bits.size();
+
+ // Since we always move Word-sized chunks of data with src and dest both
+ // aligned to a word-boundary, we don't need to worry about endianness
+ // here.
+ std::copy(Bits.begin(), Bits.begin() + NumWords - Count,
+ Bits.begin() + Count);
+ std::fill(Bits.begin(), Bits.begin() + Count, 0);
+ clear_unused_bits();
+ }
+
+ /// Perform a logical right shift of \p Count words by moving those
+ /// words to the left in memory. See wordShl for more information.
+ ///
+ void wordShr(uint32_t Count) {
+ if (Count == 0)
+ return;
+
+ uint32_t NumWords = Bits.size();
+
+ std::copy(Bits.begin() + Count, Bits.begin() + NumWords, Bits.begin());
+ std::fill(Bits.begin() + NumWords - Count, Bits.begin() + NumWords, 0);
+ }
+
+ int next_unset_in_word(int WordIndex, BitWord Word) const {
+ unsigned Result = WordIndex * BITWORD_SIZE + countTrailingOnes(Word);
+ return Result < size() ? Result : -1;
+ }
+
+ unsigned NumBitWords(unsigned S) const {
+ return (S + BITWORD_SIZE-1) / BITWORD_SIZE;
+ }
+
+ // Set the unused bits in the high words.
+ void set_unused_bits(bool t = true) {
+ // Set any stray high bits of the last used word.
+ if (unsigned ExtraBits = Size % BITWORD_SIZE) {
+ BitWord ExtraBitMask = ~BitWord(0) << ExtraBits;
+ if (t)
+ Bits.back() |= ExtraBitMask;
+ else
+ Bits.back() &= ~ExtraBitMask;
+ }
+ }
+
+ // Clear the unused bits in the high words.
+ void clear_unused_bits() {
+ set_unused_bits(false);
+ }
+
+ void init_words(bool t) {
+ std::fill(Bits.begin(), Bits.end(), 0 - (BitWord)t);
+ }
+
+ template<bool AddBits, bool InvertMask>
+ void applyMask(const uint32_t *Mask, unsigned MaskWords) {
+ static_assert(BITWORD_SIZE % 32 == 0, "Unsupported BitWord size.");
+ MaskWords = std::min(MaskWords, (size() + 31) / 32);
+ const unsigned Scale = BITWORD_SIZE / 32;
+ unsigned i;
+ for (i = 0; MaskWords >= Scale; ++i, MaskWords -= Scale) {
+ BitWord BW = Bits[i];
+ // This inner loop should unroll completely when BITWORD_SIZE > 32.
+ for (unsigned b = 0; b != BITWORD_SIZE; b += 32) {
+ uint32_t M = *Mask++;
+ if (InvertMask) M = ~M;
+ if (AddBits) BW |= BitWord(M) << b;
+ else BW &= ~(BitWord(M) << b);
+ }
+ Bits[i] = BW;
+ }
+ for (unsigned b = 0; MaskWords; b += 32, --MaskWords) {
+ uint32_t M = *Mask++;
+ if (InvertMask) M = ~M;
+ if (AddBits) Bits[i] |= BitWord(M) << b;
+ else Bits[i] &= ~(BitWord(M) << b);
+ }
+ if (AddBits)
+ clear_unused_bits();
+ }
+
+public:
+ /// Return the size (in bytes) of the bit vector.
+ size_type getMemorySize() const { return Bits.size() * sizeof(BitWord); }
+ size_type getBitCapacity() const { return Bits.size() * BITWORD_SIZE; }
+};
+
+inline BitVector::size_type capacity_in_bytes(const BitVector &X) {
+ return X.getMemorySize();
+}
+
+template <> struct DenseMapInfo<BitVector> {
+ static inline BitVector getEmptyKey() { return {}; }
+ static inline BitVector getTombstoneKey() {
+ BitVector V;
+ V.invalid();
+ return V;
+ }
+ static unsigned getHashValue(const BitVector &V) {
+ return DenseMapInfo<std::pair<BitVector::size_type, ArrayRef<uintptr_t>>>::
+ getHashValue(std::make_pair(V.size(), V.getData()));
+ }
+ static bool isEqual(const BitVector &LHS, const BitVector &RHS) {
+ if (LHS.isInvalid() || RHS.isInvalid())
+ return LHS.isInvalid() == RHS.isInvalid();
+ return LHS == RHS;
+ }
+};
+} // end namespace llvm
+
+namespace std {
+ /// Implement std::swap in terms of BitVector swap.
+inline void swap(llvm::BitVector &LHS, llvm::BitVector &RHS) { LHS.swap(RHS); }
+} // end namespace std
+
+#endif // LLVM_ADT_BITVECTOR_H
+
+#ifdef __GNUC__
+#pragma GCC diagnostic pop
+#endif
diff --git a/contrib/libs/llvm14/include/llvm/ADT/Bitfields.h b/contrib/libs/llvm14/include/llvm/ADT/Bitfields.h
new file mode 100644
index 0000000000..2759b15a2a
--- /dev/null
+++ b/contrib/libs/llvm14/include/llvm/ADT/Bitfields.h
@@ -0,0 +1,300 @@
+#pragma once
+
+#ifdef __GNUC__
+#pragma GCC diagnostic push
+#pragma GCC diagnostic ignored "-Wunused-parameter"
+#endif
+
+//===-- llvm/ADT/Bitfield.h - Get and Set bits in an integer ---*- C++ -*--===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+///
+/// \file
+/// This file implements methods to test, set and extract typed bits from packed
+/// unsigned integers.
+///
+/// Why not C++ bitfields?
+/// ----------------------
+/// C++ bitfields do not offer control over the bit layout nor consistent
+/// behavior when it comes to out of range values.
+/// For instance, the layout is implementation defined and adjacent bits may
+/// be packed together, but are not required to be. This is problematic when
+/// storage is sparse and data must be stored in a particular integer type.
+///
+/// The methods provided in this file ensure precise control over the
+/// layout/storage as well as protection against out of range values.
+///
+/// Usage example
+/// -------------
+/// \code{.cpp}
+/// uint8_t Storage = 0;
+///
+/// // Store and retrieve a single bit as bool.
+/// using Bool = Bitfield::Element<bool, 0, 1>;
+/// Bitfield::set<Bool>(Storage, true);
+/// EXPECT_EQ(Storage, 0b00000001);
+/// // ^
+/// EXPECT_EQ(Bitfield::get<Bool>(Storage), true);
+///
+/// // Store and retrieve a 2 bit typed enum.
+/// // Note: enum underlying type must be unsigned.
+/// enum class SuitEnum : uint8_t { CLUBS, DIAMONDS, HEARTS, SPADES };
+/// // Note: enum maximum value needs to be passed in as last parameter.
+/// using Suit = Bitfield::Element<SuitEnum, 1, 2, SuitEnum::SPADES>;
+/// Bitfield::set<Suit>(Storage, SuitEnum::HEARTS);
+/// EXPECT_EQ(Storage, 0b00000101);
+/// // ^^
+/// EXPECT_EQ(Bitfield::get<Suit>(Storage), SuitEnum::HEARTS);
+///
+/// // Store and retrieve a 5 bit value as unsigned.
+/// using Value = Bitfield::Element<unsigned, 3, 5>;
+/// Bitfield::set<Value>(Storage, 10);
+/// EXPECT_EQ(Storage, 0b01010101);
+/// // ^^^^^
+/// EXPECT_EQ(Bitfield::get<Value>(Storage), 10U);
+///
+/// // Interpret the same 5 bit value as signed.
+/// using SignedValue = Bitfield::Element<int, 3, 5>;
+/// Bitfield::set<SignedValue>(Storage, -2);
+/// EXPECT_EQ(Storage, 0b11110101);
+/// // ^^^^^
+/// EXPECT_EQ(Bitfield::get<SignedValue>(Storage), -2);
+///
+/// // Ability to efficiently test if a field is non zero.
+/// EXPECT_TRUE(Bitfield::test<Value>(Storage));
+///
+/// // Alter Storage changes value.
+/// Storage = 0;
+/// EXPECT_EQ(Bitfield::get<Bool>(Storage), false);
+/// EXPECT_EQ(Bitfield::get<Suit>(Storage), SuitEnum::CLUBS);
+/// EXPECT_EQ(Bitfield::get<Value>(Storage), 0U);
+/// EXPECT_EQ(Bitfield::get<SignedValue>(Storage), 0);
+///
+/// Storage = 255;
+/// EXPECT_EQ(Bitfield::get<Bool>(Storage), true);
+/// EXPECT_EQ(Bitfield::get<Suit>(Storage), SuitEnum::SPADES);
+/// EXPECT_EQ(Bitfield::get<Value>(Storage), 31U);
+/// EXPECT_EQ(Bitfield::get<SignedValue>(Storage), -1);
+/// \endcode
+///
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_ADT_BITFIELDS_H
+#define LLVM_ADT_BITFIELDS_H
+
+#include <cassert>
+#include <climits> // CHAR_BIT
+#include <cstddef> // size_t
+#include <cstdint> // uintXX_t
+#include <limits> // numeric_limits
+#include <type_traits>
+
+namespace llvm {
+
+namespace bitfields_details {
+
+/// A struct defining useful bit patterns for n-bits integer types.
+template <typename T, unsigned Bits> struct BitPatterns {
+ /// Bit patterns are forged using the equivalent `Unsigned` type because of
+ /// undefined operations over signed types (e.g. bitwise shift operators).
+ /// Moreover, same-size casting from signed to unsigned is well defined, but
+ /// not the other way around.
+ using Unsigned = typename std::make_unsigned<T>::type;
+ static_assert(sizeof(Unsigned) == sizeof(T), "Types must have same size");
+
+ static constexpr unsigned TypeBits = sizeof(Unsigned) * CHAR_BIT;
+ static_assert(TypeBits >= Bits, "n-bit must fit in T");
+
+ /// e.g. with TypeBits == 8 and Bits == 6.
+ static constexpr Unsigned AllZeros = Unsigned(0); // 00000000
+ static constexpr Unsigned AllOnes = ~Unsigned(0); // 11111111
+ static constexpr Unsigned Umin = AllZeros; // 00000000
+ static constexpr Unsigned Umax = AllOnes >> (TypeBits - Bits); // 00111111
+ static constexpr Unsigned SignBitMask = Unsigned(1) << (Bits - 1); // 00100000
+ static constexpr Unsigned Smax = Umax >> 1U; // 00011111
+ static constexpr Unsigned Smin = ~Smax; // 11100000
+ static constexpr Unsigned SignExtend = Unsigned(Smin << 1U); // 11000000
+};
+
+/// `Compressor` is used to manipulate the bits of a (possibly signed) integer
+/// type so it can be packed and unpacked into a `bits` sized integer.
+/// `Compressor` is specialized on signedness so no runtime cost is incurred.
+/// The `pack` method also checks that the passed in `UserValue` is valid.
+template <typename T, unsigned Bits, bool = std::is_unsigned<T>::value>
+struct Compressor {
+ static_assert(std::is_unsigned<T>::value, "T is unsigned");
+ using BP = BitPatterns<T, Bits>;
+
+ static T pack(T UserValue, T UserMaxValue) {
+ assert(UserValue <= UserMaxValue && "value is too big");
+ assert(UserValue <= BP::Umax && "value is too big");
+ return UserValue;
+ }
+
+ static T unpack(T StorageValue) { return StorageValue; }
+};
+
+template <typename T, unsigned Bits> struct Compressor<T, Bits, false> {
+ static_assert(std::is_signed<T>::value, "T is signed");
+ using BP = BitPatterns<T, Bits>;
+
+ static T pack(T UserValue, T UserMaxValue) {
+ assert(UserValue <= UserMaxValue && "value is too big");
+ assert(UserValue <= T(BP::Smax) && "value is too big");
+ assert(UserValue >= T(BP::Smin) && "value is too small");
+ if (UserValue < 0)
+ UserValue &= ~BP::SignExtend;
+ return UserValue;
+ }
+
+ static T unpack(T StorageValue) {
+ if (StorageValue >= T(BP::SignBitMask))
+ StorageValue |= BP::SignExtend;
+ return StorageValue;
+ }
+};
+
+/// Impl is where the Bitfield description and Storage are put together to interact
+/// with values.
+template <typename Bitfield, typename StorageType> struct Impl {
+ static_assert(std::is_unsigned<StorageType>::value,
+ "Storage must be unsigned");
+ using IntegerType = typename Bitfield::IntegerType;
+ using C = Compressor<IntegerType, Bitfield::Bits>;
+ using BP = BitPatterns<StorageType, Bitfield::Bits>;
+
+ static constexpr size_t StorageBits = sizeof(StorageType) * CHAR_BIT;
+ static_assert(Bitfield::FirstBit <= StorageBits, "Data must fit in mask");
+ static_assert(Bitfield::LastBit <= StorageBits, "Data must fit in mask");
+ static constexpr StorageType Mask = BP::Umax << Bitfield::Shift;
+
+ /// Checks `UserValue` is within bounds and packs it between `FirstBit` and
+ /// `LastBit` of `Packed` leaving the rest unchanged.
+ static void update(StorageType &Packed, IntegerType UserValue) {
+ const StorageType StorageValue = C::pack(UserValue, Bitfield::UserMaxValue);
+ Packed &= ~Mask;
+ Packed |= StorageValue << Bitfield::Shift;
+ }
+
+ /// Interprets bits between `FirstBit` and `LastBit` of `Packed` as
+ /// an `IntegerType`.
+ static IntegerType extract(StorageType Packed) {
+ const StorageType StorageValue = (Packed & Mask) >> Bitfield::Shift;
+ return C::unpack(StorageValue);
+ }
+
+ /// Returns a non-zero value if the bits between `FirstBit` and `LastBit` of
+ /// `Packed` are non-zero.
+ static StorageType test(StorageType Packed) { return Packed & Mask; }
+};
+
+/// `Bitfield` deals with the following types:
+/// - unsigned enums
+/// - signed and unsigned integers
+/// - `bool`
+/// Internally, though, we only manipulate integers with well defined and
+/// consistent semantics; typed enums and `bool` are excluded and replaced
+/// with their unsigned counterparts. The correct type is restored in the public
+/// API.
+template <typename T, bool = std::is_enum<T>::value>
+struct ResolveUnderlyingType {
+ using type = typename std::underlying_type<T>::type;
+};
+template <typename T> struct ResolveUnderlyingType<T, false> {
+ using type = T;
+};
+template <> struct ResolveUnderlyingType<bool, false> {
+ /// In case sizeof(bool) != 1, replace `void` by an additional
+ /// std::conditional.
+ using type = std::conditional<sizeof(bool) == 1, uint8_t, void>::type;
+};
+
+} // namespace bitfields_details
+
+/// Holds functions to get, set or test bitfields.
+struct Bitfield {
+ /// Describes an element of a Bitfield. This type is then used with the
+ /// Bitfield static member functions.
+ /// \tparam T The type of the field once in unpacked form.
+ /// \tparam Offset The position of the first bit.
+ /// \tparam Size The size of the field.
+ /// \tparam MaxValue For enums the maximum enum allowed.
+ template <typename T, unsigned Offset, unsigned Size,
+ T MaxValue = std::is_enum<T>::value
+ ? T(0) // coupled with static_assert below
+ : std::numeric_limits<T>::max()>
+ struct Element {
+ using Type = T;
+ using IntegerType =
+ typename bitfields_details::ResolveUnderlyingType<T>::type;
+ static constexpr unsigned Shift = Offset;
+ static constexpr unsigned Bits = Size;
+ static constexpr unsigned FirstBit = Offset;
+ static constexpr unsigned LastBit = Shift + Bits - 1;
+ static constexpr unsigned NextBit = Shift + Bits;
+
+ private:
+ template <typename, typename> friend struct bitfields_details::Impl;
+
+ static_assert(Bits > 0, "Bits must be non zero");
+ static constexpr size_t TypeBits = sizeof(IntegerType) * CHAR_BIT;
+ static_assert(Bits <= TypeBits, "Bits may not be greater than T size");
+ static_assert(!std::is_enum<T>::value || MaxValue != T(0),
+ "Enum Bitfields must provide a MaxValue");
+ static_assert(!std::is_enum<T>::value ||
+ std::is_unsigned<IntegerType>::value,
+ "Enum must be unsigned");
+ static_assert(std::is_integral<IntegerType>::value &&
+ std::numeric_limits<IntegerType>::is_integer,
+ "IntegerType must be an integer type");
+
+ static constexpr IntegerType UserMaxValue =
+ static_cast<IntegerType>(MaxValue);
+ };
+
+ /// Unpacks the field from the `Packed` value.
+ template <typename Bitfield, typename StorageType>
+ static typename Bitfield::Type get(StorageType Packed) {
+ using I = bitfields_details::Impl<Bitfield, StorageType>;
+ return static_cast<typename Bitfield::Type>(I::extract(Packed));
+ }
+
+ /// Return a non-zero value if the field is non-zero.
+ /// It is more efficient than `get`.
+ template <typename Bitfield, typename StorageType>
+ static StorageType test(StorageType Packed) {
+ using I = bitfields_details::Impl<Bitfield, StorageType>;
+ return I::test(Packed);
+ }
+
+ /// Sets the typed value in the provided `Packed` value.
+ /// The method asserts if the provided value is too big to fit.
+ template <typename Bitfield, typename StorageType>
+ static void set(StorageType &Packed, typename Bitfield::Type Value) {
+ using I = bitfields_details::Impl<Bitfield, StorageType>;
+ I::update(Packed, static_cast<typename Bitfield::IntegerType>(Value));
+ }
+
+ /// Returns whether the two bitfields share common bits.
+ template <typename A, typename B> static constexpr bool isOverlapping() {
+ return A::LastBit >= B::FirstBit && B::LastBit >= A::FirstBit;
+ }
+
+ template <typename A> static constexpr bool areContiguous() { return true; }
+ template <typename A, typename B, typename... Others>
+ static constexpr bool areContiguous() {
+ return A::NextBit == B::FirstBit && areContiguous<B, Others...>();
+ }
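+
+ /// A compile-time sketch (illustrative):
+ /// \code{.cpp}
+ ///   using Lo = Bitfield::Element<unsigned, 0, 4>; // bits [0, 3]
+ ///   using Hi = Bitfield::Element<unsigned, 4, 4>; // bits [4, 7]
+ ///   static_assert(Bitfield::areContiguous<Lo, Hi>(), "no gap");
+ ///   static_assert(!Bitfield::isOverlapping<Lo, Hi>(), "no overlap");
+ /// \endcode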
+};
+
+} // namespace llvm
+
+#endif // LLVM_ADT_BITFIELDS_H
+
+#ifdef __GNUC__
+#pragma GCC diagnostic pop
+#endif
diff --git a/contrib/libs/llvm14/include/llvm/ADT/BitmaskEnum.h b/contrib/libs/llvm14/include/llvm/ADT/BitmaskEnum.h
new file mode 100644
index 0000000000..0b210593da
--- /dev/null
+++ b/contrib/libs/llvm14/include/llvm/ADT/BitmaskEnum.h
@@ -0,0 +1,164 @@
+#pragma once
+
+#ifdef __GNUC__
+#pragma GCC diagnostic push
+#pragma GCC diagnostic ignored "-Wunused-parameter"
+#endif
+
+//===-- llvm/ADT/BitmaskEnum.h ----------------------------------*- C++ -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_ADT_BITMASKENUM_H
+#define LLVM_ADT_BITMASKENUM_H
+
+#include <cassert>
+#include <type_traits>
+#include <utility>
+
+#include "llvm/Support/MathExtras.h"
+
+/// LLVM_MARK_AS_BITMASK_ENUM lets you opt in an individual enum type so you can
+/// perform bitwise operations on it without putting static_cast everywhere.
+///
+/// \code
+/// enum MyEnum {
+/// E1 = 1, E2 = 2, E3 = 4, E4 = 8,
+/// LLVM_MARK_AS_BITMASK_ENUM(/* LargestValue = */ E4)
+/// };
+///
+/// void Foo() {
+/// MyEnum A = (E1 | E2) & E3 ^ ~E4; // Look, ma: No static_cast!
+/// }
+/// \endcode
+///
+/// Normally when you do a bitwise operation on an enum value, you get back an
+/// instance of the underlying type (e.g. int). But using this macro, bitwise
+/// ops on your enum will return you back instances of the enum. This is
+/// particularly useful for enums which represent a combination of flags.
+///
+/// The parameter to LLVM_MARK_AS_BITMASK_ENUM should be the largest individual
+/// value in your enum.
+///
+/// All of the enum's values must be non-negative.
+#define LLVM_MARK_AS_BITMASK_ENUM(LargestValue) \
+ LLVM_BITMASK_LARGEST_ENUMERATOR = LargestValue
+
+/// LLVM_ENABLE_BITMASK_ENUMS_IN_NAMESPACE() pulls the operator overloads used
+/// by LLVM_MARK_AS_BITMASK_ENUM into the current namespace.
+///
+/// Suppose you have an enum foo::bar::MyEnum. Before using
+/// LLVM_MARK_AS_BITMASK_ENUM on MyEnum, you must put
+/// LLVM_ENABLE_BITMASK_ENUMS_IN_NAMESPACE() somewhere inside namespace foo or
+/// namespace foo::bar. This allows the relevant operator overloads to be found
+/// by ADL.
+///
+/// You don't need to use this macro in namespace llvm; it's done at the bottom
+/// of this file.
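+///
+/// A minimal sketch (the namespace and enum names here are illustrative):
+///
+/// \code
+/// namespace foo {
+/// namespace bar {
+/// enum MyEnum {
+///   E1 = 1, E2 = 2,
+///   LLVM_MARK_AS_BITMASK_ENUM(/* LargestValue = */ E2)
+/// };
+/// LLVM_ENABLE_BITMASK_ENUMS_IN_NAMESPACE();
+/// } // namespace bar
+/// } // namespace foo
+///
+/// void f(foo::bar::MyEnum A, foo::bar::MyEnum B) {
+///   auto C = A | B; // operator| found via ADL; C is foo::bar::MyEnum.
+/// }
+/// \endcode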
+#define LLVM_ENABLE_BITMASK_ENUMS_IN_NAMESPACE() \
+ using ::llvm::BitmaskEnumDetail::operator~; \
+ using ::llvm::BitmaskEnumDetail::operator|; \
+ using ::llvm::BitmaskEnumDetail::operator&; \
+ using ::llvm::BitmaskEnumDetail::operator^; \
+ using ::llvm::BitmaskEnumDetail::operator|=; \
+ using ::llvm::BitmaskEnumDetail::operator&=; \
+ /* Force a semicolon at the end of this macro. */ \
+ using ::llvm::BitmaskEnumDetail::operator^=
+
+namespace llvm {
+
+/// Traits class to determine whether an enum has a
+/// LLVM_BITMASK_LARGEST_ENUMERATOR enumerator.
+template <typename E, typename Enable = void>
+struct is_bitmask_enum : std::false_type {};
+
+template <typename E>
+struct is_bitmask_enum<
+ E, std::enable_if_t<sizeof(E::LLVM_BITMASK_LARGEST_ENUMERATOR) >= 0>>
+ : std::true_type {};
+namespace BitmaskEnumDetail {
+
+/// Get a bitmask with 1s in all places up to the high-order bit of E's largest
+/// value.
+template <typename E> std::underlying_type_t<E> Mask() {
+ // On overflow, NextPowerOf2 returns zero with the type uint64_t, so
+ // subtracting 1 gives us the mask with all bits set, like we want.
+ return NextPowerOf2(static_cast<std::underlying_type_t<E>>(
+ E::LLVM_BITMASK_LARGEST_ENUMERATOR)) -
+ 1;
+}
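+// Illustrative example for Mask(): if the largest enumerator is 0b100,
+// NextPowerOf2 returns 0b1000, so the mask is 0b111.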
+
+/// Check that Val is in range for E, and return Val cast to E's underlying
+/// type.
+template <typename E> std::underlying_type_t<E> Underlying(E Val) {
+ auto U = static_cast<std::underlying_type_t<E>>(Val);
+ assert(U >= 0 && "Negative enum values are not allowed.");
+ assert(U <= Mask<E>() && "Enum value too large (or largest val too small?)");
+ return U;
+}
+
+constexpr unsigned bitWidth(uint64_t Value) {
+ return Value ? 1 + bitWidth(Value >> 1) : 0;
+}
+
+template <typename E, typename = std::enable_if_t<is_bitmask_enum<E>::value>>
+E operator~(E Val) {
+ return static_cast<E>(~Underlying(Val) & Mask<E>());
+}
+
+template <typename E, typename = std::enable_if_t<is_bitmask_enum<E>::value>>
+E operator|(E LHS, E RHS) {
+ return static_cast<E>(Underlying(LHS) | Underlying(RHS));
+}
+
+template <typename E, typename = std::enable_if_t<is_bitmask_enum<E>::value>>
+E operator&(E LHS, E RHS) {
+ return static_cast<E>(Underlying(LHS) & Underlying(RHS));
+}
+
+template <typename E, typename = std::enable_if_t<is_bitmask_enum<E>::value>>
+E operator^(E LHS, E RHS) {
+ return static_cast<E>(Underlying(LHS) ^ Underlying(RHS));
+}
+
+// |=, &=, and ^= return a reference to LHS, to match the behavior of the
+// operators on builtin types.
+
+template <typename E, typename = std::enable_if_t<is_bitmask_enum<E>::value>>
+E &operator|=(E &LHS, E RHS) {
+ LHS = LHS | RHS;
+ return LHS;
+}
+
+template <typename E, typename = std::enable_if_t<is_bitmask_enum<E>::value>>
+E &operator&=(E &LHS, E RHS) {
+ LHS = LHS & RHS;
+ return LHS;
+}
+
+template <typename E, typename = std::enable_if_t<is_bitmask_enum<E>::value>>
+E &operator^=(E &LHS, E RHS) {
+ LHS = LHS ^ RHS;
+ return LHS;
+}
+
+} // namespace BitmaskEnumDetail
+
+// Enable bitmask enums in namespace ::llvm and all nested namespaces.
+LLVM_ENABLE_BITMASK_ENUMS_IN_NAMESPACE();
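+
+// BitWidth is the number of bits needed to represent the largest enumerator.
+// For example (illustrative), an enum whose largest enumerator is 0b10000 has
+// a BitWidth of 5.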
+template <typename E, typename = std::enable_if_t<is_bitmask_enum<E>::value>>
+constexpr unsigned BitWidth = BitmaskEnumDetail::bitWidth(uint64_t{
+ static_cast<std::underlying_type_t<E>>(
+ E::LLVM_BITMASK_LARGEST_ENUMERATOR)});
+
+} // namespace llvm
+
+#endif
+
+#ifdef __GNUC__
+#pragma GCC diagnostic pop
+#endif
diff --git a/contrib/libs/llvm14/include/llvm/ADT/BreadthFirstIterator.h b/contrib/libs/llvm14/include/llvm/ADT/BreadthFirstIterator.h
new file mode 100644
index 0000000000..a91823d845
--- /dev/null
+++ b/contrib/libs/llvm14/include/llvm/ADT/BreadthFirstIterator.h
@@ -0,0 +1,175 @@
+#pragma once
+
+#ifdef __GNUC__
+#pragma GCC diagnostic push
+#pragma GCC diagnostic ignored "-Wunused-parameter"
+#endif
+
+//===- llvm/ADT/BreadthFirstIterator.h - Breadth First iterator -*- C++ -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+///
+/// \file
+/// This file builds on the ADT/GraphTraits.h file to build a generic breadth
+/// first graph iterator. This file exposes the following functions/types:
+///
+/// bf_begin/bf_end/bf_iterator
+/// * Normal breadth-first iteration - visit a graph level-by-level.
+///
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_ADT_BREADTHFIRSTITERATOR_H
+#define LLVM_ADT_BREADTHFIRSTITERATOR_H
+
+#include "llvm/ADT/GraphTraits.h"
+#include "llvm/ADT/None.h"
+#include "llvm/ADT/Optional.h"
+#include "llvm/ADT/SmallPtrSet.h"
+#include "llvm/ADT/iterator_range.h"
+#include <iterator>
+#include <queue>
+#include <utility>
+
+namespace llvm {
+
+// bf_iterator_storage - A private class which is used to figure out where to
+// store the visited set. We only provide a non-external variant for now.
+template <class SetType> class bf_iterator_storage {
+public:
+ SetType Visited;
+};
+
+// The visited state for the iteration is a simple set.
+template <typename NodeRef, unsigned SmallSize = 8>
+using bf_iterator_default_set = SmallPtrSet<NodeRef, SmallSize>;
+
+// Generic breadth-first search iterator.
+template <class GraphT,
+ class SetType =
+ bf_iterator_default_set<typename GraphTraits<GraphT>::NodeRef>,
+ class GT = GraphTraits<GraphT>>
+class bf_iterator : public bf_iterator_storage<SetType> {
+public:
+ using iterator_category = std::forward_iterator_tag;
+ using value_type = typename GT::NodeRef;
+ using difference_type = std::ptrdiff_t;
+ using pointer = value_type *;
+ using reference = value_type &;
+
+private:
+ using NodeRef = typename GT::NodeRef;
+ using ChildItTy = typename GT::ChildIteratorType;
+
+ // First element is the node reference, second is the next child to visit.
+ using QueueElement = std::pair<NodeRef, Optional<ChildItTy>>;
+
+ // Visit queue - used to maintain BFS ordering.
+ // Optional<> because we need markers for levels.
+ std::queue<Optional<QueueElement>> VisitQueue;
+
+ // Current level.
+ unsigned Level = 0;
+
+ inline bf_iterator(NodeRef Node) {
+ this->Visited.insert(Node);
+ Level = 0;
+
+ // Also, insert a dummy node as a marker.
+ VisitQueue.push(QueueElement(Node, None));
+ VisitQueue.push(None);
+ }
+
+ inline bf_iterator() = default;
+
+ inline void toNext() {
+ Optional<QueueElement> Head = VisitQueue.front();
+ QueueElement H = Head.getValue();
+ NodeRef Node = H.first;
+ Optional<ChildItTy> &ChildIt = H.second;
+
+ if (!ChildIt)
+ ChildIt.emplace(GT::child_begin(Node));
+ while (*ChildIt != GT::child_end(Node)) {
+ NodeRef Next = *(*ChildIt)++;
+
+ // Already visited?
+ if (this->Visited.insert(Next).second)
+ VisitQueue.push(QueueElement(Next, None));
+ }
+ VisitQueue.pop();
+
+ // Go to the next element skipping markers if needed.
+ if (!VisitQueue.empty()) {
+ Head = VisitQueue.front();
+ if (Head != None)
+ return;
+ Level += 1;
+ VisitQueue.pop();
+
+ // Don't push another marker if this is the last
+ // element.
+ if (!VisitQueue.empty())
+ VisitQueue.push(None);
+ }
+ }
+
+public:
+ // Provide static begin and end methods as our public "constructors"
+ static bf_iterator begin(const GraphT &G) {
+ return bf_iterator(GT::getEntryNode(G));
+ }
+
+ static bf_iterator end(const GraphT &G) { return bf_iterator(); }
+
+ bool operator==(const bf_iterator &RHS) const {
+ return VisitQueue == RHS.VisitQueue;
+ }
+
+ bool operator!=(const bf_iterator &RHS) const { return !(*this == RHS); }
+
+ const NodeRef &operator*() const { return VisitQueue.front()->first; }
+
+ // This is a nonstandard operator-> that dereferences the pointer an extra
+ // time so that you can actually call methods on the node, because the
+ // contained type is a pointer.
+ NodeRef operator->() const { return **this; }
+
+ bf_iterator &operator++() { // Pre-increment
+ toNext();
+ return *this;
+ }
+
+ bf_iterator operator++(int) { // Post-increment
+ bf_iterator ItCopy = *this;
+ ++*this;
+ return ItCopy;
+ }
+
+ unsigned getLevel() const { return Level; }
+};
+
+// Provide global constructors that automatically figure out correct types.
+template <class T> bf_iterator<T> bf_begin(const T &G) {
+ return bf_iterator<T>::begin(G);
+}
+
+template <class T> bf_iterator<T> bf_end(const T &G) {
+ return bf_iterator<T>::end(G);
+}
+
+// Provide an accessor method to use them in range-based patterns.
+template <class T> iterator_range<bf_iterator<T>> breadth_first(const T &G) {
+ return make_range(bf_begin(G), bf_end(G));
+}
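+
+// An illustrative sketch (assumes the graph type has a GraphTraits
+// specialization; `visit` is a placeholder):
+//
+// \code
+//   for (auto *N : breadth_first(&G))
+//     visit(N); // Nodes arrive level by level, starting at the entry node.
+// \endcode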
+
+} // end namespace llvm
+
+#endif // LLVM_ADT_BREADTHFIRSTITERATOR_H
+
+#ifdef __GNUC__
+#pragma GCC diagnostic pop
+#endif
diff --git a/contrib/libs/llvm14/include/llvm/ADT/CachedHashString.h b/contrib/libs/llvm14/include/llvm/ADT/CachedHashString.h
new file mode 100644
index 0000000000..63703d413e
--- /dev/null
+++ b/contrib/libs/llvm14/include/llvm/ADT/CachedHashString.h
@@ -0,0 +1,195 @@
+#pragma once
+
+#ifdef __GNUC__
+#pragma GCC diagnostic push
+#pragma GCC diagnostic ignored "-Wunused-parameter"
+#endif
+
+//===- llvm/ADT/CachedHashString.h - Prehashed string/StringRef -*- C++ -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+///
+/// \file
+/// This file defines CachedHashString and CachedHashStringRef. These are
+/// owning and not-owning string types that store their hash in addition to
+/// their string data.
+///
+/// Unlike std::string, CachedHashString can be used in DenseSet/DenseMap
+/// (because, unlike std::string, CachedHashString lets us have empty and
+/// tombstone values).
+///
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_ADT_CACHEDHASHSTRING_H
+#define LLVM_ADT_CACHEDHASHSTRING_H
+
+#include "llvm/ADT/DenseMapInfo.h"
+#include "llvm/ADT/StringRef.h"
+
+namespace llvm {
+
+/// A container which contains a StringRef plus a precomputed hash.
+class CachedHashStringRef {
+ const char *P;
+ uint32_t Size;
+ uint32_t Hash;
+
+public:
+ // Explicit because hashing a string isn't free.
+ explicit CachedHashStringRef(StringRef S)
+ : CachedHashStringRef(S, DenseMapInfo<StringRef>::getHashValue(S)) {}
+
+ CachedHashStringRef(StringRef S, uint32_t Hash)
+ : P(S.data()), Size(S.size()), Hash(Hash) {
+ assert(S.size() <= std::numeric_limits<uint32_t>::max());
+ }
+
+ StringRef val() const { return StringRef(P, Size); }
+ const char *data() const { return P; }
+ uint32_t size() const { return Size; }
+ uint32_t hash() const { return Hash; }
+};
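+
+// An illustrative usage sketch: hash once, then reuse across lookups.
+//
+// \code
+//   CachedHashStringRef Key(StringRef("hello")); // Hash computed once here.
+//   DenseSet<CachedHashStringRef> Set;
+//   Set.insert(Key);               // Reuses Key.hash(); no re-hashing.
+//   bool In = Set.count(Key) != 0; // Likewise.
+// \endcode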
+
+template <> struct DenseMapInfo<CachedHashStringRef> {
+ static CachedHashStringRef getEmptyKey() {
+ return CachedHashStringRef(DenseMapInfo<StringRef>::getEmptyKey(), 0);
+ }
+ static CachedHashStringRef getTombstoneKey() {
+ return CachedHashStringRef(DenseMapInfo<StringRef>::getTombstoneKey(), 1);
+ }
+ static unsigned getHashValue(const CachedHashStringRef &S) {
+ assert(!isEqual(S, getEmptyKey()) && "Cannot hash the empty key!");
+ assert(!isEqual(S, getTombstoneKey()) && "Cannot hash the tombstone key!");
+ return S.hash();
+ }
+ static bool isEqual(const CachedHashStringRef &LHS,
+ const CachedHashStringRef &RHS) {
+ return LHS.hash() == RHS.hash() &&
+ DenseMapInfo<StringRef>::isEqual(LHS.val(), RHS.val());
+ }
+};
+
+/// A container which contains a string, which it owns, plus a precomputed hash.
+///
+/// We do not null-terminate the string.
+class CachedHashString {
+ friend struct DenseMapInfo<CachedHashString>;
+
+ char *P;
+ uint32_t Size;
+ uint32_t Hash;
+
+ static char *getEmptyKeyPtr() { return DenseMapInfo<char *>::getEmptyKey(); }
+ static char *getTombstoneKeyPtr() {
+ return DenseMapInfo<char *>::getTombstoneKey();
+ }
+
+ bool isEmptyOrTombstone() const {
+ return P == getEmptyKeyPtr() || P == getTombstoneKeyPtr();
+ }
+
+ struct ConstructEmptyOrTombstoneTy {};
+
+ CachedHashString(ConstructEmptyOrTombstoneTy, char *EmptyOrTombstonePtr)
+ : P(EmptyOrTombstonePtr), Size(0), Hash(0) {
+ assert(isEmptyOrTombstone());
+ }
+
+ // TODO: Use small-string optimization to avoid allocating.
+
+public:
+ explicit CachedHashString(const char *S) : CachedHashString(StringRef(S)) {}
+
+ // Explicit because copying and hashing a string isn't free.
+ explicit CachedHashString(StringRef S)
+ : CachedHashString(S, DenseMapInfo<StringRef>::getHashValue(S)) {}
+
+ CachedHashString(StringRef S, uint32_t Hash)
+ : P(new char[S.size()]), Size(S.size()), Hash(Hash) {
+ memcpy(P, S.data(), S.size());
+ }
+
+ // Ideally this class would not be copyable. But SetVector requires copyable
+ // keys, and we want this to be usable there.
+ CachedHashString(const CachedHashString &Other)
+ : Size(Other.Size), Hash(Other.Hash) {
+ if (Other.isEmptyOrTombstone()) {
+ P = Other.P;
+ } else {
+ P = new char[Size];
+ memcpy(P, Other.P, Size);
+ }
+ }
+
+ CachedHashString &operator=(CachedHashString Other) {
+ swap(*this, Other);
+ return *this;
+ }
+
+ CachedHashString(CachedHashString &&Other) noexcept
+ : P(Other.P), Size(Other.Size), Hash(Other.Hash) {
+ Other.P = getEmptyKeyPtr();
+ }
+
+ ~CachedHashString() {
+ if (!isEmptyOrTombstone())
+ delete[] P;
+ }
+
+ StringRef val() const { return StringRef(P, Size); }
+ uint32_t size() const { return Size; }
+ uint32_t hash() const { return Hash; }
+
+ operator StringRef() const { return val(); }
+ operator CachedHashStringRef() const {
+ return CachedHashStringRef(val(), Hash);
+ }
+
+ friend void swap(CachedHashString &LHS, CachedHashString &RHS) {
+ using std::swap;
+ swap(LHS.P, RHS.P);
+ swap(LHS.Size, RHS.Size);
+ swap(LHS.Hash, RHS.Hash);
+ }
+};
+
+template <> struct DenseMapInfo<CachedHashString> {
+ static CachedHashString getEmptyKey() {
+ return CachedHashString(CachedHashString::ConstructEmptyOrTombstoneTy(),
+ CachedHashString::getEmptyKeyPtr());
+ }
+ static CachedHashString getTombstoneKey() {
+ return CachedHashString(CachedHashString::ConstructEmptyOrTombstoneTy(),
+ CachedHashString::getTombstoneKeyPtr());
+ }
+ static unsigned getHashValue(const CachedHashString &S) {
+ assert(!isEqual(S, getEmptyKey()) && "Cannot hash the empty key!");
+ assert(!isEqual(S, getTombstoneKey()) && "Cannot hash the tombstone key!");
+ return S.hash();
+ }
+ static bool isEqual(const CachedHashString &LHS,
+ const CachedHashString &RHS) {
+ if (LHS.hash() != RHS.hash())
+ return false;
+ if (LHS.P == CachedHashString::getEmptyKeyPtr())
+ return RHS.P == CachedHashString::getEmptyKeyPtr();
+ if (LHS.P == CachedHashString::getTombstoneKeyPtr())
+ return RHS.P == CachedHashString::getTombstoneKeyPtr();
+
+ // This is safe because if RHS.P is the empty or tombstone key, it will have
+ // length 0, so we'll never dereference its pointer.
+ return LHS.val() == RHS.val();
+ }
+};
+
+} // namespace llvm
+
+#endif
+
+#ifdef __GNUC__
+#pragma GCC diagnostic pop
+#endif
diff --git a/contrib/libs/llvm14/include/llvm/ADT/CoalescingBitVector.h b/contrib/libs/llvm14/include/llvm/ADT/CoalescingBitVector.h
new file mode 100644
index 0000000000..a5777c6ef7
--- /dev/null
+++ b/contrib/libs/llvm14/include/llvm/ADT/CoalescingBitVector.h
@@ -0,0 +1,462 @@
+#pragma once
+
+#ifdef __GNUC__
+#pragma GCC diagnostic push
+#pragma GCC diagnostic ignored "-Wunused-parameter"
+#endif
+
+//===- llvm/ADT/CoalescingBitVector.h - A coalescing bitvector --*- C++ -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+///
+/// \file
+/// A bitvector that uses an IntervalMap to coalesce adjacent elements
+/// into intervals.
+///
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_ADT_COALESCINGBITVECTOR_H
+#define LLVM_ADT_COALESCINGBITVECTOR_H
+
+#include "llvm/ADT/IntervalMap.h"
+#include "llvm/ADT/STLExtras.h"
+#include "llvm/ADT/SmallVector.h"
+#include "llvm/ADT/iterator_range.h"
+#include "llvm/Support/Debug.h"
+#include "llvm/Support/raw_ostream.h"
+
+#include <initializer_list>
+
+namespace llvm {
+
+/// A bitvector that, under the hood, relies on an IntervalMap to coalesce
+/// elements into intervals. Good for representing sets which predominantly
+/// contain contiguous ranges. Bad for representing sets with lots of gaps
+/// between elements.
+///
+/// Compared to SparseBitVector, CoalescingBitVector offers more predictable
+/// performance for non-sequential find() operations.
+///
+/// \tparam IndexT - The type of the index into the bitvector.
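+///
+/// A minimal usage sketch (the values are illustrative):
+///
+/// \code
+///   CoalescingBitVector<unsigned>::Allocator Alloc;
+///   CoalescingBitVector<unsigned> BV(Alloc);
+///   BV.set({1, 2, 3, 100}); // Stored as the intervals [1, 3] and [100, 100].
+///   bool B = BV.test(2);    // true
+///   BV.reset(2);            // Splits [1, 3] into [1, 1] and [3, 3].
+/// \endcode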
+template <typename IndexT> class CoalescingBitVector {
+ static_assert(std::is_unsigned<IndexT>::value,
+ "Index must be an unsigned integer.");
+
+ using ThisT = CoalescingBitVector<IndexT>;
+
+ /// An interval map for closed integer ranges. The mapped values are unused.
+ using MapT = IntervalMap<IndexT, char>;
+
+ using UnderlyingIterator = typename MapT::const_iterator;
+
+ using IntervalT = std::pair<IndexT, IndexT>;
+
+public:
+ using Allocator = typename MapT::Allocator;
+
+ /// Construct by passing in a CoalescingBitVector<IndexT>::Allocator
+ /// reference.
+ CoalescingBitVector(Allocator &Alloc)
+ : Alloc(&Alloc), Intervals(Alloc) {}
+
+ /// \name Copy/move constructors and assignment operators.
+ /// @{
+
+ CoalescingBitVector(const ThisT &Other)
+ : Alloc(Other.Alloc), Intervals(*Other.Alloc) {
+ set(Other);
+ }
+
+ ThisT &operator=(const ThisT &Other) {
+ clear();
+ set(Other);
+ return *this;
+ }
+
+ CoalescingBitVector(ThisT &&Other) = delete;
+ ThisT &operator=(ThisT &&Other) = delete;
+
+ /// @}
+
+ /// Clear all the bits.
+ void clear() { Intervals.clear(); }
+
+ /// Check whether no bits are set.
+ bool empty() const { return Intervals.empty(); }
+
+ /// Count the number of set bits.
+ unsigned count() const {
+ unsigned Bits = 0;
+ for (auto It = Intervals.begin(), End = Intervals.end(); It != End; ++It)
+ Bits += 1 + It.stop() - It.start();
+ return Bits;
+ }
+
+ /// Set the bit at \p Index.
+ ///
+ /// This method does /not/ support setting a bit that has already been set,
+ /// for efficiency reasons. If possible, restructure your code to not set the
+ /// same bit multiple times, or use \ref test_and_set.
+ void set(IndexT Index) {
+ assert(!test(Index) && "Setting already-set bits not supported/efficient, "
+ "IntervalMap will assert");
+ insert(Index, Index);
+ }
+
+ /// Set the bits set in \p Other.
+ ///
+ /// This method does /not/ support setting already-set bits, see \ref set
+ /// for the rationale. For a safe set union operation, use \ref operator|=.
+ void set(const ThisT &Other) {
+ for (auto It = Other.Intervals.begin(), End = Other.Intervals.end();
+ It != End; ++It)
+ insert(It.start(), It.stop());
+ }
+
+ /// Set the bits at \p Indices. Used for testing, primarily.
+ void set(std::initializer_list<IndexT> Indices) {
+ for (IndexT Index : Indices)
+ set(Index);
+ }
+
+ /// Check whether the bit at \p Index is set.
+ bool test(IndexT Index) const {
+ const auto It = Intervals.find(Index);
+ if (It == Intervals.end())
+ return false;
+ assert(It.stop() >= Index && "Interval must end after Index");
+ return It.start() <= Index;
+ }
+
+ /// Set the bit at \p Index. Supports setting an already-set bit.
+ void test_and_set(IndexT Index) {
+ if (!test(Index))
+ set(Index);
+ }
+
+ /// Reset the bit at \p Index. Supports resetting an already-unset bit.
+ void reset(IndexT Index) {
+ auto It = Intervals.find(Index);
+ if (It == Intervals.end())
+ return;
+
+ // Split the interval containing Index into up to two parts: one from
+ // [Start, Index-1] and another from [Index+1, Stop]. If Index is equal to
+ // either Start or Stop, we create one new interval. If Index is equal to
+ // both Start and Stop, we simply erase the existing interval.
+ IndexT Start = It.start();
+ if (Index < Start)
+ // The index was not set.
+ return;
+ IndexT Stop = It.stop();
+ assert(Index <= Stop && "Wrong interval for index");
+ It.erase();
+ if (Start < Index)
+ insert(Start, Index - 1);
+ if (Index < Stop)
+ insert(Index + 1, Stop);
+ }
+
+ /// Set union. If \p RHS is guaranteed to not overlap with this, \ref set may
+ /// be a faster alternative.
+ void operator|=(const ThisT &RHS) {
+ // Get the overlaps between the two interval maps.
+ SmallVector<IntervalT, 8> Overlaps;
+ getOverlaps(RHS, Overlaps);
+
+ // Insert the non-overlapping parts of all the intervals from RHS.
+ for (auto It = RHS.Intervals.begin(), End = RHS.Intervals.end();
+ It != End; ++It) {
+ IndexT Start = It.start();
+ IndexT Stop = It.stop();
+ SmallVector<IntervalT, 8> NonOverlappingParts;
+ getNonOverlappingParts(Start, Stop, Overlaps, NonOverlappingParts);
+ for (IntervalT AdditivePortion : NonOverlappingParts)
+ insert(AdditivePortion.first, AdditivePortion.second);
+ }
+ }
+
+ /// Set intersection.
+ void operator&=(const ThisT &RHS) {
+ // Get the overlaps between the two interval maps (i.e. the intersection).
+ SmallVector<IntervalT, 8> Overlaps;
+ getOverlaps(RHS, Overlaps);
+ // Rebuild the interval map, including only the overlaps.
+ clear();
+ for (IntervalT Overlap : Overlaps)
+ insert(Overlap.first, Overlap.second);
+ }
+
+ /// Reset all bits present in \p Other.
+ void intersectWithComplement(const ThisT &Other) {
+ SmallVector<IntervalT, 8> Overlaps;
+ if (!getOverlaps(Other, Overlaps)) {
+ // If there is no overlap with Other, the intersection is empty.
+ return;
+ }
+
+ // Delete the overlapping intervals. Split up intervals that only partially
+ // intersect an overlap.
+ for (IntervalT Overlap : Overlaps) {
+ IndexT OlapStart, OlapStop;
+ std::tie(OlapStart, OlapStop) = Overlap;
+
+ auto It = Intervals.find(OlapStart);
+ IndexT CurrStart = It.start();
+ IndexT CurrStop = It.stop();
+ assert(CurrStart <= OlapStart && OlapStop <= CurrStop &&
+ "Expected some intersection!");
+
+ // Split the overlap interval into up to two parts: one from [CurrStart,
+ // OlapStart-1] and another from [OlapStop+1, CurrStop]. If OlapStart is
+ // equal to CurrStart, the first split interval is unnecessary. Ditto for
+ // when OlapStop is equal to CurrStop, we omit the second split interval.
+ It.erase();
+ if (CurrStart < OlapStart)
+ insert(CurrStart, OlapStart - 1);
+ if (OlapStop < CurrStop)
+ insert(OlapStop + 1, CurrStop);
+ }
+ }
+
+ bool operator==(const ThisT &RHS) const {
+ // We cannot just use std::equal because it checks the dereferenced values
+ // of an iterator pair for equality, not the iterators themselves. In our
+ // case that results in comparison of the (unused) IntervalMap values.
+ auto ItL = Intervals.begin();
+ auto ItR = RHS.Intervals.begin();
+ while (ItL != Intervals.end() && ItR != RHS.Intervals.end() &&
+ ItL.start() == ItR.start() && ItL.stop() == ItR.stop()) {
+ ++ItL;
+ ++ItR;
+ }
+ return ItL == Intervals.end() && ItR == RHS.Intervals.end();
+ }
+
+ bool operator!=(const ThisT &RHS) const { return !operator==(RHS); }
+
+ class const_iterator {
+ friend class CoalescingBitVector;
+
+ public:
+ using iterator_category = std::forward_iterator_tag;
+ using value_type = IndexT;
+ using difference_type = std::ptrdiff_t;
+ using pointer = value_type *;
+ using reference = value_type &;
+
+ private:
+ // For performance reasons, make the offset at the end different from the
+ // one used in \ref begin, to optimize the common `It == end()` pattern.
+ static constexpr unsigned kIteratorAtTheEndOffset = ~0u;
+
+ UnderlyingIterator MapIterator;
+ unsigned OffsetIntoMapIterator = 0;
+
+ // Querying the start/stop of an IntervalMap iterator can be very expensive.
+ // Cache these values for performance reasons.
+ IndexT CachedStart = IndexT();
+ IndexT CachedStop = IndexT();
+
+ void setToEnd() {
+ OffsetIntoMapIterator = kIteratorAtTheEndOffset;
+ CachedStart = IndexT();
+ CachedStop = IndexT();
+ }
+
+ /// MapIterator has just changed, reset the cached state to point to the
+ /// start of the new underlying iterator.
+ void resetCache() {
+ if (MapIterator.valid()) {
+ OffsetIntoMapIterator = 0;
+ CachedStart = MapIterator.start();
+ CachedStop = MapIterator.stop();
+ } else {
+ setToEnd();
+ }
+ }
+
+ /// Advance the iterator to \p Index, if it is contained within the current
+ /// interval. The public-facing method which supports advancing past the
+ /// current interval is \ref advanceToLowerBound.
+ void advanceTo(IndexT Index) {
+ assert(Index <= CachedStop && "Cannot advance to OOB index");
+ if (Index < CachedStart)
+ // We're already past this index.
+ return;
+ OffsetIntoMapIterator = Index - CachedStart;
+ }
+
+ const_iterator(UnderlyingIterator MapIt) : MapIterator(MapIt) {
+ resetCache();
+ }
+
+ public:
+ const_iterator() { setToEnd(); }
+
+ bool operator==(const const_iterator &RHS) const {
+ // Do /not/ compare MapIterator for equality, as this is very expensive.
+ // The cached start/stop values make that check unnecessary.
+ return std::tie(OffsetIntoMapIterator, CachedStart, CachedStop) ==
+ std::tie(RHS.OffsetIntoMapIterator, RHS.CachedStart,
+ RHS.CachedStop);
+ }
+
+ bool operator!=(const const_iterator &RHS) const {
+ return !operator==(RHS);
+ }
+
+ IndexT operator*() const { return CachedStart + OffsetIntoMapIterator; }
+
+ const_iterator &operator++() { // Pre-increment (++It).
+ if (CachedStart + OffsetIntoMapIterator < CachedStop) {
+ // Keep going within the current interval.
+ ++OffsetIntoMapIterator;
+ } else {
+ // We reached the end of the current interval: advance.
+ ++MapIterator;
+ resetCache();
+ }
+ return *this;
+ }
+
+ const_iterator operator++(int) { // Post-increment (It++).
+ const_iterator tmp = *this;
+ operator++();
+ return tmp;
+ }
+
+ /// Advance the iterator to the first set bit AT, OR AFTER, \p Index. If
+ /// no such set bit exists, advance to end(). This is like std::lower_bound.
+ /// This is useful if \p Index is close to the current iterator position.
+ /// However, unlike \ref find(), this has worst-case O(n) performance.
+ void advanceToLowerBound(IndexT Index) {
+ if (OffsetIntoMapIterator == kIteratorAtTheEndOffset)
+ return;
+
+ // Advance to the first interval containing (or past) Index, or to end().
+ while (Index > CachedStop) {
+ ++MapIterator;
+ resetCache();
+ if (OffsetIntoMapIterator == kIteratorAtTheEndOffset)
+ return;
+ }
+
+ advanceTo(Index);
+ }
+ };
+
+ const_iterator begin() const { return const_iterator(Intervals.begin()); }
+
+ const_iterator end() const { return const_iterator(); }
+
+ /// Return an iterator pointing to the first set bit AT, OR AFTER, \p Index.
+ /// If no such set bit exists, return end(). This is like std::lower_bound.
+ /// This has worst-case logarithmic performance (roughly O(log(gaps between
+ /// contiguous ranges))).
+ const_iterator find(IndexT Index) const {
+ auto UnderlyingIt = Intervals.find(Index);
+ if (UnderlyingIt == Intervals.end())
+ return end();
+ auto It = const_iterator(UnderlyingIt);
+ It.advanceTo(Index);
+ return It;
+ }
+
+ /// Return a range iterator which iterates over all of the set bits in the
+ /// half-open range [Start, End).
+ iterator_range<const_iterator> half_open_range(IndexT Start,
+ IndexT End) const {
+ assert(Start < End && "Not a valid range");
+ auto StartIt = find(Start);
+ if (StartIt == end() || *StartIt >= End)
+ return {end(), end()};
+ auto EndIt = StartIt;
+ EndIt.advanceToLowerBound(End);
+ return {StartIt, EndIt};
+ }
+
+ void print(raw_ostream &OS) const {
+ OS << "{";
+ for (auto It = Intervals.begin(), End = Intervals.end(); It != End;
+ ++It) {
+ OS << "[" << It.start();
+ if (It.start() != It.stop())
+ OS << ", " << It.stop();
+ OS << "]";
+ }
+ OS << "}";
+ }
+
+#if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
+ LLVM_DUMP_METHOD void dump() const {
+ // LLDB swallows the first line of output after calling dump(). Add
+ // newlines before/after the braces to work around this.
+ dbgs() << "\n";
+ print(dbgs());
+ dbgs() << "\n";
+ }
+#endif
+
+private:
+ void insert(IndexT Start, IndexT End) { Intervals.insert(Start, End, 0); }
+
+ /// Record the overlaps between \p this and \p Other in \p Overlaps. Return
+ /// true if there is any overlap.
+ bool getOverlaps(const ThisT &Other,
+ SmallVectorImpl<IntervalT> &Overlaps) const {
+ for (IntervalMapOverlaps<MapT, MapT> I(Intervals, Other.Intervals);
+ I.valid(); ++I)
+ Overlaps.emplace_back(I.start(), I.stop());
+ assert(llvm::is_sorted(Overlaps,
+ [](IntervalT LHS, IntervalT RHS) {
+ return LHS.second < RHS.first;
+ }) &&
+ "Overlaps must be sorted");
+ return !Overlaps.empty();
+ }
+
+ /// Given the set of overlaps between this and some other bitvector, and an
+ /// interval [Start, Stop] from that bitvector, determine the portions of the
+ /// interval which do not overlap with this.
+ void getNonOverlappingParts(IndexT Start, IndexT Stop,
+ const SmallVectorImpl<IntervalT> &Overlaps,
+ SmallVectorImpl<IntervalT> &NonOverlappingParts) {
+ IndexT NextUncoveredBit = Start;
+ for (IntervalT Overlap : Overlaps) {
+ IndexT OlapStart, OlapStop;
+ std::tie(OlapStart, OlapStop) = Overlap;
+
+ // [Start;Stop] and [OlapStart;OlapStop] overlap iff OlapStart <= Stop
+ // and Start <= OlapStop.
+ bool DoesOverlap = OlapStart <= Stop && Start <= OlapStop;
+ if (!DoesOverlap)
+ continue;
+
+ // Cover the range [NextUncoveredBit, OlapStart). This puts the start of
+ // the next uncovered range at OlapStop+1.
+ if (NextUncoveredBit < OlapStart)
+ NonOverlappingParts.emplace_back(NextUncoveredBit, OlapStart - 1);
+ NextUncoveredBit = OlapStop + 1;
+ if (NextUncoveredBit > Stop)
+ break;
+ }
+ if (NextUncoveredBit <= Stop)
+ NonOverlappingParts.emplace_back(NextUncoveredBit, Stop);
+ }
+
+ Allocator *Alloc;
+ MapT Intervals;
+};
+
+} // namespace llvm
+
+#endif // LLVM_ADT_COALESCINGBITVECTOR_H
+
+#ifdef __GNUC__
+#pragma GCC diagnostic pop
+#endif
diff --git a/contrib/libs/llvm14/include/llvm/ADT/CombinationGenerator.h b/contrib/libs/llvm14/include/llvm/ADT/CombinationGenerator.h
new file mode 100644
index 0000000000..35bdc781f3
--- /dev/null
+++ b/contrib/libs/llvm14/include/llvm/ADT/CombinationGenerator.h
@@ -0,0 +1,159 @@
+#pragma once
+
+#ifdef __GNUC__
+#pragma GCC diagnostic push
+#pragma GCC diagnostic ignored "-Wunused-parameter"
+#endif
+
+//===-- llvm/ADT/CombinationGenerator.h ------------------------*- C++ -*--===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+///
+/// \file
+/// Combination generator.
+///
+/// Example: given input {{0, 1}, {2}, {3, 4}} it will produce the following
+/// combinations: {0, 2, 3}, {0, 2, 4}, {1, 2, 3}, {1, 2, 4}.
+///
+/// It is useful to think of input as vector-of-vectors, where the
+/// outer vector is the variable space, and inner vector is choice space.
+/// The number of choices for each variable can be different.
+///
+/// As for implementation, it is useful to think of this as a weird number,
+/// where each digit (==variable) may have different base (==number of choices).
+/// Thus modelling 'produce next combination' is exactly analogous to
+/// incrementing a number: increment the lowest digit (pick the next choice for
+/// the variable), and if it wrapped around to the beginning, increment the
+/// next digit.
+///
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_ADT_COMBINATIONGENERATOR_H
+#define LLVM_ADT_COMBINATIONGENERATOR_H
+
+#include "llvm/ADT/ArrayRef.h"
+#include "llvm/ADT/STLFunctionalExtras.h"
+#include "llvm/ADT/SmallVector.h"
+#include <cassert>
+#include <cstring>
+
+namespace llvm {
+
+template <typename choice_type, typename choices_storage_type,
+ int variable_smallsize>
+class CombinationGenerator {
+ template <typename T> struct WrappingIterator {
+ using value_type = T;
+
+ const ArrayRef<value_type> Range;
+ typename decltype(Range)::const_iterator Position;
+
+ // Rewind the tape so that the position points at the beginning again.
+ void rewind() { Position = Range.begin(); }
+
+ // Advance position forward, possibly wrapping to the beginning.
+ // Returns whether the wrap happened.
+ bool advance() {
+ ++Position;
+ bool Wrapped = Position == Range.end();
+ if (Wrapped)
+ rewind();
+ return Wrapped;
+ }
+
+ // Get the value at which we are currently pointing.
+ const value_type &operator*() const { return *Position; }
+
+ WrappingIterator(ArrayRef<value_type> Range_) : Range(Range_) {
+ assert(!Range.empty() && "The range must not be empty.");
+ rewind();
+ }
+ };
+
+ const ArrayRef<choices_storage_type> VariablesChoices;
+
+ void performGeneration(
+ const function_ref<bool(ArrayRef<choice_type>)> Callback) const {
+ SmallVector<WrappingIterator<choice_type>, variable_smallsize>
+ VariablesState;
+
+ // 'increment' of the whole VariablesState is defined identically to the
+ // increment of a number: starting from the least significant element,
+ // increment it, and if it wrapped, then propagate that carry by also
+ // incrementing next (more significant) element.
+ auto IncrementState =
+ [](MutableArrayRef<WrappingIterator<choice_type>> VariablesState)
+ -> bool {
+ for (WrappingIterator<choice_type> &Variable :
+ llvm::reverse(VariablesState)) {
+ bool Wrapped = Variable.advance();
+ if (!Wrapped)
+ return false; // There you go, next combination is ready.
+ // We have a carry - increment the more significant variable next.
+ }
+ return true; // MSB variable wrapped, no more unique combinations.
+ };
+
+ // Initialize the per-variable state to refer to the possible choices for
+ // that variable.
+ VariablesState.reserve(VariablesChoices.size());
+ for (ArrayRef<choice_type> VC : VariablesChoices)
+ VariablesState.emplace_back(VC);
+
+ // Temporary buffer to store each combination before performing Callback.
+ SmallVector<choice_type, variable_smallsize> CurrentCombination;
+ CurrentCombination.resize(VariablesState.size());
+
+ while (true) {
+ // Gather the currently-selected variable choices into a vector.
+ for (auto I : llvm::zip(VariablesState, CurrentCombination))
+ std::get<1>(I) = *std::get<0>(I);
+ // And pass the new combination into the callback, as intended.
+ if (/*Abort=*/Callback(CurrentCombination))
+ return;
+ // And tick the state to the next combination, which will be unique.
+ if (IncrementState(VariablesState))
+ return; // All combinations produced.
+ }
+ }
+
+public:
+ CombinationGenerator(ArrayRef<choices_storage_type> VariablesChoices_)
+ : VariablesChoices(VariablesChoices_) {
+#ifndef NDEBUG
+ assert(!VariablesChoices.empty() && "There should be some variables.");
+ llvm::for_each(VariablesChoices, [](ArrayRef<choice_type> VariableChoices) {
+ assert(!VariableChoices.empty() &&
+ "There must always be some choice, at least a placeholder one.");
+ });
+#endif
+ }
+
+ // How many combinations can we produce, max?
+ // This is at most how many times the callback will be called.
+ size_t numCombinations() const {
+ size_t NumVariants = 1;
+ for (ArrayRef<choice_type> VariableChoices : VariablesChoices)
+ NumVariants *= VariableChoices.size();
+ assert(NumVariants >= 1 &&
+ "We should always end up producing at least one combination");
+ return NumVariants;
+ }
+
+ // Actually perform exhaustive combination generation.
+ // Each result will be passed into the callback.
+ void generate(const function_ref<bool(ArrayRef<choice_type>)> Callback) {
+ performGeneration(Callback);
+ }
+};
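+
+// An illustrative usage sketch (the choice values are hypothetical):
+//
+// \code
+//   SmallVector<SmallVector<int, 2>, 2> Choices = {{0, 1}, {2, 3}};
+//   CombinationGenerator<int, SmallVector<int, 2>, 2> G(Choices);
+//   G.generate([](ArrayRef<int> Combination) {
+//     // Receives {0, 2}, {0, 3}, {1, 2}, {1, 3}, in that order.
+//     return false; // Return true to abort early.
+//   });
+// \endcode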
+
+} // namespace llvm
+
+#endif
+
+#ifdef __GNUC__
+#pragma GCC diagnostic pop
+#endif
diff --git a/contrib/libs/llvm14/include/llvm/ADT/DAGDeltaAlgorithm.h b/contrib/libs/llvm14/include/llvm/ADT/DAGDeltaAlgorithm.h
new file mode 100644
index 0000000000..dc08d81408
--- /dev/null
+++ b/contrib/libs/llvm14/include/llvm/ADT/DAGDeltaAlgorithm.h
@@ -0,0 +1,89 @@
+#pragma once
+
+#ifdef __GNUC__
+#pragma GCC diagnostic push
+#pragma GCC diagnostic ignored "-Wunused-parameter"
+#endif
+
+//===- DAGDeltaAlgorithm.h - A DAG Minimization Algorithm ------*- C++ -*--===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_ADT_DAGDELTAALGORITHM_H
+#define LLVM_ADT_DAGDELTAALGORITHM_H
+
+#include <set>
+#include <utility>
+#include <vector>
+
+namespace llvm {
+
+/// DAGDeltaAlgorithm - Implements a "delta debugging" algorithm for minimizing
+/// directed acyclic graphs using a predicate function.
+///
+/// The result of the algorithm is a subset of the input change set which is
+/// guaranteed to satisfy the predicate, assuming that the input set did. For
+/// well formed predicates, the result set is guaranteed to be such that
+/// removing any single element not required by the dependencies on the other
+/// elements would falsify the predicate.
+///
+/// The DAG should be used to represent dependencies in the changes which are
+/// likely to hold across the predicate function. That is, for a particular
+/// changeset S and predicate P:
+///
+/// P(S) => P(S union pred(S))
+///
+/// The minimization algorithm uses this dependency information to attempt to
+/// eagerly prune large subsets of changes. As with \see DeltaAlgorithm, the DAG
+/// is not required to satisfy this property, but the algorithm will run
+/// substantially fewer tests with appropriate dependencies. \see DeltaAlgorithm
+/// for more information on the properties which the predicate function itself
+/// should satisfy.
+class DAGDeltaAlgorithm {
+ virtual void anchor();
+
+public:
+ using change_ty = unsigned;
+ using edge_ty = std::pair<change_ty, change_ty>;
+
+ // FIXME: Use a decent data structure.
+ using changeset_ty = std::set<change_ty>;
+ using changesetlist_ty = std::vector<changeset_ty>;
+
+public:
+ virtual ~DAGDeltaAlgorithm() = default;
+
+ /// Run - Minimize the DAG formed by the \p Changes vertices and the
+ /// \p Dependencies edges by executing \see ExecuteOneTest() on subsets of
+ /// changes and returning the smallest set which still satisfies the test
+ /// predicate and the input \p Dependencies.
+ ///
+ /// \param Changes The list of changes.
+ ///
+ /// \param Dependencies The list of dependencies amongst changes. For each
+ /// (x,y) in \p Dependencies, both x and y must be in \p Changes. The
+ /// minimization algorithm guarantees that for each tested change set S,
+ /// \f$ x \in S \f$ implies \f$ y \in S \f$. It is an error to have cyclic
+ /// dependencies.
+ changeset_ty Run(const changeset_ty &Changes,
+ const std::vector<edge_ty> &Dependencies);
+
+ /// UpdatedSearchState - Callback used when the search state changes.
+ virtual void UpdatedSearchState(const changeset_ty &Changes,
+ const changesetlist_ty &Sets,
+ const changeset_ty &Required) {}
+
+ /// ExecuteOneTest - Execute a single test predicate on the change set \p S.
+ virtual bool ExecuteOneTest(const changeset_ty &S) = 0;
+};
+
+} // end namespace llvm
+
+#endif // LLVM_ADT_DAGDELTAALGORITHM_H
+
+#ifdef __GNUC__
+#pragma GCC diagnostic pop
+#endif
diff --git a/contrib/libs/llvm14/include/llvm/ADT/DeltaAlgorithm.h b/contrib/libs/llvm14/include/llvm/ADT/DeltaAlgorithm.h
new file mode 100644
index 0000000000..670ca4d877
--- /dev/null
+++ b/contrib/libs/llvm14/include/llvm/ADT/DeltaAlgorithm.h
@@ -0,0 +1,103 @@
+#pragma once
+
+#ifdef __GNUC__
+#pragma GCC diagnostic push
+#pragma GCC diagnostic ignored "-Wunused-parameter"
+#endif
+
+//===- DeltaAlgorithm.h - A Set Minimization Algorithm ---------*- C++ -*--===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_ADT_DELTAALGORITHM_H
+#define LLVM_ADT_DELTAALGORITHM_H
+
+#include <set>
+#include <vector>
+
+namespace llvm {
+
+/// DeltaAlgorithm - Implements the delta debugging algorithm (A. Zeller '99)
+/// for minimizing arbitrary sets using a predicate function.
+///
+/// The result of the algorithm is a subset of the input change set which is
+/// guaranteed to satisfy the predicate, assuming that the input set did. For
+/// well formed predicates, the result set is guaranteed to be such that
+/// removing any single element would falsify the predicate.
+///
+/// For best results the predicate function *should* (but need not) satisfy
+/// certain properties, in particular:
+/// (1) The predicate should return false on an empty set and true on the full
+/// set.
+/// (2) If the predicate returns true for a set of changes, it should return
+/// true for all supersets of that set.
+///
+/// It is not an error to provide a predicate that does not satisfy these
+/// requirements, and the algorithm will generally produce reasonable
+/// results. However, it may run substantially more tests than with a good
+/// predicate.
+class DeltaAlgorithm {
+public:
+ using change_ty = unsigned;
+ // FIXME: Use a decent data structure.
+ using changeset_ty = std::set<change_ty>;
+ using changesetlist_ty = std::vector<changeset_ty>;
+
+private:
+ /// Cache of failed test results. Successful test results are never cached
+ /// since we always reduce following a success.
+ std::set<changeset_ty> FailedTestsCache;
+
+ /// GetTestResult - Get the test result for the \p Changes from the
+ /// cache, executing the test if necessary.
+ ///
+ /// \param Changes - The change set to test.
+ /// \return - The test result.
+ bool GetTestResult(const changeset_ty &Changes);
+
+ /// Split - Partition a set of changes \p S into one or two subsets.
+ void Split(const changeset_ty &S, changesetlist_ty &Res);
+
+ /// Delta - Minimize a set of \p Changes which has been partitioned into
+ /// smaller sets, by attempting to remove individual subsets.
+ changeset_ty Delta(const changeset_ty &Changes,
+ const changesetlist_ty &Sets);
+
+ /// Search - Search for a subset (or subsets) in \p Sets which can be
+ /// removed from \p Changes while still satisfying the predicate.
+ ///
+ /// \param Res - On success, a subset of Changes which satisfies the
+ /// predicate.
+ /// \return - True on success.
+ bool Search(const changeset_ty &Changes, const changesetlist_ty &Sets,
+ changeset_ty &Res);
+
+protected:
+ /// UpdatedSearchState - Callback used when the search state changes.
+ virtual void UpdatedSearchState(const changeset_ty &Changes,
+ const changesetlist_ty &Sets) {}
+
+ /// ExecuteOneTest - Execute a single test predicate on the change set \p S.
+ virtual bool ExecuteOneTest(const changeset_ty &S) = 0;
+
+ DeltaAlgorithm& operator=(const DeltaAlgorithm&) = default;
+
+public:
+ virtual ~DeltaAlgorithm();
+
+ /// Run - Minimize the set \p Changes by executing \see ExecuteOneTest() on
+ /// subsets of changes and returning the smallest set which still satisfies
+ /// the test predicate.
+ changeset_ty Run(const changeset_ty &Changes);
+};
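+
+// An illustrative subclass sketch (the predicate is hypothetical):
+//
+// \code
+//   class MyDelta final : public DeltaAlgorithm {
+//     bool ExecuteOneTest(const changeset_ty &S) override {
+//       // "Interesting" iff the set still contains both 3 and 5.
+//       return S.count(3) && S.count(5);
+//     }
+//   };
+//
+//   // For a well formed predicate like this, Run({1, 2, 3, 4, 5}) would
+//   // minimize to {3, 5}.
+// \endcode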
+
+} // end namespace llvm
+
+#endif // LLVM_ADT_DELTAALGORITHM_H
+
+#ifdef __GNUC__
+#pragma GCC diagnostic pop
+#endif
diff --git a/contrib/libs/llvm14/include/llvm/ADT/DenseMap.h b/contrib/libs/llvm14/include/llvm/ADT/DenseMap.h
new file mode 100644
index 0000000000..beb57f9249
--- /dev/null
+++ b/contrib/libs/llvm14/include/llvm/ADT/DenseMap.h
@@ -0,0 +1,1320 @@
+#pragma once
+
+#ifdef __GNUC__
+#pragma GCC diagnostic push
+#pragma GCC diagnostic ignored "-Wunused-parameter"
+#endif
+
+//===- llvm/ADT/DenseMap.h - Dense probed hash table ------------*- C++ -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+///
+/// \file
+/// This file defines the DenseMap class.
+///
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_ADT_DENSEMAP_H
+#define LLVM_ADT_DENSEMAP_H
+
+#include "llvm/ADT/DenseMapInfo.h"
+#include "llvm/ADT/EpochTracker.h"
+#include "llvm/Support/AlignOf.h"
+#include "llvm/Support/Compiler.h"
+#include "llvm/Support/MathExtras.h"
+#include "llvm/Support/MemAlloc.h"
+#include "llvm/Support/ReverseIteration.h"
+#include "llvm/Support/type_traits.h"
+#include <algorithm>
+#include <cassert>
+#include <cstddef>
+#include <cstring>
+#include <initializer_list>
+#include <iterator>
+#include <new>
+#include <type_traits>
+#include <utility>
+
+namespace llvm {
+
+namespace detail {
+
+// We extend a pair to allow users to override the bucket type with their own
+// implementation without requiring two members.
+template <typename KeyT, typename ValueT>
+struct DenseMapPair : public std::pair<KeyT, ValueT> {
+ using std::pair<KeyT, ValueT>::pair;
+
+ KeyT &getFirst() { return std::pair<KeyT, ValueT>::first; }
+ const KeyT &getFirst() const { return std::pair<KeyT, ValueT>::first; }
+ ValueT &getSecond() { return std::pair<KeyT, ValueT>::second; }
+ const ValueT &getSecond() const { return std::pair<KeyT, ValueT>::second; }
+};
+
+} // end namespace detail
+
+template <typename KeyT, typename ValueT,
+ typename KeyInfoT = DenseMapInfo<KeyT>,
+ typename Bucket = llvm::detail::DenseMapPair<KeyT, ValueT>,
+ bool IsConst = false>
+class DenseMapIterator;
+
+template <typename DerivedT, typename KeyT, typename ValueT, typename KeyInfoT,
+ typename BucketT>
+class DenseMapBase : public DebugEpochBase {
+ template <typename T>
+ using const_arg_type_t = typename const_pointer_or_const_ref<T>::type;
+
+public:
+ using size_type = unsigned;
+ using key_type = KeyT;
+ using mapped_type = ValueT;
+ using value_type = BucketT;
+
+ using iterator = DenseMapIterator<KeyT, ValueT, KeyInfoT, BucketT>;
+ using const_iterator =
+ DenseMapIterator<KeyT, ValueT, KeyInfoT, BucketT, true>;
+
+ inline iterator begin() {
+ // When the map is empty, avoid the overhead of advancing/retreating past
+ // empty buckets.
+ if (empty())
+ return end();
+ if (shouldReverseIterate<KeyT>())
+ return makeIterator(getBucketsEnd() - 1, getBuckets(), *this);
+ return makeIterator(getBuckets(), getBucketsEnd(), *this);
+ }
+ inline iterator end() {
+ return makeIterator(getBucketsEnd(), getBucketsEnd(), *this, true);
+ }
+ inline const_iterator begin() const {
+ if (empty())
+ return end();
+ if (shouldReverseIterate<KeyT>())
+ return makeConstIterator(getBucketsEnd() - 1, getBuckets(), *this);
+ return makeConstIterator(getBuckets(), getBucketsEnd(), *this);
+ }
+ inline const_iterator end() const {
+ return makeConstIterator(getBucketsEnd(), getBucketsEnd(), *this, true);
+ }
+
+ LLVM_NODISCARD bool empty() const {
+ return getNumEntries() == 0;
+ }
+ unsigned size() const { return getNumEntries(); }
+
+ /// Grow the DenseMap so that it can contain at least \p NumEntries items
+ /// before resizing again.
+ void reserve(size_type NumEntries) {
+ auto NumBuckets = getMinBucketToReserveForEntries(NumEntries);
+ incrementEpoch();
+ if (NumBuckets > getNumBuckets())
+ grow(NumBuckets);
+ }
+
+ void clear() {
+ incrementEpoch();
+ if (getNumEntries() == 0 && getNumTombstones() == 0) return;
+
+ // If the capacity of the array is huge, and the # elements used is small,
+ // shrink the array.
+ if (getNumEntries() * 4 < getNumBuckets() && getNumBuckets() > 64) {
+ shrink_and_clear();
+ return;
+ }
+
+ const KeyT EmptyKey = getEmptyKey(), TombstoneKey = getTombstoneKey();
+ if (std::is_trivially_destructible<ValueT>::value) {
+ // Use a simpler loop when values don't need destruction.
+ for (BucketT *P = getBuckets(), *E = getBucketsEnd(); P != E; ++P)
+ P->getFirst() = EmptyKey;
+ } else {
+ unsigned NumEntries = getNumEntries();
+ for (BucketT *P = getBuckets(), *E = getBucketsEnd(); P != E; ++P) {
+ if (!KeyInfoT::isEqual(P->getFirst(), EmptyKey)) {
+ if (!KeyInfoT::isEqual(P->getFirst(), TombstoneKey)) {
+ P->getSecond().~ValueT();
+ --NumEntries;
+ }
+ P->getFirst() = EmptyKey;
+ }
+ }
+ assert(NumEntries == 0 && "Node count imbalance!");
+ }
+ setNumEntries(0);
+ setNumTombstones(0);
+ }
+
+ /// Return 1 if the specified key is in the map, 0 otherwise.
+ size_type count(const_arg_type_t<KeyT> Val) const {
+ const BucketT *TheBucket;
+ return LookupBucketFor(Val, TheBucket) ? 1 : 0;
+ }
+
+ iterator find(const_arg_type_t<KeyT> Val) {
+ BucketT *TheBucket;
+ if (LookupBucketFor(Val, TheBucket))
+ return makeIterator(TheBucket,
+ shouldReverseIterate<KeyT>() ? getBuckets()
+ : getBucketsEnd(),
+ *this, true);
+ return end();
+ }
+ const_iterator find(const_arg_type_t<KeyT> Val) const {
+ const BucketT *TheBucket;
+ if (LookupBucketFor(Val, TheBucket))
+ return makeConstIterator(TheBucket,
+ shouldReverseIterate<KeyT>() ? getBuckets()
+ : getBucketsEnd(),
+ *this, true);
+ return end();
+ }
+
+ /// Alternate version of find() which allows a different, and possibly
+ /// less expensive, key type.
+ /// The DenseMapInfo is responsible for supplying methods
+ /// getHashValue(LookupKeyT) and isEqual(LookupKeyT, KeyT) for each key
+ /// type used.
+ template<class LookupKeyT>
+ iterator find_as(const LookupKeyT &Val) {
+ BucketT *TheBucket;
+ if (LookupBucketFor(Val, TheBucket))
+ return makeIterator(TheBucket,
+ shouldReverseIterate<KeyT>() ? getBuckets()
+ : getBucketsEnd(),
+ *this, true);
+ return end();
+ }
+ template<class LookupKeyT>
+ const_iterator find_as(const LookupKeyT &Val) const {
+ const BucketT *TheBucket;
+ if (LookupBucketFor(Val, TheBucket))
+ return makeConstIterator(TheBucket,
+ shouldReverseIterate<KeyT>() ? getBuckets()
+ : getBucketsEnd(),
+ *this, true);
+ return end();
+ }
+
+ /// lookup - Return the entry for the specified key, or a default
+ /// constructed value if no such entry exists.
+ ValueT lookup(const_arg_type_t<KeyT> Val) const {
+ const BucketT *TheBucket;
+ if (LookupBucketFor(Val, TheBucket))
+ return TheBucket->getSecond();
+ return ValueT();
+ }
+
+ // Inserts a key/value pair into the map if the key isn't already present.
+ // If the key is already in the map, it returns false and doesn't update the
+ // value.
+ std::pair<iterator, bool> insert(const std::pair<KeyT, ValueT> &KV) {
+ return try_emplace(KV.first, KV.second);
+ }
+
+ // Inserts a key/value pair into the map if the key isn't already present.
+ // If the key is already in the map, it returns false and doesn't update the
+ // value.
+ std::pair<iterator, bool> insert(std::pair<KeyT, ValueT> &&KV) {
+ return try_emplace(std::move(KV.first), std::move(KV.second));
+ }
+
+ // Inserts a key/value pair into the map if the key isn't already present.
+ // The value is constructed in-place if the key is not in the map, otherwise
+ // it is not moved.
+ template <typename... Ts>
+ std::pair<iterator, bool> try_emplace(KeyT &&Key, Ts &&... Args) {
+ BucketT *TheBucket;
+ if (LookupBucketFor(Key, TheBucket))
+ return std::make_pair(makeIterator(TheBucket,
+ shouldReverseIterate<KeyT>()
+ ? getBuckets()
+ : getBucketsEnd(),
+ *this, true),
+ false); // Already in map.
+
+ // Otherwise, insert the new element.
+ TheBucket =
+ InsertIntoBucket(TheBucket, std::move(Key), std::forward<Ts>(Args)...);
+ return std::make_pair(makeIterator(TheBucket,
+ shouldReverseIterate<KeyT>()
+ ? getBuckets()
+ : getBucketsEnd(),
+ *this, true),
+ true);
+ }
+
+ // Inserts a key/value pair into the map if the key isn't already present.
+ // The value is constructed in-place if the key is not in the map, otherwise
+ // it is not moved.
+ template <typename... Ts>
+ std::pair<iterator, bool> try_emplace(const KeyT &Key, Ts &&... Args) {
+ BucketT *TheBucket;
+ if (LookupBucketFor(Key, TheBucket))
+ return std::make_pair(makeIterator(TheBucket,
+ shouldReverseIterate<KeyT>()
+ ? getBuckets()
+ : getBucketsEnd(),
+ *this, true),
+ false); // Already in map.
+
+ // Otherwise, insert the new element.
+ TheBucket = InsertIntoBucket(TheBucket, Key, std::forward<Ts>(Args)...);
+ return std::make_pair(makeIterator(TheBucket,
+ shouldReverseIterate<KeyT>()
+ ? getBuckets()
+ : getBucketsEnd(),
+ *this, true),
+ true);
+ }
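+
+ // An illustrative usage sketch (the keys and values are hypothetical):
+ //
+ // \code
+ //   DenseMap<int, StringRef> M;
+ //   auto R = M.try_emplace(1, "one"); // R.second == true; value inserted.
+ //   M.try_emplace(1, "uno");          // Key already present; stays "one".
+ // \endcode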
+
+ /// Alternate version of insert() which allows a different, and possibly
+ /// less expensive, key type.
+ /// The DenseMapInfo is responsible for supplying methods
+ /// getHashValue(LookupKeyT) and isEqual(LookupKeyT, KeyT) for each key
+ /// type used.
+ template <typename LookupKeyT>
+ std::pair<iterator, bool> insert_as(std::pair<KeyT, ValueT> &&KV,
+ const LookupKeyT &Val) {
+ BucketT *TheBucket;
+ if (LookupBucketFor(Val, TheBucket))
+ return std::make_pair(makeIterator(TheBucket,
+ shouldReverseIterate<KeyT>()
+ ? getBuckets()
+ : getBucketsEnd(),
+ *this, true),
+ false); // Already in map.
+
+ // Otherwise, insert the new element.
+ TheBucket = InsertIntoBucketWithLookup(TheBucket, std::move(KV.first),
+ std::move(KV.second), Val);
+ return std::make_pair(makeIterator(TheBucket,
+ shouldReverseIterate<KeyT>()
+ ? getBuckets()
+ : getBucketsEnd(),
+ *this, true),
+ true);
+ }
+
+ /// insert - Range insertion of pairs.
+ template<typename InputIt>
+ void insert(InputIt I, InputIt E) {
+ for (; I != E; ++I)
+ insert(*I);
+ }
+
+ bool erase(const KeyT &Val) {
+ BucketT *TheBucket;
+ if (!LookupBucketFor(Val, TheBucket))
+ return false; // not in map.
+
+ TheBucket->getSecond().~ValueT();
+ TheBucket->getFirst() = getTombstoneKey();
+ decrementNumEntries();
+ incrementNumTombstones();
+ return true;
+ }
+ void erase(iterator I) {
+ BucketT *TheBucket = &*I;
+ TheBucket->getSecond().~ValueT();
+ TheBucket->getFirst() = getTombstoneKey();
+ decrementNumEntries();
+ incrementNumTombstones();
+ }
+
+ value_type& FindAndConstruct(const KeyT &Key) {
+ BucketT *TheBucket;
+ if (LookupBucketFor(Key, TheBucket))
+ return *TheBucket;
+
+ return *InsertIntoBucket(TheBucket, Key);
+ }
+
+ ValueT &operator[](const KeyT &Key) {
+ return FindAndConstruct(Key).second;
+ }
+
+ value_type& FindAndConstruct(KeyT &&Key) {
+ BucketT *TheBucket;
+ if (LookupBucketFor(Key, TheBucket))
+ return *TheBucket;
+
+ return *InsertIntoBucket(TheBucket, std::move(Key));
+ }
+
+ ValueT &operator[](KeyT &&Key) {
+ return FindAndConstruct(std::move(Key)).second;
+ }
+
+ /// isPointerIntoBucketsArray - Return true if the specified pointer points
+ /// somewhere into the DenseMap's array of buckets (i.e. either to a key or
+ /// value in the DenseMap).
+ bool isPointerIntoBucketsArray(const void *Ptr) const {
+ return Ptr >= getBuckets() && Ptr < getBucketsEnd();
+ }
+
+ /// getPointerIntoBucketsArray() - Return an opaque pointer into the buckets
+ /// array. In conjunction with the previous method, this can be used to
+ /// determine whether an insertion caused the DenseMap to reallocate.
+ const void *getPointerIntoBucketsArray() const { return getBuckets(); }
+
+protected:
+ DenseMapBase() = default;
+
+ void destroyAll() {
+ if (getNumBuckets() == 0) // Nothing to do.
+ return;
+
+ const KeyT EmptyKey = getEmptyKey(), TombstoneKey = getTombstoneKey();
+ for (BucketT *P = getBuckets(), *E = getBucketsEnd(); P != E; ++P) {
+ if (!KeyInfoT::isEqual(P->getFirst(), EmptyKey) &&
+ !KeyInfoT::isEqual(P->getFirst(), TombstoneKey))
+ P->getSecond().~ValueT();
+ P->getFirst().~KeyT();
+ }
+ }
+
+ void initEmpty() {
+ setNumEntries(0);
+ setNumTombstones(0);
+
+ assert((getNumBuckets() & (getNumBuckets()-1)) == 0 &&
+ "# initial buckets must be a power of two!");
+ const KeyT EmptyKey = getEmptyKey();
+ for (BucketT *B = getBuckets(), *E = getBucketsEnd(); B != E; ++B)
+ ::new (&B->getFirst()) KeyT(EmptyKey);
+ }
+
+  /// Returns the number of buckets to allocate to ensure that the DenseMap
+  /// can accommodate \p NumEntries without needing to grow().
+ unsigned getMinBucketToReserveForEntries(unsigned NumEntries) {
+ // Ensure that "NumEntries * 4 < NumBuckets * 3"
+ if (NumEntries == 0)
+ return 0;
+    // +1 is required because of the strict inequality above.
+    // For example, if NumEntries is 48, NumEntries * 4 / 3 is exactly 64,
+    // so the strict inequality requires at least 65 buckets, which
+    // NextPowerOf2 rounds up to 128.
+ return NextPowerOf2(NumEntries * 4 / 3 + 1);
+ }
+
+ void moveFromOldBuckets(BucketT *OldBucketsBegin, BucketT *OldBucketsEnd) {
+ initEmpty();
+
+ // Insert all the old elements.
+ const KeyT EmptyKey = getEmptyKey();
+ const KeyT TombstoneKey = getTombstoneKey();
+ for (BucketT *B = OldBucketsBegin, *E = OldBucketsEnd; B != E; ++B) {
+ if (!KeyInfoT::isEqual(B->getFirst(), EmptyKey) &&
+ !KeyInfoT::isEqual(B->getFirst(), TombstoneKey)) {
+ // Insert the key/value into the new table.
+ BucketT *DestBucket;
+ bool FoundVal = LookupBucketFor(B->getFirst(), DestBucket);
+ (void)FoundVal; // silence warning.
+ assert(!FoundVal && "Key already in new map?");
+ DestBucket->getFirst() = std::move(B->getFirst());
+ ::new (&DestBucket->getSecond()) ValueT(std::move(B->getSecond()));
+ incrementNumEntries();
+
+ // Free the value.
+ B->getSecond().~ValueT();
+ }
+ B->getFirst().~KeyT();
+ }
+ }
+
+ template <typename OtherBaseT>
+ void copyFrom(
+ const DenseMapBase<OtherBaseT, KeyT, ValueT, KeyInfoT, BucketT> &other) {
+ assert(&other != this);
+ assert(getNumBuckets() == other.getNumBuckets());
+
+ setNumEntries(other.getNumEntries());
+ setNumTombstones(other.getNumTombstones());
+
+ if (std::is_trivially_copyable<KeyT>::value &&
+ std::is_trivially_copyable<ValueT>::value)
+ memcpy(reinterpret_cast<void *>(getBuckets()), other.getBuckets(),
+ getNumBuckets() * sizeof(BucketT));
+ else
+ for (size_t i = 0; i < getNumBuckets(); ++i) {
+ ::new (&getBuckets()[i].getFirst())
+ KeyT(other.getBuckets()[i].getFirst());
+ if (!KeyInfoT::isEqual(getBuckets()[i].getFirst(), getEmptyKey()) &&
+ !KeyInfoT::isEqual(getBuckets()[i].getFirst(), getTombstoneKey()))
+ ::new (&getBuckets()[i].getSecond())
+ ValueT(other.getBuckets()[i].getSecond());
+ }
+ }
+
+ static unsigned getHashValue(const KeyT &Val) {
+ return KeyInfoT::getHashValue(Val);
+ }
+
+ template<typename LookupKeyT>
+ static unsigned getHashValue(const LookupKeyT &Val) {
+ return KeyInfoT::getHashValue(Val);
+ }
+
+ static const KeyT getEmptyKey() {
+ static_assert(std::is_base_of<DenseMapBase, DerivedT>::value,
+ "Must pass the derived type to this template!");
+ return KeyInfoT::getEmptyKey();
+ }
+
+ static const KeyT getTombstoneKey() {
+ return KeyInfoT::getTombstoneKey();
+ }
+
+private:
+ iterator makeIterator(BucketT *P, BucketT *E,
+ DebugEpochBase &Epoch,
+ bool NoAdvance=false) {
+ if (shouldReverseIterate<KeyT>()) {
+ BucketT *B = P == getBucketsEnd() ? getBuckets() : P + 1;
+ return iterator(B, E, Epoch, NoAdvance);
+ }
+ return iterator(P, E, Epoch, NoAdvance);
+ }
+
+ const_iterator makeConstIterator(const BucketT *P, const BucketT *E,
+ const DebugEpochBase &Epoch,
+ const bool NoAdvance=false) const {
+ if (shouldReverseIterate<KeyT>()) {
+ const BucketT *B = P == getBucketsEnd() ? getBuckets() : P + 1;
+ return const_iterator(B, E, Epoch, NoAdvance);
+ }
+ return const_iterator(P, E, Epoch, NoAdvance);
+ }
+
+ unsigned getNumEntries() const {
+ return static_cast<const DerivedT *>(this)->getNumEntries();
+ }
+
+ void setNumEntries(unsigned Num) {
+ static_cast<DerivedT *>(this)->setNumEntries(Num);
+ }
+
+ void incrementNumEntries() {
+ setNumEntries(getNumEntries() + 1);
+ }
+
+ void decrementNumEntries() {
+ setNumEntries(getNumEntries() - 1);
+ }
+
+ unsigned getNumTombstones() const {
+ return static_cast<const DerivedT *>(this)->getNumTombstones();
+ }
+
+ void setNumTombstones(unsigned Num) {
+ static_cast<DerivedT *>(this)->setNumTombstones(Num);
+ }
+
+ void incrementNumTombstones() {
+ setNumTombstones(getNumTombstones() + 1);
+ }
+
+ void decrementNumTombstones() {
+ setNumTombstones(getNumTombstones() - 1);
+ }
+
+ const BucketT *getBuckets() const {
+ return static_cast<const DerivedT *>(this)->getBuckets();
+ }
+
+ BucketT *getBuckets() {
+ return static_cast<DerivedT *>(this)->getBuckets();
+ }
+
+ unsigned getNumBuckets() const {
+ return static_cast<const DerivedT *>(this)->getNumBuckets();
+ }
+
+ BucketT *getBucketsEnd() {
+ return getBuckets() + getNumBuckets();
+ }
+
+ const BucketT *getBucketsEnd() const {
+ return getBuckets() + getNumBuckets();
+ }
+
+ void grow(unsigned AtLeast) {
+ static_cast<DerivedT *>(this)->grow(AtLeast);
+ }
+
+ void shrink_and_clear() {
+ static_cast<DerivedT *>(this)->shrink_and_clear();
+ }
+
+ template <typename KeyArg, typename... ValueArgs>
+ BucketT *InsertIntoBucket(BucketT *TheBucket, KeyArg &&Key,
+ ValueArgs &&... Values) {
+ TheBucket = InsertIntoBucketImpl(Key, Key, TheBucket);
+
+ TheBucket->getFirst() = std::forward<KeyArg>(Key);
+ ::new (&TheBucket->getSecond()) ValueT(std::forward<ValueArgs>(Values)...);
+ return TheBucket;
+ }
+
+ template <typename LookupKeyT>
+ BucketT *InsertIntoBucketWithLookup(BucketT *TheBucket, KeyT &&Key,
+ ValueT &&Value, LookupKeyT &Lookup) {
+ TheBucket = InsertIntoBucketImpl(Key, Lookup, TheBucket);
+
+ TheBucket->getFirst() = std::move(Key);
+ ::new (&TheBucket->getSecond()) ValueT(std::move(Value));
+ return TheBucket;
+ }
+
+ template <typename LookupKeyT>
+ BucketT *InsertIntoBucketImpl(const KeyT &Key, const LookupKeyT &Lookup,
+ BucketT *TheBucket) {
+ incrementEpoch();
+
+ // If the load of the hash table is more than 3/4, or if fewer than 1/8 of
+ // the buckets are empty (meaning that many are filled with tombstones),
+ // grow the table.
+ //
+    // The latter case is tricky. For example, if we had one empty bucket with
+    // tons of tombstones, failing lookups (e.g. for insertion) would have to
+    // probe almost the entire table until it found the empty bucket. If the
+    // table were completely filled with tombstones, no lookup would ever
+    // succeed, causing infinite loops in lookup.
+ unsigned NewNumEntries = getNumEntries() + 1;
+ unsigned NumBuckets = getNumBuckets();
+ if (LLVM_UNLIKELY(NewNumEntries * 4 >= NumBuckets * 3)) {
+ this->grow(NumBuckets * 2);
+ LookupBucketFor(Lookup, TheBucket);
+ NumBuckets = getNumBuckets();
+ } else if (LLVM_UNLIKELY(NumBuckets-(NewNumEntries+getNumTombstones()) <=
+ NumBuckets/8)) {
+ this->grow(NumBuckets);
+ LookupBucketFor(Lookup, TheBucket);
+ }
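+    // Worked example (illustrative, not in the original header): with
+    // NumBuckets == 64, the first branch fires once NewNumEntries reaches 48
+    // (48 * 4 == 64 * 3), doubling to 128 buckets; the second branch rehashes
+    // at the same size once the empty buckets drop to 64/8 == 8 or fewer.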
+ assert(TheBucket);
+
+ // Only update the state after we've grown our bucket space appropriately
+ // so that when growing buckets we have self-consistent entry count.
+ incrementNumEntries();
+
+ // If we are writing over a tombstone, remember this.
+ const KeyT EmptyKey = getEmptyKey();
+ if (!KeyInfoT::isEqual(TheBucket->getFirst(), EmptyKey))
+ decrementNumTombstones();
+
+ return TheBucket;
+ }
+
+ /// LookupBucketFor - Lookup the appropriate bucket for Val, returning it in
+ /// FoundBucket. If the bucket contains the key and a value, this returns
+ /// true, otherwise it returns a bucket with an empty marker or tombstone and
+ /// returns false.
+ template<typename LookupKeyT>
+ bool LookupBucketFor(const LookupKeyT &Val,
+ const BucketT *&FoundBucket) const {
+ const BucketT *BucketsPtr = getBuckets();
+ const unsigned NumBuckets = getNumBuckets();
+
+ if (NumBuckets == 0) {
+ FoundBucket = nullptr;
+ return false;
+ }
+
+ // FoundTombstone - Keep track of whether we find a tombstone while probing.
+ const BucketT *FoundTombstone = nullptr;
+ const KeyT EmptyKey = getEmptyKey();
+ const KeyT TombstoneKey = getTombstoneKey();
+ assert(!KeyInfoT::isEqual(Val, EmptyKey) &&
+ !KeyInfoT::isEqual(Val, TombstoneKey) &&
+ "Empty/Tombstone value shouldn't be inserted into map!");
+
+ unsigned BucketNo = getHashValue(Val) & (NumBuckets-1);
+ unsigned ProbeAmt = 1;
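+    // Illustrative note (not in the original header): the probe sequence
+    // below visits H, H+1, H+3, H+6, H+10, ... (mod NumBuckets), i.e. offsets
+    // that are triangular numbers. For a power-of-two NumBuckets this touches
+    // every bucket exactly once per NumBuckets steps, so the loop terminates.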
+ while (true) {
+ const BucketT *ThisBucket = BucketsPtr + BucketNo;
+ // Found Val's bucket? If so, return it.
+ if (LLVM_LIKELY(KeyInfoT::isEqual(Val, ThisBucket->getFirst()))) {
+ FoundBucket = ThisBucket;
+ return true;
+ }
+
+      // If we found an empty bucket, the key doesn't exist in the map.
+      // Hand back a bucket for the caller to insert into and return false.
+ if (LLVM_LIKELY(KeyInfoT::isEqual(ThisBucket->getFirst(), EmptyKey))) {
+ // If we've already seen a tombstone while probing, fill it in instead
+ // of the empty bucket we eventually probed to.
+ FoundBucket = FoundTombstone ? FoundTombstone : ThisBucket;
+ return false;
+ }
+
+      // If this is a tombstone, remember it. If Val ends up not in the map,
+      // we prefer to return it rather than a bucket that would require more
+      // probing.
+ if (KeyInfoT::isEqual(ThisBucket->getFirst(), TombstoneKey) &&
+ !FoundTombstone)
+ FoundTombstone = ThisBucket; // Remember the first tombstone found.
+
+      // Otherwise, it's a hash collision or a tombstone; continue quadratic
+      // probing.
+ BucketNo += ProbeAmt++;
+ BucketNo &= (NumBuckets-1);
+ }
+ }
+
+ template <typename LookupKeyT>
+ bool LookupBucketFor(const LookupKeyT &Val, BucketT *&FoundBucket) {
+ const BucketT *ConstFoundBucket;
+ bool Result = const_cast<const DenseMapBase *>(this)
+ ->LookupBucketFor(Val, ConstFoundBucket);
+ FoundBucket = const_cast<BucketT *>(ConstFoundBucket);
+ return Result;
+ }
+
+public:
+ /// Return the approximate size (in bytes) of the actual map.
+ /// This is just the raw memory used by DenseMap.
+ /// If entries are pointers to objects, the size of the referenced objects
+ /// are not included.
+ size_t getMemorySize() const {
+ return getNumBuckets() * sizeof(BucketT);
+ }
+};
+
+/// Equality comparison for DenseMap.
+///
+/// Iterates over elements of LHS confirming that each (key, value) pair in LHS
+/// is also in RHS, and that no additional pairs are in RHS.
+/// Equivalent to N calls to RHS.find and N value comparisons. Amortized
+/// complexity is linear, worst case is O(N^2) (if every hash collides).
+template <typename DerivedT, typename KeyT, typename ValueT, typename KeyInfoT,
+ typename BucketT>
+bool operator==(
+ const DenseMapBase<DerivedT, KeyT, ValueT, KeyInfoT, BucketT> &LHS,
+ const DenseMapBase<DerivedT, KeyT, ValueT, KeyInfoT, BucketT> &RHS) {
+ if (LHS.size() != RHS.size())
+ return false;
+
+ for (auto &KV : LHS) {
+ auto I = RHS.find(KV.first);
+ if (I == RHS.end() || I->second != KV.second)
+ return false;
+ }
+
+ return true;
+}
+
+/// Inequality comparison for DenseMap.
+///
+/// Equivalent to !(LHS == RHS). See operator== for performance notes.
+template <typename DerivedT, typename KeyT, typename ValueT, typename KeyInfoT,
+ typename BucketT>
+bool operator!=(
+ const DenseMapBase<DerivedT, KeyT, ValueT, KeyInfoT, BucketT> &LHS,
+ const DenseMapBase<DerivedT, KeyT, ValueT, KeyInfoT, BucketT> &RHS) {
+ return !(LHS == RHS);
+}
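+
+// Illustrative usage (not part of the original header):
+//
+//   DenseMap<int, int> A = {{1, 2}};
+//   DenseMap<int, int> B = {{1, 2}};
+//   assert(A == B);  // same (key, value) pairs
+//   B[1] = 3;
+//   assert(A != B);  // a differing value breaks equality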
+
+template <typename KeyT, typename ValueT,
+ typename KeyInfoT = DenseMapInfo<KeyT>,
+ typename BucketT = llvm::detail::DenseMapPair<KeyT, ValueT>>
+class DenseMap : public DenseMapBase<DenseMap<KeyT, ValueT, KeyInfoT, BucketT>,
+ KeyT, ValueT, KeyInfoT, BucketT> {
+ friend class DenseMapBase<DenseMap, KeyT, ValueT, KeyInfoT, BucketT>;
+
+ // Lift some types from the dependent base class into this class for
+ // simplicity of referring to them.
+ using BaseT = DenseMapBase<DenseMap, KeyT, ValueT, KeyInfoT, BucketT>;
+
+ BucketT *Buckets;
+ unsigned NumEntries;
+ unsigned NumTombstones;
+ unsigned NumBuckets;
+
+public:
+  /// Create a DenseMap with an optional \p InitialReserve that guarantees
+  /// that this many elements can be inserted into the map without needing to
+  /// grow().
+ explicit DenseMap(unsigned InitialReserve = 0) { init(InitialReserve); }
+
+ DenseMap(const DenseMap &other) : BaseT() {
+ init(0);
+ copyFrom(other);
+ }
+
+ DenseMap(DenseMap &&other) : BaseT() {
+ init(0);
+ swap(other);
+ }
+
+ template<typename InputIt>
+ DenseMap(const InputIt &I, const InputIt &E) {
+ init(std::distance(I, E));
+ this->insert(I, E);
+ }
+
+ DenseMap(std::initializer_list<typename BaseT::value_type> Vals) {
+ init(Vals.size());
+ this->insert(Vals.begin(), Vals.end());
+ }
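+
+  // Illustrative usage (not part of the original header):
+  //
+  //   DenseMap<int, int> M(/*InitialReserve=*/16); // 16 inserts, no grow()
+  //   DenseMap<int, int> N = {{1, 10}, {2, 20}};   // initializer_list ctor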
+
+ ~DenseMap() {
+ this->destroyAll();
+ deallocate_buffer(Buckets, sizeof(BucketT) * NumBuckets, alignof(BucketT));
+ }
+
+ void swap(DenseMap& RHS) {
+ this->incrementEpoch();
+ RHS.incrementEpoch();
+ std::swap(Buckets, RHS.Buckets);
+ std::swap(NumEntries, RHS.NumEntries);
+ std::swap(NumTombstones, RHS.NumTombstones);
+ std::swap(NumBuckets, RHS.NumBuckets);
+ }
+
+ DenseMap& operator=(const DenseMap& other) {
+ if (&other != this)
+ copyFrom(other);
+ return *this;
+ }
+
+ DenseMap& operator=(DenseMap &&other) {
+ this->destroyAll();
+ deallocate_buffer(Buckets, sizeof(BucketT) * NumBuckets, alignof(BucketT));
+ init(0);
+ swap(other);
+ return *this;
+ }
+
+ void copyFrom(const DenseMap& other) {
+ this->destroyAll();
+ deallocate_buffer(Buckets, sizeof(BucketT) * NumBuckets, alignof(BucketT));
+ if (allocateBuckets(other.NumBuckets)) {
+ this->BaseT::copyFrom(other);
+ } else {
+ NumEntries = 0;
+ NumTombstones = 0;
+ }
+ }
+
+ void init(unsigned InitNumEntries) {
+ auto InitBuckets = BaseT::getMinBucketToReserveForEntries(InitNumEntries);
+ if (allocateBuckets(InitBuckets)) {
+ this->BaseT::initEmpty();
+ } else {
+ NumEntries = 0;
+ NumTombstones = 0;
+ }
+ }
+
+ void grow(unsigned AtLeast) {
+ unsigned OldNumBuckets = NumBuckets;
+ BucketT *OldBuckets = Buckets;
+
+ allocateBuckets(std::max<unsigned>(64, static_cast<unsigned>(NextPowerOf2(AtLeast-1))));
+ assert(Buckets);
+ if (!OldBuckets) {
+ this->BaseT::initEmpty();
+ return;
+ }
+
+ this->moveFromOldBuckets(OldBuckets, OldBuckets+OldNumBuckets);
+
+ // Free the old table.
+ deallocate_buffer(OldBuckets, sizeof(BucketT) * OldNumBuckets,
+ alignof(BucketT));
+ }
+
+ void shrink_and_clear() {
+ unsigned OldNumBuckets = NumBuckets;
+ unsigned OldNumEntries = NumEntries;
+ this->destroyAll();
+
+ // Reduce the number of buckets.
+ unsigned NewNumBuckets = 0;
+ if (OldNumEntries)
+ NewNumBuckets = std::max(64, 1 << (Log2_32_Ceil(OldNumEntries) + 1));
+ if (NewNumBuckets == NumBuckets) {
+ this->BaseT::initEmpty();
+ return;
+ }
+
+ deallocate_buffer(Buckets, sizeof(BucketT) * OldNumBuckets,
+ alignof(BucketT));
+ init(NewNumBuckets);
+ }
+
+private:
+ unsigned getNumEntries() const {
+ return NumEntries;
+ }
+
+ void setNumEntries(unsigned Num) {
+ NumEntries = Num;
+ }
+
+ unsigned getNumTombstones() const {
+ return NumTombstones;
+ }
+
+ void setNumTombstones(unsigned Num) {
+ NumTombstones = Num;
+ }
+
+ BucketT *getBuckets() const {
+ return Buckets;
+ }
+
+ unsigned getNumBuckets() const {
+ return NumBuckets;
+ }
+
+ bool allocateBuckets(unsigned Num) {
+ NumBuckets = Num;
+ if (NumBuckets == 0) {
+ Buckets = nullptr;
+ return false;
+ }
+
+ Buckets = static_cast<BucketT *>(
+ allocate_buffer(sizeof(BucketT) * NumBuckets, alignof(BucketT)));
+ return true;
+ }
+};
+
+template <typename KeyT, typename ValueT, unsigned InlineBuckets = 4,
+ typename KeyInfoT = DenseMapInfo<KeyT>,
+ typename BucketT = llvm::detail::DenseMapPair<KeyT, ValueT>>
+class SmallDenseMap
+ : public DenseMapBase<
+ SmallDenseMap<KeyT, ValueT, InlineBuckets, KeyInfoT, BucketT>, KeyT,
+ ValueT, KeyInfoT, BucketT> {
+ friend class DenseMapBase<SmallDenseMap, KeyT, ValueT, KeyInfoT, BucketT>;
+
+ // Lift some types from the dependent base class into this class for
+ // simplicity of referring to them.
+ using BaseT = DenseMapBase<SmallDenseMap, KeyT, ValueT, KeyInfoT, BucketT>;
+
+ static_assert(isPowerOf2_64(InlineBuckets),
+ "InlineBuckets must be a power of 2.");
+
+ unsigned Small : 1;
+ unsigned NumEntries : 31;
+ unsigned NumTombstones;
+
+ struct LargeRep {
+ BucketT *Buckets;
+ unsigned NumBuckets;
+ };
+
+ /// A "union" of an inline bucket array and the struct representing
+ /// a large bucket. This union will be discriminated by the 'Small' bit.
+ AlignedCharArrayUnion<BucketT[InlineBuckets], LargeRep> storage;
+
+public:
+ explicit SmallDenseMap(unsigned NumInitBuckets = 0) {
+ init(NumInitBuckets);
+ }
+
+ SmallDenseMap(const SmallDenseMap &other) : BaseT() {
+ init(0);
+ copyFrom(other);
+ }
+
+ SmallDenseMap(SmallDenseMap &&other) : BaseT() {
+ init(0);
+ swap(other);
+ }
+
+ template<typename InputIt>
+ SmallDenseMap(const InputIt &I, const InputIt &E) {
+ init(NextPowerOf2(std::distance(I, E)));
+ this->insert(I, E);
+ }
+
+ SmallDenseMap(std::initializer_list<typename BaseT::value_type> Vals)
+ : SmallDenseMap(Vals.begin(), Vals.end()) {}
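+
+  // Illustrative usage (not part of the original header): small maps stay in
+  // the inline buckets and avoid heap allocation entirely.
+  //
+  //   SmallDenseMap<int, int, 8> M;  // 8 inline buckets
+  //   M.insert({1, 10});             // lives in the object, no allocation
+  //   M.insert({2, 20});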
+
+ ~SmallDenseMap() {
+ this->destroyAll();
+ deallocateBuckets();
+ }
+
+ void swap(SmallDenseMap& RHS) {
+ unsigned TmpNumEntries = RHS.NumEntries;
+ RHS.NumEntries = NumEntries;
+ NumEntries = TmpNumEntries;
+ std::swap(NumTombstones, RHS.NumTombstones);
+
+ const KeyT EmptyKey = this->getEmptyKey();
+ const KeyT TombstoneKey = this->getTombstoneKey();
+ if (Small && RHS.Small) {
+ // If we're swapping inline bucket arrays, we have to cope with some of
+ // the tricky bits of DenseMap's storage system: the buckets are not
+ // fully initialized. Thus we swap every key, but we may have
+ // a one-directional move of the value.
+ for (unsigned i = 0, e = InlineBuckets; i != e; ++i) {
+ BucketT *LHSB = &getInlineBuckets()[i],
+ *RHSB = &RHS.getInlineBuckets()[i];
+ bool hasLHSValue = (!KeyInfoT::isEqual(LHSB->getFirst(), EmptyKey) &&
+ !KeyInfoT::isEqual(LHSB->getFirst(), TombstoneKey));
+ bool hasRHSValue = (!KeyInfoT::isEqual(RHSB->getFirst(), EmptyKey) &&
+ !KeyInfoT::isEqual(RHSB->getFirst(), TombstoneKey));
+ if (hasLHSValue && hasRHSValue) {
+ // Swap together if we can...
+ std::swap(*LHSB, *RHSB);
+ continue;
+ }
+ // Swap separately and handle any asymmetry.
+ std::swap(LHSB->getFirst(), RHSB->getFirst());
+ if (hasLHSValue) {
+ ::new (&RHSB->getSecond()) ValueT(std::move(LHSB->getSecond()));
+ LHSB->getSecond().~ValueT();
+ } else if (hasRHSValue) {
+ ::new (&LHSB->getSecond()) ValueT(std::move(RHSB->getSecond()));
+ RHSB->getSecond().~ValueT();
+ }
+ }
+ return;
+ }
+ if (!Small && !RHS.Small) {
+ std::swap(getLargeRep()->Buckets, RHS.getLargeRep()->Buckets);
+ std::swap(getLargeRep()->NumBuckets, RHS.getLargeRep()->NumBuckets);
+ return;
+ }
+
+ SmallDenseMap &SmallSide = Small ? *this : RHS;
+ SmallDenseMap &LargeSide = Small ? RHS : *this;
+
+ // First stash the large side's rep and move the small side across.
+ LargeRep TmpRep = std::move(*LargeSide.getLargeRep());
+ LargeSide.getLargeRep()->~LargeRep();
+ LargeSide.Small = true;
+ // This is similar to the standard move-from-old-buckets, but the bucket
+ // count hasn't actually rotated in this case. So we have to carefully
+ // move construct the keys and values into their new locations, but there
+ // is no need to re-hash things.
+ for (unsigned i = 0, e = InlineBuckets; i != e; ++i) {
+ BucketT *NewB = &LargeSide.getInlineBuckets()[i],
+ *OldB = &SmallSide.getInlineBuckets()[i];
+ ::new (&NewB->getFirst()) KeyT(std::move(OldB->getFirst()));
+ OldB->getFirst().~KeyT();
+ if (!KeyInfoT::isEqual(NewB->getFirst(), EmptyKey) &&
+ !KeyInfoT::isEqual(NewB->getFirst(), TombstoneKey)) {
+ ::new (&NewB->getSecond()) ValueT(std::move(OldB->getSecond()));
+ OldB->getSecond().~ValueT();
+ }
+ }
+
+ // The hard part of moving the small buckets across is done, just move
+ // the TmpRep into its new home.
+ SmallSide.Small = false;
+ new (SmallSide.getLargeRep()) LargeRep(std::move(TmpRep));
+ }
+
+ SmallDenseMap& operator=(const SmallDenseMap& other) {
+ if (&other != this)
+ copyFrom(other);
+ return *this;
+ }
+
+ SmallDenseMap& operator=(SmallDenseMap &&other) {
+ this->destroyAll();
+ deallocateBuckets();
+ init(0);
+ swap(other);
+ return *this;
+ }
+
+ void copyFrom(const SmallDenseMap& other) {
+ this->destroyAll();
+ deallocateBuckets();
+ Small = true;
+ if (other.getNumBuckets() > InlineBuckets) {
+ Small = false;
+ new (getLargeRep()) LargeRep(allocateBuckets(other.getNumBuckets()));
+ }
+ this->BaseT::copyFrom(other);
+ }
+
+ void init(unsigned InitBuckets) {
+ Small = true;
+ if (InitBuckets > InlineBuckets) {
+ Small = false;
+ new (getLargeRep()) LargeRep(allocateBuckets(InitBuckets));
+ }
+ this->BaseT::initEmpty();
+ }
+
+ void grow(unsigned AtLeast) {
+ if (AtLeast > InlineBuckets)
+ AtLeast = std::max<unsigned>(64, NextPowerOf2(AtLeast-1));
+
+ if (Small) {
+ // First move the inline buckets into a temporary storage.
+ AlignedCharArrayUnion<BucketT[InlineBuckets]> TmpStorage;
+ BucketT *TmpBegin = reinterpret_cast<BucketT *>(&TmpStorage);
+ BucketT *TmpEnd = TmpBegin;
+
+ // Loop over the buckets, moving non-empty, non-tombstones into the
+ // temporary storage. Have the loop move the TmpEnd forward as it goes.
+ const KeyT EmptyKey = this->getEmptyKey();
+ const KeyT TombstoneKey = this->getTombstoneKey();
+ for (BucketT *P = getBuckets(), *E = P + InlineBuckets; P != E; ++P) {
+ if (!KeyInfoT::isEqual(P->getFirst(), EmptyKey) &&
+ !KeyInfoT::isEqual(P->getFirst(), TombstoneKey)) {
+ assert(size_t(TmpEnd - TmpBegin) < InlineBuckets &&
+ "Too many inline buckets!");
+ ::new (&TmpEnd->getFirst()) KeyT(std::move(P->getFirst()));
+ ::new (&TmpEnd->getSecond()) ValueT(std::move(P->getSecond()));
+ ++TmpEnd;
+ P->getSecond().~ValueT();
+ }
+ P->getFirst().~KeyT();
+ }
+
+      // AtLeast == InlineBuckets can happen if there are many tombstones,
+      // and grow() is used to remove them; in that case we stay small.
+      // Otherwise we switch to the large rep here.
+ if (AtLeast > InlineBuckets) {
+ Small = false;
+ new (getLargeRep()) LargeRep(allocateBuckets(AtLeast));
+ }
+ this->moveFromOldBuckets(TmpBegin, TmpEnd);
+ return;
+ }
+
+ LargeRep OldRep = std::move(*getLargeRep());
+ getLargeRep()->~LargeRep();
+ if (AtLeast <= InlineBuckets) {
+ Small = true;
+ } else {
+ new (getLargeRep()) LargeRep(allocateBuckets(AtLeast));
+ }
+
+ this->moveFromOldBuckets(OldRep.Buckets, OldRep.Buckets+OldRep.NumBuckets);
+
+ // Free the old table.
+ deallocate_buffer(OldRep.Buckets, sizeof(BucketT) * OldRep.NumBuckets,
+ alignof(BucketT));
+ }
+
+ void shrink_and_clear() {
+ unsigned OldSize = this->size();
+ this->destroyAll();
+
+ // Reduce the number of buckets.
+ unsigned NewNumBuckets = 0;
+ if (OldSize) {
+ NewNumBuckets = 1 << (Log2_32_Ceil(OldSize) + 1);
+ if (NewNumBuckets > InlineBuckets && NewNumBuckets < 64u)
+ NewNumBuckets = 64;
+ }
+ if ((Small && NewNumBuckets <= InlineBuckets) ||
+ (!Small && NewNumBuckets == getLargeRep()->NumBuckets)) {
+ this->BaseT::initEmpty();
+ return;
+ }
+
+ deallocateBuckets();
+ init(NewNumBuckets);
+ }
+
+private:
+ unsigned getNumEntries() const {
+ return NumEntries;
+ }
+
+ void setNumEntries(unsigned Num) {
+ // NumEntries is hardcoded to be 31 bits wide.
+ assert(Num < (1U << 31) && "Cannot support more than 1<<31 entries");
+ NumEntries = Num;
+ }
+
+ unsigned getNumTombstones() const {
+ return NumTombstones;
+ }
+
+ void setNumTombstones(unsigned Num) {
+ NumTombstones = Num;
+ }
+
+ const BucketT *getInlineBuckets() const {
+ assert(Small);
+ // Note that this cast does not violate aliasing rules as we assert that
+ // the memory's dynamic type is the small, inline bucket buffer, and the
+ // 'storage' is a POD containing a char buffer.
+ return reinterpret_cast<const BucketT *>(&storage);
+ }
+
+ BucketT *getInlineBuckets() {
+ return const_cast<BucketT *>(
+ const_cast<const SmallDenseMap *>(this)->getInlineBuckets());
+ }
+
+ const LargeRep *getLargeRep() const {
+ assert(!Small);
+ // Note, same rule about aliasing as with getInlineBuckets.
+ return reinterpret_cast<const LargeRep *>(&storage);
+ }
+
+ LargeRep *getLargeRep() {
+ return const_cast<LargeRep *>(
+ const_cast<const SmallDenseMap *>(this)->getLargeRep());
+ }
+
+ const BucketT *getBuckets() const {
+ return Small ? getInlineBuckets() : getLargeRep()->Buckets;
+ }
+
+ BucketT *getBuckets() {
+ return const_cast<BucketT *>(
+ const_cast<const SmallDenseMap *>(this)->getBuckets());
+ }
+
+ unsigned getNumBuckets() const {
+ return Small ? InlineBuckets : getLargeRep()->NumBuckets;
+ }
+
+ void deallocateBuckets() {
+ if (Small)
+ return;
+
+ deallocate_buffer(getLargeRep()->Buckets,
+ sizeof(BucketT) * getLargeRep()->NumBuckets,
+ alignof(BucketT));
+ getLargeRep()->~LargeRep();
+ }
+
+ LargeRep allocateBuckets(unsigned Num) {
+ assert(Num > InlineBuckets && "Must allocate more buckets than are inline");
+ LargeRep Rep = {static_cast<BucketT *>(allocate_buffer(
+ sizeof(BucketT) * Num, alignof(BucketT))),
+ Num};
+ return Rep;
+ }
+};
+
+template <typename KeyT, typename ValueT, typename KeyInfoT, typename Bucket,
+ bool IsConst>
+class DenseMapIterator : DebugEpochBase::HandleBase {
+ friend class DenseMapIterator<KeyT, ValueT, KeyInfoT, Bucket, true>;
+ friend class DenseMapIterator<KeyT, ValueT, KeyInfoT, Bucket, false>;
+
+public:
+ using difference_type = ptrdiff_t;
+ using value_type =
+ typename std::conditional<IsConst, const Bucket, Bucket>::type;
+ using pointer = value_type *;
+ using reference = value_type &;
+ using iterator_category = std::forward_iterator_tag;
+
+private:
+ pointer Ptr = nullptr;
+ pointer End = nullptr;
+
+public:
+ DenseMapIterator() = default;
+
+ DenseMapIterator(pointer Pos, pointer E, const DebugEpochBase &Epoch,
+ bool NoAdvance = false)
+ : DebugEpochBase::HandleBase(&Epoch), Ptr(Pos), End(E) {
+ assert(isHandleInSync() && "invalid construction!");
+
+ if (NoAdvance) return;
+ if (shouldReverseIterate<KeyT>()) {
+ RetreatPastEmptyBuckets();
+ return;
+ }
+ AdvancePastEmptyBuckets();
+ }
+
+ // Converting ctor from non-const iterators to const iterators. SFINAE'd out
+ // for const iterator destinations so it doesn't end up as a user defined copy
+ // constructor.
+ template <bool IsConstSrc,
+ typename = std::enable_if_t<!IsConstSrc && IsConst>>
+ DenseMapIterator(
+ const DenseMapIterator<KeyT, ValueT, KeyInfoT, Bucket, IsConstSrc> &I)
+ : DebugEpochBase::HandleBase(I), Ptr(I.Ptr), End(I.End) {}
+
+ reference operator*() const {
+ assert(isHandleInSync() && "invalid iterator access!");
+ assert(Ptr != End && "dereferencing end() iterator");
+ if (shouldReverseIterate<KeyT>())
+ return Ptr[-1];
+ return *Ptr;
+ }
+ pointer operator->() const {
+ assert(isHandleInSync() && "invalid iterator access!");
+ assert(Ptr != End && "dereferencing end() iterator");
+ if (shouldReverseIterate<KeyT>())
+ return &(Ptr[-1]);
+ return Ptr;
+ }
+
+ friend bool operator==(const DenseMapIterator &LHS,
+ const DenseMapIterator &RHS) {
+ assert((!LHS.Ptr || LHS.isHandleInSync()) && "handle not in sync!");
+ assert((!RHS.Ptr || RHS.isHandleInSync()) && "handle not in sync!");
+ assert(LHS.getEpochAddress() == RHS.getEpochAddress() &&
+ "comparing incomparable iterators!");
+ return LHS.Ptr == RHS.Ptr;
+ }
+
+ friend bool operator!=(const DenseMapIterator &LHS,
+ const DenseMapIterator &RHS) {
+ return !(LHS == RHS);
+ }
+
+ inline DenseMapIterator& operator++() { // Preincrement
+ assert(isHandleInSync() && "invalid iterator access!");
+ assert(Ptr != End && "incrementing end() iterator");
+ if (shouldReverseIterate<KeyT>()) {
+ --Ptr;
+ RetreatPastEmptyBuckets();
+ return *this;
+ }
+ ++Ptr;
+ AdvancePastEmptyBuckets();
+ return *this;
+ }
+ DenseMapIterator operator++(int) { // Postincrement
+ assert(isHandleInSync() && "invalid iterator access!");
+ DenseMapIterator tmp = *this; ++*this; return tmp;
+ }
+
+private:
+ void AdvancePastEmptyBuckets() {
+ assert(Ptr <= End);
+ const KeyT Empty = KeyInfoT::getEmptyKey();
+ const KeyT Tombstone = KeyInfoT::getTombstoneKey();
+
+ while (Ptr != End && (KeyInfoT::isEqual(Ptr->getFirst(), Empty) ||
+ KeyInfoT::isEqual(Ptr->getFirst(), Tombstone)))
+ ++Ptr;
+ }
+
+ void RetreatPastEmptyBuckets() {
+ assert(Ptr >= End);
+ const KeyT Empty = KeyInfoT::getEmptyKey();
+ const KeyT Tombstone = KeyInfoT::getTombstoneKey();
+
+ while (Ptr != End && (KeyInfoT::isEqual(Ptr[-1].getFirst(), Empty) ||
+ KeyInfoT::isEqual(Ptr[-1].getFirst(), Tombstone)))
+ --Ptr;
+ }
+};
+
+template <typename KeyT, typename ValueT, typename KeyInfoT>
+inline size_t capacity_in_bytes(const DenseMap<KeyT, ValueT, KeyInfoT> &X) {
+ return X.getMemorySize();
+}
+
+} // end namespace llvm
+
+#endif // LLVM_ADT_DENSEMAP_H
+
+#ifdef __GNUC__
+#pragma GCC diagnostic pop
+#endif
diff --git a/contrib/libs/llvm14/include/llvm/ADT/DenseMapInfo.h b/contrib/libs/llvm14/include/llvm/ADT/DenseMapInfo.h
new file mode 100644
index 0000000000..c6f23828ee
--- /dev/null
+++ b/contrib/libs/llvm14/include/llvm/ADT/DenseMapInfo.h
@@ -0,0 +1,304 @@
+#pragma once
+
+#ifdef __GNUC__
+#pragma GCC diagnostic push
+#pragma GCC diagnostic ignored "-Wunused-parameter"
+#endif
+
+//===- llvm/ADT/DenseMapInfo.h - Type traits for DenseMap -------*- C++ -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+///
+/// \file
+/// This file defines DenseMapInfo traits for DenseMap.
+///
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_ADT_DENSEMAPINFO_H
+#define LLVM_ADT_DENSEMAPINFO_H
+
+#include <cassert>
+#include <cstddef>
+#include <cstdint>
+#include <tuple>
+#include <utility>
+
+namespace llvm {
+
+namespace detail {
+
+/// Simplistic combination of 32-bit hash values into 32-bit hash values.
+static inline unsigned combineHashValue(unsigned a, unsigned b) {
+ uint64_t key = (uint64_t)a << 32 | (uint64_t)b;
+ key += ~(key << 32);
+ key ^= (key >> 22);
+ key += ~(key << 13);
+ key ^= (key >> 8);
+ key += (key << 3);
+ key ^= (key >> 15);
+ key += ~(key << 27);
+ key ^= (key >> 31);
+ return (unsigned)key;
+}
+
+} // end namespace detail
+
+/// An information struct used to provide DenseMap with the various necessary
+/// components for a given value type `T`. `Enable` is an optional additional
+/// parameter that is used to support SFINAE (generally using std::enable_if_t)
+/// in derived DenseMapInfo specializations; in non-SFINAE use cases this should
+/// just be `void`.
+template<typename T, typename Enable = void>
+struct DenseMapInfo {
+ //static inline T getEmptyKey();
+ //static inline T getTombstoneKey();
+ //static unsigned getHashValue(const T &Val);
+ //static bool isEqual(const T &LHS, const T &RHS);
+};
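+
+// Illustrative sketch (not part of the original header): a specialization for
+// a hypothetical Coord struct. The empty and tombstone keys must be two
+// distinct values that never occur as real keys.
+//
+//   struct Coord { int X, Y; };
+//   template <> struct DenseMapInfo<Coord> {
+//     static inline Coord getEmptyKey() { return {INT_MAX, INT_MAX}; }
+//     static inline Coord getTombstoneKey() { return {INT_MIN, INT_MIN}; }
+//     static unsigned getHashValue(const Coord &C) {
+//       return detail::combineHashValue(C.X, C.Y);
+//     }
+//     static bool isEqual(const Coord &L, const Coord &R) {
+//       return L.X == R.X && L.Y == R.Y;
+//     }
+//   };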
+
+// Provide DenseMapInfo for all pointers. Come up with sentinel pointer values
+// that are aligned to alignof(T) bytes, but try to avoid requiring T to be
+// complete. This allows clients to instantiate DenseMap<T*, ...> with forward
+// declared key types. Assume that no pointer key type requires more than 4096
+// bytes of alignment.
+template<typename T>
+struct DenseMapInfo<T*> {
+ // The following should hold, but it would require T to be complete:
+ // static_assert(alignof(T) <= (1 << Log2MaxAlign),
+ // "DenseMap does not support pointer keys requiring more than "
+ // "Log2MaxAlign bits of alignment");
+ static constexpr uintptr_t Log2MaxAlign = 12;
+
+ static inline T* getEmptyKey() {
+ uintptr_t Val = static_cast<uintptr_t>(-1);
+ Val <<= Log2MaxAlign;
+ return reinterpret_cast<T*>(Val);
+ }
+
+ static inline T* getTombstoneKey() {
+ uintptr_t Val = static_cast<uintptr_t>(-2);
+ Val <<= Log2MaxAlign;
+ return reinterpret_cast<T*>(Val);
+ }
+
+ static unsigned getHashValue(const T *PtrVal) {
+ return (unsigned((uintptr_t)PtrVal) >> 4) ^
+ (unsigned((uintptr_t)PtrVal) >> 9);
+ }
+
+ static bool isEqual(const T *LHS, const T *RHS) { return LHS == RHS; }
+};
+
+// Provide DenseMapInfo for chars.
+template<> struct DenseMapInfo<char> {
+ static inline char getEmptyKey() { return ~0; }
+ static inline char getTombstoneKey() { return ~0 - 1; }
+ static unsigned getHashValue(const char& Val) { return Val * 37U; }
+
+ static bool isEqual(const char &LHS, const char &RHS) {
+ return LHS == RHS;
+ }
+};
+
+// Provide DenseMapInfo for unsigned chars.
+template <> struct DenseMapInfo<unsigned char> {
+ static inline unsigned char getEmptyKey() { return ~0; }
+ static inline unsigned char getTombstoneKey() { return ~0 - 1; }
+ static unsigned getHashValue(const unsigned char &Val) { return Val * 37U; }
+
+ static bool isEqual(const unsigned char &LHS, const unsigned char &RHS) {
+ return LHS == RHS;
+ }
+};
+
+// Provide DenseMapInfo for unsigned shorts.
+template <> struct DenseMapInfo<unsigned short> {
+ static inline unsigned short getEmptyKey() { return 0xFFFF; }
+ static inline unsigned short getTombstoneKey() { return 0xFFFF - 1; }
+ static unsigned getHashValue(const unsigned short &Val) { return Val * 37U; }
+
+ static bool isEqual(const unsigned short &LHS, const unsigned short &RHS) {
+ return LHS == RHS;
+ }
+};
+
+// Provide DenseMapInfo for unsigned ints.
+template<> struct DenseMapInfo<unsigned> {
+ static inline unsigned getEmptyKey() { return ~0U; }
+ static inline unsigned getTombstoneKey() { return ~0U - 1; }
+ static unsigned getHashValue(const unsigned& Val) { return Val * 37U; }
+
+ static bool isEqual(const unsigned& LHS, const unsigned& RHS) {
+ return LHS == RHS;
+ }
+};
+
+// Provide DenseMapInfo for unsigned longs.
+template<> struct DenseMapInfo<unsigned long> {
+ static inline unsigned long getEmptyKey() { return ~0UL; }
+ static inline unsigned long getTombstoneKey() { return ~0UL - 1L; }
+
+ static unsigned getHashValue(const unsigned long& Val) {
+ return (unsigned)(Val * 37UL);
+ }
+
+ static bool isEqual(const unsigned long& LHS, const unsigned long& RHS) {
+ return LHS == RHS;
+ }
+};
+
+// Provide DenseMapInfo for unsigned long longs.
+template<> struct DenseMapInfo<unsigned long long> {
+ static inline unsigned long long getEmptyKey() { return ~0ULL; }
+ static inline unsigned long long getTombstoneKey() { return ~0ULL - 1ULL; }
+
+ static unsigned getHashValue(const unsigned long long& Val) {
+ return (unsigned)(Val * 37ULL);
+ }
+
+ static bool isEqual(const unsigned long long& LHS,
+ const unsigned long long& RHS) {
+ return LHS == RHS;
+ }
+};
+
+// Provide DenseMapInfo for shorts.
+template <> struct DenseMapInfo<short> {
+ static inline short getEmptyKey() { return 0x7FFF; }
+ static inline short getTombstoneKey() { return -0x7FFF - 1; }
+ static unsigned getHashValue(const short &Val) { return Val * 37U; }
+ static bool isEqual(const short &LHS, const short &RHS) { return LHS == RHS; }
+};
+
+// Provide DenseMapInfo for ints.
+template<> struct DenseMapInfo<int> {
+ static inline int getEmptyKey() { return 0x7fffffff; }
+ static inline int getTombstoneKey() { return -0x7fffffff - 1; }
+ static unsigned getHashValue(const int& Val) { return (unsigned)(Val * 37U); }
+
+ static bool isEqual(const int& LHS, const int& RHS) {
+ return LHS == RHS;
+ }
+};
+
+// Provide DenseMapInfo for longs.
+template<> struct DenseMapInfo<long> {
+ static inline long getEmptyKey() {
+ return (1UL << (sizeof(long) * 8 - 1)) - 1UL;
+ }
+
+ static inline long getTombstoneKey() { return getEmptyKey() - 1L; }
+
+ static unsigned getHashValue(const long& Val) {
+ return (unsigned)(Val * 37UL);
+ }
+
+ static bool isEqual(const long& LHS, const long& RHS) {
+ return LHS == RHS;
+ }
+};
+
+// Provide DenseMapInfo for long longs.
+template<> struct DenseMapInfo<long long> {
+ static inline long long getEmptyKey() { return 0x7fffffffffffffffLL; }
+ static inline long long getTombstoneKey() { return -0x7fffffffffffffffLL-1; }
+
+ static unsigned getHashValue(const long long& Val) {
+ return (unsigned)(Val * 37ULL);
+ }
+
+ static bool isEqual(const long long& LHS,
+ const long long& RHS) {
+ return LHS == RHS;
+ }
+};
+
+// Provide DenseMapInfo for all pairs whose members have info.
+template<typename T, typename U>
+struct DenseMapInfo<std::pair<T, U>> {
+ using Pair = std::pair<T, U>;
+ using FirstInfo = DenseMapInfo<T>;
+ using SecondInfo = DenseMapInfo<U>;
+
+ static inline Pair getEmptyKey() {
+ return std::make_pair(FirstInfo::getEmptyKey(),
+ SecondInfo::getEmptyKey());
+ }
+
+ static inline Pair getTombstoneKey() {
+ return std::make_pair(FirstInfo::getTombstoneKey(),
+ SecondInfo::getTombstoneKey());
+ }
+
+ static unsigned getHashValue(const Pair& PairVal) {
+ return detail::combineHashValue(FirstInfo::getHashValue(PairVal.first),
+ SecondInfo::getHashValue(PairVal.second));
+ }
+
+ static bool isEqual(const Pair &LHS, const Pair &RHS) {
+ return FirstInfo::isEqual(LHS.first, RHS.first) &&
+ SecondInfo::isEqual(LHS.second, RHS.second);
+ }
+};
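+
+// Illustrative usage (not part of the original header): pair keys work out of
+// the box once both members have DenseMapInfo.
+//
+//   DenseMap<std::pair<unsigned, unsigned>, int> Edges;
+//   Edges[{1U, 2U}] = 42;  // hashed via combineHashValue of both members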
+
+// Provide DenseMapInfo for all tuples whose members have info.
+template <typename... Ts> struct DenseMapInfo<std::tuple<Ts...>> {
+ using Tuple = std::tuple<Ts...>;
+
+ static inline Tuple getEmptyKey() {
+ return Tuple(DenseMapInfo<Ts>::getEmptyKey()...);
+ }
+
+ static inline Tuple getTombstoneKey() {
+ return Tuple(DenseMapInfo<Ts>::getTombstoneKey()...);
+ }
+
+ template <unsigned I>
+ static unsigned getHashValueImpl(const Tuple &values, std::false_type) {
+ using EltType = typename std::tuple_element<I, Tuple>::type;
+ std::integral_constant<bool, I + 1 == sizeof...(Ts)> atEnd;
+ return detail::combineHashValue(
+ DenseMapInfo<EltType>::getHashValue(std::get<I>(values)),
+ getHashValueImpl<I + 1>(values, atEnd));
+ }
+
+ template <unsigned I>
+ static unsigned getHashValueImpl(const Tuple &, std::true_type) {
+ return 0;
+ }
+
+ static unsigned getHashValue(const std::tuple<Ts...> &values) {
+ std::integral_constant<bool, 0 == sizeof...(Ts)> atEnd;
+ return getHashValueImpl<0>(values, atEnd);
+ }
+
+ template <unsigned I>
+ static bool isEqualImpl(const Tuple &lhs, const Tuple &rhs, std::false_type) {
+ using EltType = typename std::tuple_element<I, Tuple>::type;
+ std::integral_constant<bool, I + 1 == sizeof...(Ts)> atEnd;
+ return DenseMapInfo<EltType>::isEqual(std::get<I>(lhs), std::get<I>(rhs)) &&
+ isEqualImpl<I + 1>(lhs, rhs, atEnd);
+ }
+
+ template <unsigned I>
+ static bool isEqualImpl(const Tuple &, const Tuple &, std::true_type) {
+ return true;
+ }
+
+ static bool isEqual(const Tuple &lhs, const Tuple &rhs) {
+ std::integral_constant<bool, 0 == sizeof...(Ts)> atEnd;
+ return isEqualImpl<0>(lhs, rhs, atEnd);
+ }
+};
+
+} // end namespace llvm
+
+#endif // LLVM_ADT_DENSEMAPINFO_H
+
+#ifdef __GNUC__
+#pragma GCC diagnostic pop
+#endif
diff --git a/contrib/libs/llvm14/include/llvm/ADT/DenseSet.h b/contrib/libs/llvm14/include/llvm/ADT/DenseSet.h
new file mode 100644
index 0000000000..823535ec14
--- /dev/null
+++ b/contrib/libs/llvm14/include/llvm/ADT/DenseSet.h
@@ -0,0 +1,313 @@
+#pragma once
+
+#ifdef __GNUC__
+#pragma GCC diagnostic push
+#pragma GCC diagnostic ignored "-Wunused-parameter"
+#endif
+
+//===- llvm/ADT/DenseSet.h - Dense probed hash table ------------*- C++ -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+///
+/// \file
+/// This file defines the DenseSet and SmallDenseSet classes.
+///
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_ADT_DENSESET_H
+#define LLVM_ADT_DENSESET_H
+
+#include "llvm/ADT/DenseMap.h"
+#include "llvm/ADT/DenseMapInfo.h"
+#include "llvm/Support/MathExtras.h"
+#include "llvm/Support/type_traits.h"
+#include <cstddef>
+#include <initializer_list>
+#include <iterator>
+#include <utility>
+
+namespace llvm {
+
+namespace detail {
+
+struct DenseSetEmpty {};
+
+// Use the empty base class trick so we can create a DenseMap where the buckets
+// contain only a single item.
+template <typename KeyT> class DenseSetPair : public DenseSetEmpty {
+ KeyT key;
+
+public:
+ KeyT &getFirst() { return key; }
+ const KeyT &getFirst() const { return key; }
+ DenseSetEmpty &getSecond() { return *this; }
+ const DenseSetEmpty &getSecond() const { return *this; }
+};
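+
+// Illustrative note (not part of the original header): thanks to the
+// empty-base trick above, a set bucket is exactly the size of its key, which
+// the static_assert in DenseSetImpl below relies on. For example:
+//
+//   static_assert(sizeof(DenseSetPair<int>) == sizeof(int),
+//                 "empty base adds no storage");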
+
+/// Base class for DenseSet and SmallDenseSet.
+///
+/// MapTy should be either
+///
+/// DenseMap<ValueT, detail::DenseSetEmpty, ValueInfoT,
+/// detail::DenseSetPair<ValueT>>
+///
+/// or the equivalent SmallDenseMap type. ValueInfoT must implement the
+/// DenseMapInfo "concept".
+template <typename ValueT, typename MapTy, typename ValueInfoT>
+class DenseSetImpl {
+ static_assert(sizeof(typename MapTy::value_type) == sizeof(ValueT),
+ "DenseMap buckets unexpectedly large!");
+ MapTy TheMap;
+
+ template <typename T>
+ using const_arg_type_t = typename const_pointer_or_const_ref<T>::type;
+
+public:
+ using key_type = ValueT;
+ using value_type = ValueT;
+ using size_type = unsigned;
+
+ explicit DenseSetImpl(unsigned InitialReserve = 0) : TheMap(InitialReserve) {}
+
+ template <typename InputIt>
+ DenseSetImpl(const InputIt &I, const InputIt &E)
+ : DenseSetImpl(PowerOf2Ceil(std::distance(I, E))) {
+ insert(I, E);
+ }
+
+ DenseSetImpl(std::initializer_list<ValueT> Elems)
+ : DenseSetImpl(PowerOf2Ceil(Elems.size())) {
+ insert(Elems.begin(), Elems.end());
+ }
+
+ bool empty() const { return TheMap.empty(); }
+ size_type size() const { return TheMap.size(); }
+ size_t getMemorySize() const { return TheMap.getMemorySize(); }
+
+  /// Grow the DenseSet so that it has at least Size buckets. Will not shrink
+  /// the size of the set.
+ void resize(size_t Size) { TheMap.resize(Size); }
+
+  /// Grow the DenseSet so that it can contain at least \p Size items before
+  /// resizing again.
+ void reserve(size_t Size) { TheMap.reserve(Size); }
+
+ void clear() {
+ TheMap.clear();
+ }
+
+ /// Return 1 if the specified key is in the set, 0 otherwise.
+ size_type count(const_arg_type_t<ValueT> V) const {
+ return TheMap.count(V);
+ }
+
+ bool erase(const ValueT &V) {
+ return TheMap.erase(V);
+ }
+
+ void swap(DenseSetImpl &RHS) { TheMap.swap(RHS.TheMap); }
+
+ // Iterators.
+
+ class ConstIterator;
+
+ class Iterator {
+ typename MapTy::iterator I;
+ friend class DenseSetImpl;
+ friend class ConstIterator;
+
+ public:
+ using difference_type = typename MapTy::iterator::difference_type;
+ using value_type = ValueT;
+ using pointer = value_type *;
+ using reference = value_type &;
+ using iterator_category = std::forward_iterator_tag;
+
+ Iterator() = default;
+ Iterator(const typename MapTy::iterator &i) : I(i) {}
+
+ ValueT &operator*() { return I->getFirst(); }
+ const ValueT &operator*() const { return I->getFirst(); }
+ ValueT *operator->() { return &I->getFirst(); }
+ const ValueT *operator->() const { return &I->getFirst(); }
+
+ Iterator& operator++() { ++I; return *this; }
+ Iterator operator++(int) { auto T = *this; ++I; return T; }
+ friend bool operator==(const Iterator &X, const Iterator &Y) {
+ return X.I == Y.I;
+ }
+ friend bool operator!=(const Iterator &X, const Iterator &Y) {
+ return X.I != Y.I;
+ }
+ };
+
+ class ConstIterator {
+ typename MapTy::const_iterator I;
+ friend class DenseSetImpl;
+ friend class Iterator;
+
+ public:
+ using difference_type = typename MapTy::const_iterator::difference_type;
+ using value_type = ValueT;
+ using pointer = const value_type *;
+ using reference = const value_type &;
+ using iterator_category = std::forward_iterator_tag;
+
+ ConstIterator() = default;
+ ConstIterator(const Iterator &B) : I(B.I) {}
+ ConstIterator(const typename MapTy::const_iterator &i) : I(i) {}
+
+ const ValueT &operator*() const { return I->getFirst(); }
+ const ValueT *operator->() const { return &I->getFirst(); }
+
+ ConstIterator& operator++() { ++I; return *this; }
+ ConstIterator operator++(int) { auto T = *this; ++I; return T; }
+ friend bool operator==(const ConstIterator &X, const ConstIterator &Y) {
+ return X.I == Y.I;
+ }
+ friend bool operator!=(const ConstIterator &X, const ConstIterator &Y) {
+ return X.I != Y.I;
+ }
+ };
+
+ using iterator = Iterator;
+ using const_iterator = ConstIterator;
+
+ iterator begin() { return Iterator(TheMap.begin()); }
+ iterator end() { return Iterator(TheMap.end()); }
+
+ const_iterator begin() const { return ConstIterator(TheMap.begin()); }
+ const_iterator end() const { return ConstIterator(TheMap.end()); }
+
+ iterator find(const_arg_type_t<ValueT> V) { return Iterator(TheMap.find(V)); }
+ const_iterator find(const_arg_type_t<ValueT> V) const {
+ return ConstIterator(TheMap.find(V));
+ }
+
+ /// Check if the set contains the given element.
+ bool contains(const_arg_type_t<ValueT> V) const {
+ return TheMap.find(V) != TheMap.end();
+ }
+
+ /// Alternative version of find() which allows a different, and possibly less
+ /// expensive, key type.
+ /// The DenseMapInfo is responsible for supplying methods
+ /// getHashValue(LookupKeyT) and isEqual(LookupKeyT, KeyT) for each key type
+ /// used.
+ template <class LookupKeyT>
+ iterator find_as(const LookupKeyT &Val) {
+ return Iterator(TheMap.find_as(Val));
+ }
+ template <class LookupKeyT>
+ const_iterator find_as(const LookupKeyT &Val) const {
+ return ConstIterator(TheMap.find_as(Val));
+ }
+
+ void erase(Iterator I) { return TheMap.erase(I.I); }
+ void erase(ConstIterator CI) { return TheMap.erase(CI.I); }
+
+ std::pair<iterator, bool> insert(const ValueT &V) {
+ detail::DenseSetEmpty Empty;
+ return TheMap.try_emplace(V, Empty);
+ }
+
+ std::pair<iterator, bool> insert(ValueT &&V) {
+ detail::DenseSetEmpty Empty;
+ return TheMap.try_emplace(std::move(V), Empty);
+ }
+
+ /// Alternative version of insert that uses a different (and possibly less
+ /// expensive) key type.
+ template <typename LookupKeyT>
+ std::pair<iterator, bool> insert_as(const ValueT &V,
+ const LookupKeyT &LookupKey) {
+ return TheMap.insert_as({V, detail::DenseSetEmpty()}, LookupKey);
+ }
+ template <typename LookupKeyT>
+ std::pair<iterator, bool> insert_as(ValueT &&V, const LookupKeyT &LookupKey) {
+ return TheMap.insert_as({std::move(V), detail::DenseSetEmpty()}, LookupKey);
+ }
+
+ // Range insertion of values.
+ template<typename InputIt>
+ void insert(InputIt I, InputIt E) {
+ for (; I != E; ++I)
+ insert(*I);
+ }
+};
+
+/// Equality comparison for DenseSet.
+///
+/// Iterates over elements of LHS confirming that each element is also a member
+/// of RHS, and that RHS contains no additional values.
+/// Equivalent to N calls to RHS.count. Amortized complexity is linear, worst
+/// case is O(N^2) (if every hash collides).
+template <typename ValueT, typename MapTy, typename ValueInfoT>
+bool operator==(const DenseSetImpl<ValueT, MapTy, ValueInfoT> &LHS,
+ const DenseSetImpl<ValueT, MapTy, ValueInfoT> &RHS) {
+ if (LHS.size() != RHS.size())
+ return false;
+
+ for (auto &E : LHS)
+ if (!RHS.count(E))
+ return false;
+
+ return true;
+}
+
+/// Inequality comparison for DenseSet.
+///
+/// Equivalent to !(LHS == RHS). See operator== for performance notes.
+template <typename ValueT, typename MapTy, typename ValueInfoT>
+bool operator!=(const DenseSetImpl<ValueT, MapTy, ValueInfoT> &LHS,
+ const DenseSetImpl<ValueT, MapTy, ValueInfoT> &RHS) {
+ return !(LHS == RHS);
+}
+
+} // end namespace detail
+
+/// Implements a dense probed hash-table based set.
+template <typename ValueT, typename ValueInfoT = DenseMapInfo<ValueT>>
+class DenseSet : public detail::DenseSetImpl<
+ ValueT, DenseMap<ValueT, detail::DenseSetEmpty, ValueInfoT,
+ detail::DenseSetPair<ValueT>>,
+ ValueInfoT> {
+ using BaseT =
+ detail::DenseSetImpl<ValueT,
+ DenseMap<ValueT, detail::DenseSetEmpty, ValueInfoT,
+ detail::DenseSetPair<ValueT>>,
+ ValueInfoT>;
+
+public:
+ using BaseT::BaseT;
+};
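+
+// Illustrative usage (not part of the original header):
+//
+//   DenseSet<int> S = {1, 2, 3};
+//   if (S.contains(2))
+//     S.erase(2);
+//   bool Inserted = S.insert(4).second; // true: 4 was not present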
+
+/// Implements a dense probed hash-table based set with some number of buckets
+/// stored inline.
+template <typename ValueT, unsigned InlineBuckets = 4,
+ typename ValueInfoT = DenseMapInfo<ValueT>>
+class SmallDenseSet
+ : public detail::DenseSetImpl<
+ ValueT, SmallDenseMap<ValueT, detail::DenseSetEmpty, InlineBuckets,
+ ValueInfoT, detail::DenseSetPair<ValueT>>,
+ ValueInfoT> {
+ using BaseT = detail::DenseSetImpl<
+ ValueT, SmallDenseMap<ValueT, detail::DenseSetEmpty, InlineBuckets,
+ ValueInfoT, detail::DenseSetPair<ValueT>>,
+ ValueInfoT>;
+
+public:
+ using BaseT::BaseT;
+};
+
+} // end namespace llvm
+
+#endif // LLVM_ADT_DENSESET_H
+
+#ifdef __GNUC__
+#pragma GCC diagnostic pop
+#endif
diff --git a/contrib/libs/llvm14/include/llvm/ADT/DepthFirstIterator.h b/contrib/libs/llvm14/include/llvm/ADT/DepthFirstIterator.h
new file mode 100644
index 0000000000..fecf943bd7
--- /dev/null
+++ b/contrib/libs/llvm14/include/llvm/ADT/DepthFirstIterator.h
@@ -0,0 +1,321 @@
+#pragma once
+
+#ifdef __GNUC__
+#pragma GCC diagnostic push
+#pragma GCC diagnostic ignored "-Wunused-parameter"
+#endif
+
+//===- llvm/ADT/DepthFirstIterator.h - Depth First iterator -----*- C++ -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+///
+/// \file
+/// This file builds on the ADT/GraphTraits.h file to build generic depth
+/// first graph iterator. This file exposes the following functions/types:
+///
+/// df_begin/df_end/df_iterator
+/// * Normal depth-first iteration - visit a node and then all of its
+/// children.
+///
+/// idf_begin/idf_end/idf_iterator
+/// * Depth-first iteration on the 'inverse' graph.
+///
+/// df_ext_begin/df_ext_end/df_ext_iterator
+/// * Normal depth-first iteration - visit a node and then all of its
+/// children. This iterator stores the 'visited' set in an external set,
+/// which allows it to be more efficient, and allows external clients to
+/// use the set for other purposes.
+///
+/// idf_ext_begin/idf_ext_end/idf_ext_iterator
+/// * Depth-first iteration on the 'inverse' graph.
+/// This iterator stores the 'visited' set in an external set, which
+/// allows it to be more efficient, and allows external clients to use
+/// the set for other purposes.
+///
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_ADT_DEPTHFIRSTITERATOR_H
+#define LLVM_ADT_DEPTHFIRSTITERATOR_H
+
+#include "llvm/ADT/GraphTraits.h"
+#include "llvm/ADT/None.h"
+#include "llvm/ADT/Optional.h"
+#include "llvm/ADT/SmallPtrSet.h"
+#include "llvm/ADT/iterator_range.h"
+#include <iterator>
+#include <utility>
+#include <vector>
+
+namespace llvm {
+
+// df_iterator_storage - A private class which is used to figure out where to
+// store the visited set.
+template<class SetType, bool External> // Non-external set
+class df_iterator_storage {
+public:
+ SetType Visited;
+};
+
+template<class SetType>
+class df_iterator_storage<SetType, true> {
+public:
+ df_iterator_storage(SetType &VSet) : Visited(VSet) {}
+ df_iterator_storage(const df_iterator_storage &S) : Visited(S.Visited) {}
+
+ SetType &Visited;
+};
+
+// The visited state for the iteration is a simple set augmented with
+// one more method, completed, which is invoked when all children of a
+// node have been processed. It is intended to distinguish back edges from
+// cross edges in the spanning tree but is not used in the common case.
+template <typename NodeRef, unsigned SmallSize=8>
+struct df_iterator_default_set : public SmallPtrSet<NodeRef, SmallSize> {
+ using BaseSet = SmallPtrSet<NodeRef, SmallSize>;
+ using iterator = typename BaseSet::iterator;
+
+ std::pair<iterator,bool> insert(NodeRef N) { return BaseSet::insert(N); }
+ template <typename IterT>
+ void insert(IterT Begin, IterT End) { BaseSet::insert(Begin,End); }
+
+ void completed(NodeRef) {}
+};
+
+// Generic Depth First Iterator
+template <class GraphT,
+ class SetType =
+ df_iterator_default_set<typename GraphTraits<GraphT>::NodeRef>,
+ bool ExtStorage = false, class GT = GraphTraits<GraphT>>
+class df_iterator : public df_iterator_storage<SetType, ExtStorage> {
+public:
+ using iterator_category = std::forward_iterator_tag;
+ using value_type = typename GT::NodeRef;
+ using difference_type = std::ptrdiff_t;
+ using pointer = value_type *;
+ using reference = value_type &;
+
+private:
+ using NodeRef = typename GT::NodeRef;
+ using ChildItTy = typename GT::ChildIteratorType;
+
+  // First element is the node reference, second is the 'next child' to visit.
+  // The child iterator is initialized lazily so that graph changes made
+  // during the DFS are picked up.
+ using StackElement = std::pair<NodeRef, Optional<ChildItTy>>;
+
+ // VisitStack - Used to maintain the ordering. Top = current block
+ std::vector<StackElement> VisitStack;
+
+ inline df_iterator(NodeRef Node) {
+ this->Visited.insert(Node);
+ VisitStack.push_back(StackElement(Node, None));
+ }
+
+ inline df_iterator() = default; // End is when stack is empty
+
+ inline df_iterator(NodeRef Node, SetType &S)
+ : df_iterator_storage<SetType, ExtStorage>(S) {
+ if (this->Visited.insert(Node).second)
+ VisitStack.push_back(StackElement(Node, None));
+ }
+
+ inline df_iterator(SetType &S)
+ : df_iterator_storage<SetType, ExtStorage>(S) {
+ // End is when stack is empty
+ }
+
+ inline void toNext() {
+ do {
+ NodeRef Node = VisitStack.back().first;
+ Optional<ChildItTy> &Opt = VisitStack.back().second;
+
+ if (!Opt)
+ Opt.emplace(GT::child_begin(Node));
+
+ // Notice that we directly mutate *Opt here, so that
+ // VisitStack.back().second actually gets updated as the iterator
+ // increases.
+ while (*Opt != GT::child_end(Node)) {
+ NodeRef Next = *(*Opt)++;
+ // Has our next sibling been visited?
+ if (this->Visited.insert(Next).second) {
+ // No, do it now.
+ VisitStack.push_back(StackElement(Next, None));
+ return;
+ }
+ }
+ this->Visited.completed(Node);
+
+ // Oops, ran out of successors... go up a level on the stack.
+ VisitStack.pop_back();
+ } while (!VisitStack.empty());
+ }
+
+public:
+ // Provide static begin and end methods as our public "constructors"
+ static df_iterator begin(const GraphT &G) {
+ return df_iterator(GT::getEntryNode(G));
+ }
+ static df_iterator end(const GraphT &G) { return df_iterator(); }
+
+ // Static begin and end methods as our public ctors for external iterators
+ static df_iterator begin(const GraphT &G, SetType &S) {
+ return df_iterator(GT::getEntryNode(G), S);
+ }
+ static df_iterator end(const GraphT &G, SetType &S) { return df_iterator(S); }
+
+ bool operator==(const df_iterator &x) const {
+ return VisitStack == x.VisitStack;
+ }
+ bool operator!=(const df_iterator &x) const { return !(*this == x); }
+
+ const NodeRef &operator*() const { return VisitStack.back().first; }
+
+  // This is a nonstandard operator-> that dereferences the pointer an extra
+  // time so that you can actually call methods ON the Node, because the
+  // contained type is a pointer. This allows, e.g., BBIt->getTerminator().
+ //
+ NodeRef operator->() const { return **this; }
+
+ df_iterator &operator++() { // Preincrement
+ toNext();
+ return *this;
+ }
+
+ /// Skips all children of the current node and traverses to next node
+ ///
+ /// Note: This function takes care of incrementing the iterator. If you
+ /// always increment and call this function, you risk walking off the end.
+ df_iterator &skipChildren() {
+ VisitStack.pop_back();
+ if (!VisitStack.empty())
+ toNext();
+ return *this;
+ }
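+
+  // Illustrative usage (not part of the original header): pruning a subtree
+  // without the double-increment pitfall noted above; shouldPrune is a
+  // hypothetical predicate.
+  //
+  //   for (auto I = df_begin(G), E = df_end(G); I != E;) {
+  //     if (shouldPrune(*I))
+  //       I.skipChildren(); // already advances past the pruned subtree
+  //     else
+  //       ++I;
+  //   }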
+
+ df_iterator operator++(int) { // Postincrement
+ df_iterator tmp = *this;
+ ++*this;
+ return tmp;
+ }
+
+ // nodeVisited - return true if this iterator has already visited the
+ // specified node. This is public, and will probably be used to iterate over
+  // nodes that a depth first iteration did not find: i.e., unreachable nodes.
+ //
+ bool nodeVisited(NodeRef Node) const {
+ return this->Visited.contains(Node);
+ }
+
+ /// getPathLength - Return the length of the path from the entry node to the
+ /// current node, counting both nodes.
+ unsigned getPathLength() const { return VisitStack.size(); }
+
+ /// getPath - Return the n'th node in the path from the entry node to the
+ /// current node.
+ NodeRef getPath(unsigned n) const { return VisitStack[n].first; }
+};
+
+// Provide global constructors that automatically figure out correct types...
+//
+template <class T>
+df_iterator<T> df_begin(const T& G) {
+ return df_iterator<T>::begin(G);
+}
+
+template <class T>
+df_iterator<T> df_end(const T& G) {
+ return df_iterator<T>::end(G);
+}
+
+// Provide an accessor method to use them in range-based patterns.
+template <class T>
+iterator_range<df_iterator<T>> depth_first(const T& G) {
+ return make_range(df_begin(G), df_end(G));
+}
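+
+// Illustrative usage (not part of the original header), for any graph type
+// with a GraphTraits specialization; visit is a hypothetical callback:
+//
+//   for (auto *N : depth_first(G))
+//     visit(N); // each reachable node exactly once, in DFS preorder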
+
+// Provide global definitions of external depth first iterators...
+template <class T, class SetTy = df_iterator_default_set<typename GraphTraits<T>::NodeRef>>
+struct df_ext_iterator : public df_iterator<T, SetTy, true> {
+ df_ext_iterator(const df_iterator<T, SetTy, true> &V)
+ : df_iterator<T, SetTy, true>(V) {}
+};
+
+template <class T, class SetTy>
+df_ext_iterator<T, SetTy> df_ext_begin(const T& G, SetTy &S) {
+ return df_ext_iterator<T, SetTy>::begin(G, S);
+}
+
+template <class T, class SetTy>
+df_ext_iterator<T, SetTy> df_ext_end(const T& G, SetTy &S) {
+ return df_ext_iterator<T, SetTy>::end(G, S);
+}
+
+template <class T, class SetTy>
+iterator_range<df_ext_iterator<T, SetTy>> depth_first_ext(const T& G,
+ SetTy &S) {
+ return make_range(df_ext_begin(G, S), df_ext_end(G, S));
+}
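+
+// For example, a minimal sketch: the external set outlives the traversal, so
+// a later walk can skip nodes that were already seen (the set's element type
+// must match GraphTraits<T>::NodeRef):
+//
+//   df_iterator_default_set<BasicBlock *> Visited;
+//   for (BasicBlock *BB : depth_first_ext(&F->getEntryBlock(), Visited))
+//     ; // visit BB
+//   // Visited now contains every block reachable from the entry.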
+
+// Provide global definitions of inverse depth first iterators...
+template <class T,
+ class SetTy =
+ df_iterator_default_set<typename GraphTraits<T>::NodeRef>,
+ bool External = false>
+struct idf_iterator : public df_iterator<Inverse<T>, SetTy, External> {
+ idf_iterator(const df_iterator<Inverse<T>, SetTy, External> &V)
+ : df_iterator<Inverse<T>, SetTy, External>(V) {}
+};
+
+template <class T>
+idf_iterator<T> idf_begin(const T& G) {
+ return idf_iterator<T>::begin(Inverse<T>(G));
+}
+
+template <class T>
+idf_iterator<T> idf_end(const T& G){
+ return idf_iterator<T>::end(Inverse<T>(G));
+}
+
+// Provide an accessor method to use them in range-based patterns.
+template <class T>
+iterator_range<idf_iterator<T>> inverse_depth_first(const T& G) {
+ return make_range(idf_begin(G), idf_end(G));
+}
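+
+// For example, a minimal sketch (requires a GraphTraits specialization for
+// Inverse<T>, as llvm/IR/CFG.h provides for basic blocks; ExitBB is a
+// BasicBlock *):
+//
+//   for (BasicBlock *BB : inverse_depth_first(ExitBB))
+//     ; // visits ExitBB, then its predecessors depth-first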
+
+// Provide global definitions of external inverse depth first iterators...
+template <class T, class SetTy = df_iterator_default_set<typename GraphTraits<T>::NodeRef>>
+struct idf_ext_iterator : public idf_iterator<T, SetTy, true> {
+ idf_ext_iterator(const idf_iterator<T, SetTy, true> &V)
+ : idf_iterator<T, SetTy, true>(V) {}
+ idf_ext_iterator(const df_iterator<Inverse<T>, SetTy, true> &V)
+ : idf_iterator<T, SetTy, true>(V) {}
+};
+
+template <class T, class SetTy>
+idf_ext_iterator<T, SetTy> idf_ext_begin(const T& G, SetTy &S) {
+ return idf_ext_iterator<T, SetTy>::begin(Inverse<T>(G), S);
+}
+
+template <class T, class SetTy>
+idf_ext_iterator<T, SetTy> idf_ext_end(const T& G, SetTy &S) {
+ return idf_ext_iterator<T, SetTy>::end(Inverse<T>(G), S);
+}
+
+template <class T, class SetTy>
+iterator_range<idf_ext_iterator<T, SetTy>> inverse_depth_first_ext(const T& G,
+ SetTy &S) {
+ return make_range(idf_ext_begin(G, S), idf_ext_end(G, S));
+}
+
+} // end namespace llvm
+
+#endif // LLVM_ADT_DEPTHFIRSTITERATOR_H
+
+#ifdef __GNUC__
+#pragma GCC diagnostic pop
+#endif
diff --git a/contrib/libs/llvm14/include/llvm/ADT/DirectedGraph.h b/contrib/libs/llvm14/include/llvm/ADT/DirectedGraph.h
new file mode 100644
index 0000000000..bc560c4f27
--- /dev/null
+++ b/contrib/libs/llvm14/include/llvm/ADT/DirectedGraph.h
@@ -0,0 +1,291 @@
+#pragma once
+
+#ifdef __GNUC__
+#pragma GCC diagnostic push
+#pragma GCC diagnostic ignored "-Wunused-parameter"
+#endif
+
+//===- llvm/ADT/DirectedGraph.h - Directed Graph ----------------*- C++ -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+///
+/// \file
+/// This file defines the interface and a base class implementation for a
+/// directed graph.
+///
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_ADT_DIRECTEDGRAPH_H
+#define LLVM_ADT_DIRECTEDGRAPH_H
+
+#include "llvm/ADT/GraphTraits.h"
+#include "llvm/ADT/SetVector.h"
+#include "llvm/ADT/SmallVector.h"
+#include "llvm/Support/Debug.h"
+#include "llvm/Support/raw_ostream.h"
+
+namespace llvm {
+
+/// Represent an edge in the directed graph.
+/// The edge contains the target node it connects to.
+template <class NodeType, class EdgeType> class DGEdge {
+public:
+ DGEdge() = delete;
+ /// Create an edge pointing to the given node \p N.
+ explicit DGEdge(NodeType &N) : TargetNode(N) {}
+ explicit DGEdge(const DGEdge<NodeType, EdgeType> &E)
+ : TargetNode(E.TargetNode) {}
+ DGEdge<NodeType, EdgeType> &operator=(const DGEdge<NodeType, EdgeType> &E) {
+ TargetNode = E.TargetNode;
+ return *this;
+ }
+
+ /// Static polymorphism: delegate implementation (via isEqualTo) to the
+ /// derived class.
+ bool operator==(const DGEdge &E) const {
+ return getDerived().isEqualTo(E.getDerived());
+ }
+ bool operator!=(const DGEdge &E) const { return !operator==(E); }
+
+ /// Retrieve the target node this edge connects to.
+ const NodeType &getTargetNode() const { return TargetNode; }
+ NodeType &getTargetNode() {
+ return const_cast<NodeType &>(
+ static_cast<const DGEdge<NodeType, EdgeType> &>(*this).getTargetNode());
+ }
+
+ /// Set the target node this edge connects to.
+ void setTargetNode(const NodeType &N) { TargetNode = N; }
+
+protected:
+  // As the default implementation, use address comparison for equality.
+ bool isEqualTo(const EdgeType &E) const { return this == &E; }
+
+ // Cast the 'this' pointer to the derived type and return a reference.
+ EdgeType &getDerived() { return *static_cast<EdgeType *>(this); }
+ const EdgeType &getDerived() const {
+ return *static_cast<const EdgeType *>(this);
+ }
+
+ // The target node this edge connects to.
+ NodeType &TargetNode;
+};
+
+/// Represent a node in the directed graph.
+/// The node has a (possibly empty) list of outgoing edges.
+template <class NodeType, class EdgeType> class DGNode {
+public:
+ using EdgeListTy = SetVector<EdgeType *>;
+ using iterator = typename EdgeListTy::iterator;
+ using const_iterator = typename EdgeListTy::const_iterator;
+
+ /// Create a node with a single outgoing edge \p E.
+ explicit DGNode(EdgeType &E) : Edges() { Edges.insert(&E); }
+ DGNode() = default;
+
+ explicit DGNode(const DGNode<NodeType, EdgeType> &N) : Edges(N.Edges) {}
+ DGNode(DGNode<NodeType, EdgeType> &&N) : Edges(std::move(N.Edges)) {}
+
+ DGNode<NodeType, EdgeType> &operator=(const DGNode<NodeType, EdgeType> &N) {
+ Edges = N.Edges;
+ return *this;
+ }
+ DGNode<NodeType, EdgeType> &operator=(const DGNode<NodeType, EdgeType> &&N) {
+ Edges = std::move(N.Edges);
+ return *this;
+ }
+
+ /// Static polymorphism: delegate implementation (via isEqualTo) to the
+ /// derived class.
+ friend bool operator==(const NodeType &M, const NodeType &N) {
+ return M.isEqualTo(N);
+ }
+ friend bool operator!=(const NodeType &M, const NodeType &N) {
+ return !(M == N);
+ }
+
+ const_iterator begin() const { return Edges.begin(); }
+ const_iterator end() const { return Edges.end(); }
+ iterator begin() { return Edges.begin(); }
+ iterator end() { return Edges.end(); }
+ const EdgeType &front() const { return *Edges.front(); }
+ EdgeType &front() { return *Edges.front(); }
+ const EdgeType &back() const { return *Edges.back(); }
+ EdgeType &back() { return *Edges.back(); }
+
+  /// Collect in \p EL all the edges from this node to \p N.
+ /// Return true if at least one edge was found, and false otherwise.
+ /// Note that this implementation allows more than one edge to connect
+ /// a given pair of nodes.
+ bool findEdgesTo(const NodeType &N, SmallVectorImpl<EdgeType *> &EL) const {
+ assert(EL.empty() && "Expected the list of edges to be empty.");
+ for (auto *E : Edges)
+ if (E->getTargetNode() == N)
+ EL.push_back(E);
+ return !EL.empty();
+ }
+
+ /// Add the given edge \p E to this node, if it doesn't exist already. Returns
+ /// true if the edge is added and false otherwise.
+ bool addEdge(EdgeType &E) { return Edges.insert(&E); }
+
+ /// Remove the given edge \p E from this node, if it exists.
+ void removeEdge(EdgeType &E) { Edges.remove(&E); }
+
+ /// Test whether there is an edge that goes from this node to \p N.
+ bool hasEdgeTo(const NodeType &N) const {
+ return (findEdgeTo(N) != Edges.end());
+ }
+
+ /// Retrieve the outgoing edges for the node.
+ const EdgeListTy &getEdges() const { return Edges; }
+ EdgeListTy &getEdges() {
+ return const_cast<EdgeListTy &>(
+ static_cast<const DGNode<NodeType, EdgeType> &>(*this).Edges);
+ }
+
+ /// Clear the outgoing edges.
+ void clear() { Edges.clear(); }
+
+protected:
+  // As the default implementation, use address comparison for equality.
+ bool isEqualTo(const NodeType &N) const { return this == &N; }
+
+ // Cast the 'this' pointer to the derived type and return a reference.
+ NodeType &getDerived() { return *static_cast<NodeType *>(this); }
+ const NodeType &getDerived() const {
+ return *static_cast<const NodeType *>(this);
+ }
+
+ /// Find an edge to \p N. If more than one edge exists, this will return
+ /// the first one in the list of edges.
+ const_iterator findEdgeTo(const NodeType &N) const {
+ return llvm::find_if(
+ Edges, [&N](const EdgeType *E) { return E->getTargetNode() == N; });
+ }
+
+ // The list of outgoing edges.
+ EdgeListTy Edges;
+};
+
+/// Directed graph
+///
+/// The graph is represented by a table of nodes.
+/// Each node contains a (possibly empty) list of outgoing edges.
+/// Each edge contains the target node it connects to.
+template <class NodeType, class EdgeType> class DirectedGraph {
+protected:
+ using NodeListTy = SmallVector<NodeType *, 10>;
+ using EdgeListTy = SmallVector<EdgeType *, 10>;
+public:
+ using iterator = typename NodeListTy::iterator;
+ using const_iterator = typename NodeListTy::const_iterator;
+ using DGraphType = DirectedGraph<NodeType, EdgeType>;
+
+ DirectedGraph() = default;
+ explicit DirectedGraph(NodeType &N) : Nodes() { addNode(N); }
+ DirectedGraph(const DGraphType &G) : Nodes(G.Nodes) {}
+ DirectedGraph(DGraphType &&RHS) : Nodes(std::move(RHS.Nodes)) {}
+ DGraphType &operator=(const DGraphType &G) {
+ Nodes = G.Nodes;
+ return *this;
+ }
+ DGraphType &operator=(const DGraphType &&G) {
+ Nodes = std::move(G.Nodes);
+ return *this;
+ }
+
+ const_iterator begin() const { return Nodes.begin(); }
+ const_iterator end() const { return Nodes.end(); }
+ iterator begin() { return Nodes.begin(); }
+ iterator end() { return Nodes.end(); }
+ const NodeType &front() const { return *Nodes.front(); }
+ NodeType &front() { return *Nodes.front(); }
+ const NodeType &back() const { return *Nodes.back(); }
+ NodeType &back() { return *Nodes.back(); }
+
+ size_t size() const { return Nodes.size(); }
+
+ /// Find the given node \p N in the table.
+ const_iterator findNode(const NodeType &N) const {
+ return llvm::find_if(Nodes,
+ [&N](const NodeType *Node) { return *Node == N; });
+ }
+ iterator findNode(const NodeType &N) {
+ return const_cast<iterator>(
+ static_cast<const DGraphType &>(*this).findNode(N));
+ }
+
+ /// Add the given node \p N to the graph if it is not already present.
+ bool addNode(NodeType &N) {
+ if (findNode(N) != Nodes.end())
+ return false;
+ Nodes.push_back(&N);
+ return true;
+ }
+
+ /// Collect in \p EL all edges that are coming into node \p N. Return true
+ /// if at least one edge was found, and false otherwise.
+ bool findIncomingEdgesToNode(const NodeType &N, SmallVectorImpl<EdgeType*> &EL) const {
+ assert(EL.empty() && "Expected the list of edges to be empty.");
+ EdgeListTy TempList;
+ for (auto *Node : Nodes) {
+ if (*Node == N)
+ continue;
+ Node->findEdgesTo(N, TempList);
+ llvm::append_range(EL, TempList);
+ TempList.clear();
+ }
+ return !EL.empty();
+ }
+
+ /// Remove the given node \p N from the graph. If the node has incoming or
+ /// outgoing edges, they are also removed. Return true if the node was found
+ /// and then removed, and false if the node was not found in the graph to
+ /// begin with.
+ bool removeNode(NodeType &N) {
+ iterator IT = findNode(N);
+ if (IT == Nodes.end())
+ return false;
+ // Remove incoming edges.
+ EdgeListTy EL;
+ for (auto *Node : Nodes) {
+ if (*Node == N)
+ continue;
+ Node->findEdgesTo(N, EL);
+ for (auto *E : EL)
+ Node->removeEdge(*E);
+ EL.clear();
+ }
+ N.clear();
+ Nodes.erase(IT);
+ return true;
+ }
+
+ /// Assuming nodes \p Src and \p Dst are already in the graph, connect node \p
+ /// Src to node \p Dst using the provided edge \p E. Return true if \p Src is
+ /// not already connected to \p Dst via \p E, and false otherwise.
+ bool connect(NodeType &Src, NodeType &Dst, EdgeType &E) {
+ assert(findNode(Src) != Nodes.end() && "Src node should be present.");
+ assert(findNode(Dst) != Nodes.end() && "Dst node should be present.");
+ assert((E.getTargetNode() == Dst) &&
+ "Target of the given edge does not match Dst.");
+ return Src.addEdge(E);
+ }
+
+protected:
+ // The list of nodes in the graph.
+ NodeListTy Nodes;
+};
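+
+// A minimal usage sketch (MyNode and MyEdge are illustrative; concrete
+// clients derive from DGNode and DGEdge via CRTP):
+//
+//   struct MyEdge;
+//   struct MyNode : DGNode<MyNode, MyEdge> {};
+//   struct MyEdge : DGEdge<MyNode, MyEdge> {
+//     explicit MyEdge(MyNode &N) : DGEdge(N) {}
+//   };
+//
+//   MyNode A, B;
+//   MyEdge E(B);                       // an edge whose target is B
+//   DirectedGraph<MyNode, MyEdge> G;
+//   G.addNode(A);
+//   G.addNode(B);
+//   G.connect(A, B, E);                // A -> B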
+
+} // namespace llvm
+
+#endif // LLVM_ADT_DIRECTEDGRAPH_H
+
+#ifdef __GNUC__
+#pragma GCC diagnostic pop
+#endif
diff --git a/contrib/libs/llvm14/include/llvm/ADT/EnumeratedArray.h b/contrib/libs/llvm14/include/llvm/ADT/EnumeratedArray.h
new file mode 100644
index 0000000000..ec637168c1
--- /dev/null
+++ b/contrib/libs/llvm14/include/llvm/ADT/EnumeratedArray.h
@@ -0,0 +1,62 @@
+#pragma once
+
+#ifdef __GNUC__
+#pragma GCC diagnostic push
+#pragma GCC diagnostic ignored "-Wunused-parameter"
+#endif
+
+//===- llvm/ADT/EnumeratedArray.h - Enumerated Array-------------*- C++ -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+///
+/// \file
+/// This file defines an array type that can be indexed using scoped enum
+/// values.
+///
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_ADT_ENUMERATEDARRAY_H
+#define LLVM_ADT_ENUMERATEDARRAY_H
+
+#include <cassert>
+
+namespace llvm {
+
+template <typename ValueType, typename Enumeration,
+ Enumeration LargestEnum = Enumeration::Last, typename IndexType = int,
+ IndexType Size = 1 + static_cast<IndexType>(LargestEnum)>
+class EnumeratedArray {
+public:
+ EnumeratedArray() = default;
+ EnumeratedArray(ValueType V) {
+ for (IndexType IX = 0; IX < Size; ++IX) {
+ Underlying[IX] = V;
+ }
+ }
+ inline const ValueType &operator[](const Enumeration Index) const {
+ auto IX = static_cast<const IndexType>(Index);
+ assert(IX >= 0 && IX < Size && "Index is out of bounds.");
+ return Underlying[IX];
+ }
+ inline ValueType &operator[](const Enumeration Index) {
+ return const_cast<ValueType &>(
+ static_cast<const EnumeratedArray<ValueType, Enumeration, LargestEnum,
+ IndexType, Size> &>(*this)[Index]);
+ }
+ inline IndexType size() { return Size; }
+
+private:
+ ValueType Underlying[Size];
+};
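+
+// A minimal usage sketch (the enumeration is illustrative; by default the
+// array size is derived from an enumerator named Last):
+//
+//   enum class Color { Red, Green, Blue, Last = Blue };
+//   EnumeratedArray<float, Color> Brightness(1.0f);
+//   Brightness[Color::Green] = 0.5f;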
+
+} // namespace llvm
+
+#endif // LLVM_ADT_ENUMERATEDARRAY_H
+
+#ifdef __GNUC__
+#pragma GCC diagnostic pop
+#endif
diff --git a/contrib/libs/llvm14/include/llvm/ADT/EpochTracker.h b/contrib/libs/llvm14/include/llvm/ADT/EpochTracker.h
new file mode 100644
index 0000000000..21eb21cf5d
--- /dev/null
+++ b/contrib/libs/llvm14/include/llvm/ADT/EpochTracker.h
@@ -0,0 +1,110 @@
+#pragma once
+
+#ifdef __GNUC__
+#pragma GCC diagnostic push
+#pragma GCC diagnostic ignored "-Wunused-parameter"
+#endif
+
+//===- llvm/ADT/EpochTracker.h - ADT epoch tracking --------------*- C++ -*-==//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+///
+/// \file
+/// This file defines the DebugEpochBase and DebugEpochBase::HandleBase classes.
+/// These can be used to write iterators that are fail-fast when LLVM is built
+/// with asserts enabled.
+///
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_ADT_EPOCHTRACKER_H
+#define LLVM_ADT_EPOCHTRACKER_H
+
+#include "llvm/Config/abi-breaking.h"
+
+#include <cstdint>
+
+namespace llvm {
+
+#if LLVM_ENABLE_ABI_BREAKING_CHECKS
+
+/// A base class for data structure classes wishing to make iterators
+/// ("handles") pointing into themselves fail-fast. When building without
+/// asserts, this class is empty and does nothing.
+///
+/// DebugEpochBase does not by itself track handles pointing into itself. The
+/// expectation is that routines touching the handles will poll on
+/// isHandleInSync at appropriate points to assert that the handle they're using
+/// is still valid.
+///
+class DebugEpochBase {
+ uint64_t Epoch;
+
+public:
+ DebugEpochBase() : Epoch(0) {}
+
+ /// Calling incrementEpoch invalidates all handles pointing into the
+ /// calling instance.
+ void incrementEpoch() { ++Epoch; }
+
+ /// The destructor calls incrementEpoch to make use-after-free bugs
+ /// more likely to crash deterministically.
+ ~DebugEpochBase() { incrementEpoch(); }
+
+ /// A base class for iterator classes ("handles") that wish to poll for
+ /// iterator invalidating modifications in the underlying data structure.
+ /// When LLVM is built without asserts, this class is empty and does nothing.
+ ///
+ /// HandleBase does not track the parent data structure by itself. It expects
+ /// the routines modifying the data structure to call incrementEpoch when they
+ /// make an iterator-invalidating modification.
+ ///
+ class HandleBase {
+ const uint64_t *EpochAddress;
+ uint64_t EpochAtCreation;
+
+ public:
+ HandleBase() : EpochAddress(nullptr), EpochAtCreation(UINT64_MAX) {}
+
+ explicit HandleBase(const DebugEpochBase *Parent)
+ : EpochAddress(&Parent->Epoch), EpochAtCreation(Parent->Epoch) {}
+
+ /// Returns true if the DebugEpochBase this Handle is linked to has
+ /// not called incrementEpoch on itself since the creation of this
+ /// HandleBase instance.
+ bool isHandleInSync() const { return *EpochAddress == EpochAtCreation; }
+
+ /// Returns a pointer to the epoch word stored in the data structure
+ /// this handle points into. Can be used to check if two iterators point
+ /// into the same data structure.
+ const void *getEpochAddress() const { return EpochAddress; }
+ };
+};
+
+#else
+
+class DebugEpochBase {
+public:
+ void incrementEpoch() {}
+
+ class HandleBase {
+ public:
+ HandleBase() = default;
+ explicit HandleBase(const DebugEpochBase *) {}
+ bool isHandleInSync() const { return true; }
+ const void *getEpochAddress() const { return nullptr; }
+ };
+};
+
+#endif // LLVM_ENABLE_ABI_BREAKING_CHECKS
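+
+// A minimal usage sketch (MyContainer and its iterator are illustrative): a
+// container bumps its epoch on every invalidating mutation, and handles check
+// that they were created in the current epoch before being dereferenced:
+//
+//   class MyContainer : public DebugEpochBase {
+//     void grow() {
+//       incrementEpoch(); // all outstanding handles become stale
+//       // ... reallocate storage ...
+//     }
+//   public:
+//     class iterator : public DebugEpochBase::HandleBase {
+//       // dereference only after assert(isHandleInSync())
+//     };
+//   };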
+
+} // namespace llvm
+
+#endif
+
+#ifdef __GNUC__
+#pragma GCC diagnostic pop
+#endif
diff --git a/contrib/libs/llvm14/include/llvm/ADT/EquivalenceClasses.h b/contrib/libs/llvm14/include/llvm/ADT/EquivalenceClasses.h
new file mode 100644
index 0000000000..0d05148713
--- /dev/null
+++ b/contrib/libs/llvm14/include/llvm/ADT/EquivalenceClasses.h
@@ -0,0 +1,327 @@
+#pragma once
+
+#ifdef __GNUC__
+#pragma GCC diagnostic push
+#pragma GCC diagnostic ignored "-Wunused-parameter"
+#endif
+
+//===- llvm/ADT/EquivalenceClasses.h - Generic Equiv. Classes ---*- C++ -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+///
+/// \file
+/// Generic implementation of equivalence classes through the use of Tarjan's
+/// efficient union-find algorithm.
+///
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_ADT_EQUIVALENCECLASSES_H
+#define LLVM_ADT_EQUIVALENCECLASSES_H
+
+#include <cassert>
+#include <cstddef>
+#include <cstdint>
+#include <iterator>
+#include <set>
+
+namespace llvm {
+
+/// EquivalenceClasses - This represents a collection of equivalence classes and
+/// supports three efficient operations: insert an element into a class of its
+/// own, union two classes, and find the class for a given element. In
+/// addition to these modification methods, it is possible to iterate over all
+/// of the equivalence classes and all of the elements in a class.
+///
+/// This implementation is an efficient implementation that only stores one copy
+/// of the element being indexed per entry in the set, and allows any arbitrary
+/// type to be indexed (as long as it can be ordered with operator< or a
+/// comparator is provided).
+///
+/// Here is a simple example using integers:
+///
+/// \code
+/// EquivalenceClasses<int> EC;
+/// EC.unionSets(1, 2); // insert 1, 2 into the same set
+/// EC.insert(4); EC.insert(5); // insert 4, 5 into own sets
+/// EC.unionSets(5, 1); // merge the set for 1 with 5's set.
+///
+/// for (EquivalenceClasses<int>::iterator I = EC.begin(), E = EC.end();
+/// I != E; ++I) { // Iterate over all of the equivalence sets.
+/// if (!I->isLeader()) continue; // Ignore non-leader sets.
+/// for (EquivalenceClasses<int>::member_iterator MI = EC.member_begin(I);
+/// MI != EC.member_end(); ++MI) // Loop over members in this set.
+/// cerr << *MI << " "; // Print member.
+/// cerr << "\n"; // Finish set.
+/// }
+/// \endcode
+///
+/// This example prints:
+/// 4
+/// 5 1 2
+///
+template <class ElemTy, class Compare = std::less<ElemTy>>
+class EquivalenceClasses {
+ /// ECValue - The EquivalenceClasses data structure is just a set of these.
+ /// Each of these represents a relation for a value. First it stores the
+ /// value itself, which provides the ordering that the set queries. Next, it
+ /// provides a "next pointer", which is used to enumerate all of the elements
+  /// in the unioned set. Finally, it defines either an "end of list pointer" or
+  /// a "leader pointer", depending on whether the value itself is a leader. A
+  /// "leader pointer" points to the node that is the leader for this element,
+  /// if the node is not a leader. An "end of list pointer" points to the last
+  /// node in the list of members of this class. Whether or not a node is a
+  /// leader is determined by a bit stolen from one of the pointers.
+ class ECValue {
+ friend class EquivalenceClasses;
+
+ mutable const ECValue *Leader, *Next;
+ ElemTy Data;
+
+ // ECValue ctor - Start out with EndOfList pointing to this node, Next is
+ // Null, isLeader = true.
+ ECValue(const ElemTy &Elt)
+ : Leader(this), Next((ECValue*)(intptr_t)1), Data(Elt) {}
+
+ const ECValue *getLeader() const {
+ if (isLeader()) return this;
+ if (Leader->isLeader()) return Leader;
+ // Path compression.
+ return Leader = Leader->getLeader();
+ }
+
+ const ECValue *getEndOfList() const {
+ assert(isLeader() && "Cannot get the end of a list for a non-leader!");
+ return Leader;
+ }
+
+ void setNext(const ECValue *NewNext) const {
+ assert(getNext() == nullptr && "Already has a next pointer!");
+ Next = (const ECValue*)((intptr_t)NewNext | (intptr_t)isLeader());
+ }
+
+ public:
+ ECValue(const ECValue &RHS) : Leader(this), Next((ECValue*)(intptr_t)1),
+ Data(RHS.Data) {
+ // Only support copying of singleton nodes.
+ assert(RHS.isLeader() && RHS.getNext() == nullptr && "Not a singleton!");
+ }
+
+ bool isLeader() const { return (intptr_t)Next & 1; }
+ const ElemTy &getData() const { return Data; }
+
+ const ECValue *getNext() const {
+ return (ECValue*)((intptr_t)Next & ~(intptr_t)1);
+ }
+ };
+
+ /// A wrapper of the comparator, to be passed to the set.
+ struct ECValueComparator {
+ using is_transparent = void;
+
+ ECValueComparator() : compare(Compare()) {}
+
+ bool operator()(const ECValue &lhs, const ECValue &rhs) const {
+ return compare(lhs.Data, rhs.Data);
+ }
+
+ template <typename T>
+ bool operator()(const T &lhs, const ECValue &rhs) const {
+ return compare(lhs, rhs.Data);
+ }
+
+ template <typename T>
+ bool operator()(const ECValue &lhs, const T &rhs) const {
+ return compare(lhs.Data, rhs);
+ }
+
+ const Compare compare;
+ };
+
+ /// TheMapping - This implicitly provides a mapping from ElemTy values to the
+ /// ECValues, it just keeps the key as part of the value.
+ std::set<ECValue, ECValueComparator> TheMapping;
+
+public:
+ EquivalenceClasses() = default;
+ EquivalenceClasses(const EquivalenceClasses &RHS) {
+ operator=(RHS);
+ }
+
+ const EquivalenceClasses &operator=(const EquivalenceClasses &RHS) {
+ TheMapping.clear();
+ for (iterator I = RHS.begin(), E = RHS.end(); I != E; ++I)
+ if (I->isLeader()) {
+ member_iterator MI = RHS.member_begin(I);
+ member_iterator LeaderIt = member_begin(insert(*MI));
+ for (++MI; MI != member_end(); ++MI)
+ unionSets(LeaderIt, member_begin(insert(*MI)));
+ }
+ return *this;
+ }
+
+ //===--------------------------------------------------------------------===//
+ // Inspection methods
+ //
+
+ /// iterator* - Provides a way to iterate over all values in the set.
+ using iterator = typename std::set<ECValue>::const_iterator;
+
+ iterator begin() const { return TheMapping.begin(); }
+ iterator end() const { return TheMapping.end(); }
+
+ bool empty() const { return TheMapping.empty(); }
+
+ /// member_* Iterate over the members of an equivalence class.
+ class member_iterator;
+ member_iterator member_begin(iterator I) const {
+ // Only leaders provide anything to iterate over.
+ return member_iterator(I->isLeader() ? &*I : nullptr);
+ }
+ member_iterator member_end() const {
+ return member_iterator(nullptr);
+ }
+
+ /// findValue - Return an iterator to the specified value. If it does not
+ /// exist, end() is returned.
+ iterator findValue(const ElemTy &V) const {
+ return TheMapping.find(V);
+ }
+
+ /// getLeaderValue - Return the leader for the specified value that is in the
+ /// set. It is an error to call this method for a value that is not yet in
+ /// the set. For that, call getOrInsertLeaderValue(V).
+ const ElemTy &getLeaderValue(const ElemTy &V) const {
+ member_iterator MI = findLeader(V);
+ assert(MI != member_end() && "Value is not in the set!");
+ return *MI;
+ }
+
+  /// getOrInsertLeaderValue - Return the leader for the specified value that is
+  /// in the set. If the value is not in the set, it is inserted and then its
+  /// leader is returned.
+ const ElemTy &getOrInsertLeaderValue(const ElemTy &V) {
+ member_iterator MI = findLeader(insert(V));
+ assert(MI != member_end() && "Value is not in the set!");
+ return *MI;
+ }
+
+ /// getNumClasses - Return the number of equivalence classes in this set.
+ /// Note that this is a linear time operation.
+ unsigned getNumClasses() const {
+ unsigned NC = 0;
+ for (iterator I = begin(), E = end(); I != E; ++I)
+ if (I->isLeader()) ++NC;
+ return NC;
+ }
+
+ //===--------------------------------------------------------------------===//
+ // Mutation methods
+
+ /// insert - Insert a new value into the union/find set, ignoring the request
+ /// if the value already exists.
+ iterator insert(const ElemTy &Data) {
+ return TheMapping.insert(ECValue(Data)).first;
+ }
+
+ /// findLeader - Given a value in the set, return a member iterator for the
+ /// equivalence class it is in. This does the path-compression part that
+ /// makes union-find "union findy". This returns an end iterator if the value
+ /// is not in the equivalence class.
+ member_iterator findLeader(iterator I) const {
+ if (I == TheMapping.end()) return member_end();
+ return member_iterator(I->getLeader());
+ }
+ member_iterator findLeader(const ElemTy &V) const {
+ return findLeader(TheMapping.find(V));
+ }
+
+ /// union - Merge the two equivalence sets for the specified values, inserting
+ /// them if they do not already exist in the equivalence set.
+ member_iterator unionSets(const ElemTy &V1, const ElemTy &V2) {
+ iterator V1I = insert(V1), V2I = insert(V2);
+ return unionSets(findLeader(V1I), findLeader(V2I));
+ }
+ member_iterator unionSets(member_iterator L1, member_iterator L2) {
+ assert(L1 != member_end() && L2 != member_end() && "Illegal inputs!");
+ if (L1 == L2) return L1; // Unifying the same two sets, noop.
+
+ // Otherwise, this is a real union operation. Set the end of the L1 list to
+ // point to the L2 leader node.
+ const ECValue &L1LV = *L1.Node, &L2LV = *L2.Node;
+ L1LV.getEndOfList()->setNext(&L2LV);
+
+ // Update L1LV's end of list pointer.
+ L1LV.Leader = L2LV.getEndOfList();
+
+ // Clear L2's leader flag:
+ L2LV.Next = L2LV.getNext();
+
+ // L2's leader is now L1.
+ L2LV.Leader = &L1LV;
+ return L1;
+ }
+
+  // isEquivalent - Return true if V1 is equivalent to V2. This can happen if
+  // V1 is equal to V2 or if they belong to the same equivalence class.
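+  // For example (a sketch; EC is an EquivalenceClasses<int>):
+  //   EC.unionSets(1, 2);
+  //   EC.isEquivalent(1, 2); // true
+  //   EC.isEquivalent(1, 4); // false: 4 is not in 1's class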
+ bool isEquivalent(const ElemTy &V1, const ElemTy &V2) const {
+ // Fast path: any element is equivalent to itself.
+ if (V1 == V2)
+ return true;
+ auto It = findLeader(V1);
+ return It != member_end() && It == findLeader(V2);
+ }
+
+ class member_iterator {
+ friend class EquivalenceClasses;
+
+ const ECValue *Node;
+
+ public:
+ using iterator_category = std::forward_iterator_tag;
+ using value_type = const ElemTy;
+ using size_type = std::size_t;
+ using difference_type = std::ptrdiff_t;
+ using pointer = value_type *;
+ using reference = value_type &;
+
+ explicit member_iterator() = default;
+ explicit member_iterator(const ECValue *N) : Node(N) {}
+
+ reference operator*() const {
+ assert(Node != nullptr && "Dereferencing end()!");
+ return Node->getData();
+ }
+ pointer operator->() const { return &operator*(); }
+
+ member_iterator &operator++() {
+ assert(Node != nullptr && "++'d off the end of the list!");
+ Node = Node->getNext();
+ return *this;
+ }
+
+ member_iterator operator++(int) { // postincrement operators.
+ member_iterator tmp = *this;
+ ++*this;
+ return tmp;
+ }
+
+ bool operator==(const member_iterator &RHS) const {
+ return Node == RHS.Node;
+ }
+ bool operator!=(const member_iterator &RHS) const {
+ return Node != RHS.Node;
+ }
+ };
+};
+
+} // end namespace llvm
+
+#endif // LLVM_ADT_EQUIVALENCECLASSES_H
+
+#ifdef __GNUC__
+#pragma GCC diagnostic pop
+#endif
diff --git a/contrib/libs/llvm14/include/llvm/ADT/FloatingPointMode.h b/contrib/libs/llvm14/include/llvm/ADT/FloatingPointMode.h
new file mode 100644
index 0000000000..7f33ca09b8
--- /dev/null
+++ b/contrib/libs/llvm14/include/llvm/ADT/FloatingPointMode.h
@@ -0,0 +1,207 @@
+#pragma once
+
+#ifdef __GNUC__
+#pragma GCC diagnostic push
+#pragma GCC diagnostic ignored "-Wunused-parameter"
+#endif
+
+//===- llvm/Support/FloatingPointMode.h -------------------------*- C++ -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+///
+/// \file
+/// Utilities for dealing with flags related to floating point mode controls.
+///
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_ADT_FLOATINGPOINTMODE_H
+#define LLVM_ADT_FLOATINGPOINTMODE_H
+
+#include "llvm/ADT/StringSwitch.h"
+#include "llvm/Support/raw_ostream.h"
+
+namespace llvm {
+
+/// Rounding mode.
+///
+/// Enumerates supported rounding modes, as well as some special values. The set
+/// of the modes must agree with IEEE-754, 4.3.1 and 4.3.2. The constants
+/// assigned to the IEEE rounding modes must agree with the values used by
+/// FLT_ROUNDS (C11, 5.2.4.2.2p8).
+///
+/// This value is packed into a bitfield in some cases, including \c FPOptions,
+/// so the rounding mode values and the special value \c Dynamic must fit into
+/// the bit field (currently 3 bits). The value \c Invalid is used only in
+/// values returned by intrinsics to indicate errors; it should never be stored
+/// as a rounding mode value, so it does not need to fit the bit field.
+///
+enum class RoundingMode : int8_t {
+ // Rounding mode defined in IEEE-754.
+ TowardZero = 0, ///< roundTowardZero.
+ NearestTiesToEven = 1, ///< roundTiesToEven.
+ TowardPositive = 2, ///< roundTowardPositive.
+ TowardNegative = 3, ///< roundTowardNegative.
+ NearestTiesToAway = 4, ///< roundTiesToAway.
+
+ // Special values.
+ Dynamic = 7, ///< Denotes mode unknown at compile time.
+ Invalid = -1 ///< Denotes invalid value.
+};
+
+/// Returns text representation of the given rounding mode.
+inline StringRef spell(RoundingMode RM) {
+ switch (RM) {
+ case RoundingMode::TowardZero: return "towardzero";
+ case RoundingMode::NearestTiesToEven: return "tonearest";
+ case RoundingMode::TowardPositive: return "upward";
+ case RoundingMode::TowardNegative: return "downward";
+ case RoundingMode::NearestTiesToAway: return "tonearestaway";
+ case RoundingMode::Dynamic: return "dynamic";
+ default: return "invalid";
+ }
+}
+
+inline raw_ostream &operator << (raw_ostream &OS, RoundingMode RM) {
+ OS << spell(RM);
+ return OS;
+}
+
+/// Represent subnormal handling kind for floating point instruction inputs and
+/// outputs.
+struct DenormalMode {
+ /// Represent handled modes for denormal (aka subnormal) modes in the floating
+ /// point environment.
+ enum DenormalModeKind : int8_t {
+ Invalid = -1,
+
+ /// IEEE-754 denormal numbers preserved.
+ IEEE,
+
+    /// The sign of a flushed-to-zero number is preserved in the sign of 0.
+ PreserveSign,
+
+ /// Denormals are flushed to positive zero.
+ PositiveZero
+ };
+
+ /// Denormal flushing mode for floating point instruction results in the
+ /// default floating point environment.
+ DenormalModeKind Output = DenormalModeKind::Invalid;
+
+ /// Denormal treatment kind for floating point instruction inputs in the
+ /// default floating-point environment. If this is not DenormalModeKind::IEEE,
+ /// floating-point instructions implicitly treat the input value as 0.
+ DenormalModeKind Input = DenormalModeKind::Invalid;
+
+ constexpr DenormalMode() = default;
+ constexpr DenormalMode(DenormalModeKind Out, DenormalModeKind In) :
+ Output(Out), Input(In) {}
+
+
+ static constexpr DenormalMode getInvalid() {
+ return DenormalMode(DenormalModeKind::Invalid, DenormalModeKind::Invalid);
+ }
+
+ static constexpr DenormalMode getIEEE() {
+ return DenormalMode(DenormalModeKind::IEEE, DenormalModeKind::IEEE);
+ }
+
+ static constexpr DenormalMode getPreserveSign() {
+ return DenormalMode(DenormalModeKind::PreserveSign,
+ DenormalModeKind::PreserveSign);
+ }
+
+ static constexpr DenormalMode getPositiveZero() {
+ return DenormalMode(DenormalModeKind::PositiveZero,
+ DenormalModeKind::PositiveZero);
+ }
+
+ bool operator==(DenormalMode Other) const {
+ return Output == Other.Output && Input == Other.Input;
+ }
+
+ bool operator!=(DenormalMode Other) const {
+ return !(*this == Other);
+ }
+
+ bool isSimple() const {
+ return Input == Output;
+ }
+
+ bool isValid() const {
+ return Output != DenormalModeKind::Invalid &&
+ Input != DenormalModeKind::Invalid;
+ }
+
+ inline void print(raw_ostream &OS) const;
+
+ inline std::string str() const {
+ std::string storage;
+ raw_string_ostream OS(storage);
+ print(OS);
+ return OS.str();
+ }
+};
+
+inline raw_ostream& operator<<(raw_ostream &OS, DenormalMode Mode) {
+ Mode.print(OS);
+ return OS;
+}
+
+/// Parse the expected names from the denormal-fp-math attribute.
+inline DenormalMode::DenormalModeKind
+parseDenormalFPAttributeComponent(StringRef Str) {
+ // Assume ieee on unspecified attribute.
+ return StringSwitch<DenormalMode::DenormalModeKind>(Str)
+ .Cases("", "ieee", DenormalMode::IEEE)
+ .Case("preserve-sign", DenormalMode::PreserveSign)
+ .Case("positive-zero", DenormalMode::PositiveZero)
+ .Default(DenormalMode::Invalid);
+}
+
+/// Return the name used for the denormal handling mode, matching the expected
+/// names from the denormal-fp-math attribute.
+inline StringRef denormalModeKindName(DenormalMode::DenormalModeKind Mode) {
+ switch (Mode) {
+ case DenormalMode::IEEE:
+ return "ieee";
+ case DenormalMode::PreserveSign:
+ return "preserve-sign";
+ case DenormalMode::PositiveZero:
+ return "positive-zero";
+ default:
+ return "";
+ }
+}
+
+/// Returns the denormal mode to use for inputs and outputs.
+inline DenormalMode parseDenormalFPAttribute(StringRef Str) {
+ StringRef OutputStr, InputStr;
+ std::tie(OutputStr, InputStr) = Str.split(',');
+
+ DenormalMode Mode;
+ Mode.Output = parseDenormalFPAttributeComponent(OutputStr);
+
+  // Maintain compatibility with the old form of the attribute, which only
+  // specified one component.
+ Mode.Input = InputStr.empty() ? Mode.Output :
+ parseDenormalFPAttributeComponent(InputStr);
+
+ return Mode;
+}
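+
+// For example, with the two-component form of the attribute:
+//
+//   DenormalMode M = parseDenormalFPAttribute("preserve-sign,ieee");
+//   // M.Output == DenormalMode::PreserveSign, M.Input == DenormalMode::IEEE
+//
+// With the legacy single-component form, the input mode follows the output:
+//
+//   DenormalMode N = parseDenormalFPAttribute("preserve-sign");
+//   // N.Output == N.Input == DenormalMode::PreserveSign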
+
+void DenormalMode::print(raw_ostream &OS) const {
+ OS << denormalModeKindName(Output) << ',' << denormalModeKindName(Input);
+}
+
+}
+
+#endif // LLVM_ADT_FLOATINGPOINTMODE_H
+
+#ifdef __GNUC__
+#pragma GCC diagnostic pop
+#endif
diff --git a/contrib/libs/llvm14/include/llvm/ADT/FoldingSet.h b/contrib/libs/llvm14/include/llvm/ADT/FoldingSet.h
new file mode 100644
index 0000000000..37e2ce98a9
--- /dev/null
+++ b/contrib/libs/llvm14/include/llvm/ADT/FoldingSet.h
@@ -0,0 +1,819 @@
+#pragma once
+
+#ifdef __GNUC__
+#pragma GCC diagnostic push
+#pragma GCC diagnostic ignored "-Wunused-parameter"
+#endif
+
+//===- llvm/ADT/FoldingSet.h - Uniquing Hash Set ----------------*- C++ -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+///
+/// \file
+/// This file defines a hash set that can be used to remove duplication of nodes
+/// in a graph. This code was originally created by Chris Lattner for use with
+/// SelectionDAGCSEMap, but was isolated so that it can be used across the LLVM
+/// codebase.
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_ADT_FOLDINGSET_H
+#define LLVM_ADT_FOLDINGSET_H
+
+#include "llvm/ADT/SmallVector.h"
+#include "llvm/ADT/iterator.h"
+#include "llvm/Support/Allocator.h"
+#include <cassert>
+#include <cstddef>
+#include <cstdint>
+#include <utility>
+
+namespace llvm {
+
+/// This folding set is used for two purposes:
+/// 1. Given information about a node we want to create, look up the unique
+/// instance of the node in the set. If the node already exists, return
+/// it, otherwise return the bucket it should be inserted into.
+/// 2. Given a node that has already been created, remove it from the set.
+///
+/// This class is implemented as a single-link chained hash table, where the
+/// "buckets" are actually the nodes themselves (the next pointer is in the
+/// node). The last node points back to the bucket to simplify node removal.
+///
+/// Any node that is to be included in the folding set must be a subclass of
+/// FoldingSetNode. The node class must also define a Profile method used to
+/// establish the unique bits of data for the node. The Profile method is
+/// passed a FoldingSetNodeID object which is used to gather the bits. Just
+/// call one of the Add* functions defined in the FoldingSetBase::NodeID class.
+/// NOTE: The folding set does not own the nodes, and it is the responsibility
+/// of the user to dispose of them.
+///
+/// Eg.
+/// class MyNode : public FoldingSetNode {
+/// private:
+/// std::string Name;
+/// unsigned Value;
+/// public:
+/// MyNode(const char *N, unsigned V) : Name(N), Value(V) {}
+/// ...
+/// void Profile(FoldingSetNodeID &ID) const {
+/// ID.AddString(Name);
+/// ID.AddInteger(Value);
+/// }
+/// ...
+/// };
+///
+/// To define the folding set itself use the FoldingSet template;
+///
+/// Eg.
+/// FoldingSet<MyNode> MyFoldingSet;
+///
+/// Four public methods are available to manipulate the folding set;
+///
+/// 1) If you have an existing node that you want to add to the set, but are
+///    unsure whether it already exists, then call;
+///
+/// MyNode *M = MyFoldingSet.GetOrInsertNode(N);
+///
+/// If the result is equal to the input, then the node has been inserted.
+/// Otherwise, the result is the node existing in the folding set, and the
+/// input can be discarded (use the result instead.)
+///
+/// 2) If you are ready to construct a node but want to check if it already
+/// exists, then call FindNodeOrInsertPos with a FoldingSetNodeID of the bits to
+/// check;
+///
+/// FoldingSetNodeID ID;
+/// ID.AddString(Name);
+/// ID.AddInteger(Value);
+/// void *InsertPoint;
+///
+/// MyNode *M = MyFoldingSet.FindNodeOrInsertPos(ID, InsertPoint);
+///
+/// If found then M will be non-NULL, else InsertPoint will point to where it
+/// should be inserted using InsertNode.
+///
+/// 3) If you get a NULL result from FindNodeOrInsertPos then you can insert a
+/// new node with InsertNode;
+///
+/// MyFoldingSet.InsertNode(M, InsertPoint);
+///
+/// 4) Finally, if you want to remove a node from the folding set call;
+///
+/// bool WasRemoved = MyFoldingSet.RemoveNode(M);
+///
+/// The result indicates whether the node existed in the folding set.
+
+class FoldingSetNodeID;
+class StringRef;
+
+//===----------------------------------------------------------------------===//
+/// FoldingSetBase - Implements the folding set functionality. The main
+/// structure is an array of buckets. Each bucket is indexed by the hash of
+/// the nodes it contains. The bucket itself points to the nodes contained
+/// in the bucket via a singly linked list. The last node in the list points
+/// back to the bucket to facilitate node removal.
+///
+class FoldingSetBase {
+protected:
+ /// Buckets - Array of bucket chains.
+ void **Buckets;
+
+ /// NumBuckets - Length of the Buckets array. Always a power of 2.
+ unsigned NumBuckets;
+
+ /// NumNodes - Number of nodes in the folding set. Growth occurs when NumNodes
+ /// is greater than twice the number of buckets.
+ unsigned NumNodes;
+
+ explicit FoldingSetBase(unsigned Log2InitSize = 6);
+ FoldingSetBase(FoldingSetBase &&Arg);
+ FoldingSetBase &operator=(FoldingSetBase &&RHS);
+ ~FoldingSetBase();
+
+public:
+ //===--------------------------------------------------------------------===//
+ /// Node - This class is used to maintain the singly linked bucket list in
+ /// a folding set.
+ class Node {
+ private:
+ // NextInFoldingSetBucket - next link in the bucket list.
+ void *NextInFoldingSetBucket = nullptr;
+
+ public:
+ Node() = default;
+
+ // Accessors
+ void *getNextInBucket() const { return NextInFoldingSetBucket; }
+ void SetNextInBucket(void *N) { NextInFoldingSetBucket = N; }
+ };
+
+ /// clear - Remove all nodes from the folding set.
+ void clear();
+
+ /// size - Returns the number of nodes in the folding set.
+ unsigned size() const { return NumNodes; }
+
+ /// empty - Returns true if there are no nodes in the folding set.
+ bool empty() const { return NumNodes == 0; }
+
+ /// capacity - Returns the number of nodes permitted in the folding set
+ /// before a rebucket operation is performed.
+ unsigned capacity() {
+ // We allow a load factor of up to 2.0,
+ // so that means our capacity is NumBuckets * 2
+ return NumBuckets * 2;
+ }
+
+protected:
+ /// Functions provided by the derived class to compute folding properties.
+ /// This is effectively a vtable for FoldingSetBase, except that we don't
+ /// actually store a pointer to it in the object.
+ struct FoldingSetInfo {
+ /// GetNodeProfile - Instantiations of the FoldingSet template implement
+ /// this function to gather data bits for the given node.
+ void (*GetNodeProfile)(const FoldingSetBase *Self, Node *N,
+ FoldingSetNodeID &ID);
+
+ /// NodeEquals - Instantiations of the FoldingSet template implement
+ /// this function to compare the given node with the given ID.
+ bool (*NodeEquals)(const FoldingSetBase *Self, Node *N,
+ const FoldingSetNodeID &ID, unsigned IDHash,
+ FoldingSetNodeID &TempID);
+
+ /// ComputeNodeHash - Instantiations of the FoldingSet template implement
+ /// this function to compute a hash value for the given node.
+ unsigned (*ComputeNodeHash)(const FoldingSetBase *Self, Node *N,
+ FoldingSetNodeID &TempID);
+ };
+
+private:
+ /// GrowHashTable - Double the size of the hash table and rehash everything.
+ void GrowHashTable(const FoldingSetInfo &Info);
+
+ /// GrowBucketCount - resize the hash table and rehash everything.
+ /// NewBucketCount must be a power of two, and must be greater than the old
+ /// bucket count.
+ void GrowBucketCount(unsigned NewBucketCount, const FoldingSetInfo &Info);
+
+protected:
+ // The below methods are protected to encourage subclasses to provide a more
+ // type-safe API.
+
+ /// reserve - Increase the number of buckets such that adding the
+ /// EltCount-th node won't cause a rebucket operation. reserve is permitted
+ /// to allocate more space than requested by EltCount.
+ void reserve(unsigned EltCount, const FoldingSetInfo &Info);
+
+ /// RemoveNode - Remove a node from the folding set, returning true if one
+ /// was removed or false if the node was not in the folding set.
+ bool RemoveNode(Node *N);
+
+ /// GetOrInsertNode - If there is an existing simple Node exactly
+ /// equal to the specified node, return it. Otherwise, insert 'N' and return
+ /// it instead.
+ Node *GetOrInsertNode(Node *N, const FoldingSetInfo &Info);
+
+ /// FindNodeOrInsertPos - Look up the node specified by ID. If it exists,
+ /// return it. If not, return the insertion token that will make insertion
+ /// faster.
+ Node *FindNodeOrInsertPos(const FoldingSetNodeID &ID, void *&InsertPos,
+ const FoldingSetInfo &Info);
+
+ /// InsertNode - Insert the specified node into the folding set, knowing that
+ /// it is not already in the folding set. InsertPos must be obtained from
+ /// FindNodeOrInsertPos.
+ void InsertNode(Node *N, void *InsertPos, const FoldingSetInfo &Info);
+};
+
+//===----------------------------------------------------------------------===//
+
+/// DefaultFoldingSetTrait - This class provides default implementations
+/// for FoldingSetTrait implementations.
+template<typename T> struct DefaultFoldingSetTrait {
+ static void Profile(const T &X, FoldingSetNodeID &ID) {
+ X.Profile(ID);
+ }
+ static void Profile(T &X, FoldingSetNodeID &ID) {
+ X.Profile(ID);
+ }
+
+ // Equals - Test if the profile for X would match ID, using TempID
+ // to compute a temporary ID if necessary. The default implementation
+ // just calls Profile and does a regular comparison. Implementations
+ // can override this to provide more efficient implementations.
+ static inline bool Equals(T &X, const FoldingSetNodeID &ID, unsigned IDHash,
+ FoldingSetNodeID &TempID);
+
+ // ComputeHash - Compute a hash value for X, using TempID to
+ // compute a temporary ID if necessary. The default implementation
+ // just calls Profile and does a regular hash computation.
+ // Implementations can override this to provide more efficient
+ // implementations.
+ static inline unsigned ComputeHash(T &X, FoldingSetNodeID &TempID);
+};
+
+/// FoldingSetTrait - This trait class is used to define behavior of how
+/// to "profile" (in the FoldingSet parlance) an object of a given type.
+/// The default behavior is to invoke a 'Profile' method on an object, but
+/// through template specialization the behavior can be tailored for specific
+/// types. Combined with the FoldingSetNodeWrapper class, one can add objects
+/// to FoldingSets that were not originally designed to have that behavior.
+template<typename T> struct FoldingSetTrait
+ : public DefaultFoldingSetTrait<T> {};
+
+/// DefaultContextualFoldingSetTrait - Like DefaultFoldingSetTrait, but
+/// for ContextualFoldingSets.
+template<typename T, typename Ctx>
+struct DefaultContextualFoldingSetTrait {
+ static void Profile(T &X, FoldingSetNodeID &ID, Ctx Context) {
+ X.Profile(ID, Context);
+ }
+
+ static inline bool Equals(T &X, const FoldingSetNodeID &ID, unsigned IDHash,
+ FoldingSetNodeID &TempID, Ctx Context);
+ static inline unsigned ComputeHash(T &X, FoldingSetNodeID &TempID,
+ Ctx Context);
+};
+
+/// ContextualFoldingSetTrait - Like FoldingSetTrait, but for
+/// ContextualFoldingSets.
+template<typename T, typename Ctx> struct ContextualFoldingSetTrait
+ : public DefaultContextualFoldingSetTrait<T, Ctx> {};
+
+//===--------------------------------------------------------------------===//
+/// FoldingSetNodeIDRef - This class describes a reference to an interned
+/// FoldingSetNodeID, which can be useful for storing node ID data rather
+/// than using plain FoldingSetNodeIDs, since the 32-element SmallVector
+/// is often much larger than necessary, and the possibility of heap
+/// allocation means it requires a non-trivial destructor call.
+class FoldingSetNodeIDRef {
+ const unsigned *Data = nullptr;
+ size_t Size = 0;
+
+public:
+ FoldingSetNodeIDRef() = default;
+ FoldingSetNodeIDRef(const unsigned *D, size_t S) : Data(D), Size(S) {}
+
+ /// ComputeHash - Compute a strong hash value for this FoldingSetNodeIDRef,
+ /// used to lookup the node in the FoldingSetBase.
+ unsigned ComputeHash() const;
+
+ bool operator==(FoldingSetNodeIDRef) const;
+
+ bool operator!=(FoldingSetNodeIDRef RHS) const { return !(*this == RHS); }
+
+ /// Used to compare the "ordering" of two nodes as defined by the
+ /// profiled bits and their ordering defined by memcmp().
+ bool operator<(FoldingSetNodeIDRef) const;
+
+ const unsigned *getData() const { return Data; }
+ size_t getSize() const { return Size; }
+};
+
+//===--------------------------------------------------------------------===//
+/// FoldingSetNodeID - This class is used to gather all the unique data bits of
+/// a node. When all the bits are gathered this class is used to produce a
+/// hash value for the node.
+class FoldingSetNodeID {
+ /// Bits - Vector of all the data bits that make the node unique.
+ /// Use a SmallVector to avoid a heap allocation in the common case.
+ SmallVector<unsigned, 32> Bits;
+
+public:
+ FoldingSetNodeID() = default;
+
+ FoldingSetNodeID(FoldingSetNodeIDRef Ref)
+ : Bits(Ref.getData(), Ref.getData() + Ref.getSize()) {}
+
+ /// Add* - Add various data types to Bit data.
+ void AddPointer(const void *Ptr);
+ void AddInteger(signed I);
+ void AddInteger(unsigned I);
+ void AddInteger(long I);
+ void AddInteger(unsigned long I);
+ void AddInteger(long long I);
+ void AddInteger(unsigned long long I);
+ void AddBoolean(bool B) { AddInteger(B ? 1U : 0U); }
+ void AddString(StringRef String);
+ void AddNodeID(const FoldingSetNodeID &ID);
+
+ template <typename T>
+ inline void Add(const T &x) { FoldingSetTrait<T>::Profile(x, *this); }
+
+ /// clear - Clear the accumulated profile, allowing this FoldingSetNodeID
+ /// object to be used to compute a new profile.
+ inline void clear() { Bits.clear(); }
+
+ /// ComputeHash - Compute a strong hash value for this FoldingSetNodeID, used
+ /// to lookup the node in the FoldingSetBase.
+ unsigned ComputeHash() const;
+
+ /// operator== - Used to compare two nodes to each other.
+ bool operator==(const FoldingSetNodeID &RHS) const;
+ bool operator==(const FoldingSetNodeIDRef RHS) const;
+
+ bool operator!=(const FoldingSetNodeID &RHS) const { return !(*this == RHS); }
+ bool operator!=(const FoldingSetNodeIDRef RHS) const { return !(*this ==RHS);}
+
+ /// Used to compare the "ordering" of two nodes as defined by the
+ /// profiled bits and their ordering defined by memcmp().
+ bool operator<(const FoldingSetNodeID &RHS) const;
+ bool operator<(const FoldingSetNodeIDRef RHS) const;
+
+ /// Intern - Copy this node's data to a memory region allocated from the
+ /// given allocator and return a FoldingSetNodeIDRef describing the
+ /// interned data.
+ FoldingSetNodeIDRef Intern(BumpPtrAllocator &Allocator) const;
+};
+
+// Convenience type to hide the implementation of the folding set.
+using FoldingSetNode = FoldingSetBase::Node;
+template<class T> class FoldingSetIterator;
+template<class T> class FoldingSetBucketIterator;
+
+// Definitions of FoldingSetTrait and ContextualFoldingSetTrait functions, which
+// require the definition of FoldingSetNodeID.
+template<typename T>
+inline bool
+DefaultFoldingSetTrait<T>::Equals(T &X, const FoldingSetNodeID &ID,
+ unsigned /*IDHash*/,
+ FoldingSetNodeID &TempID) {
+ FoldingSetTrait<T>::Profile(X, TempID);
+ return TempID == ID;
+}
+template<typename T>
+inline unsigned
+DefaultFoldingSetTrait<T>::ComputeHash(T &X, FoldingSetNodeID &TempID) {
+ FoldingSetTrait<T>::Profile(X, TempID);
+ return TempID.ComputeHash();
+}
+template<typename T, typename Ctx>
+inline bool
+DefaultContextualFoldingSetTrait<T, Ctx>::Equals(T &X,
+ const FoldingSetNodeID &ID,
+ unsigned /*IDHash*/,
+ FoldingSetNodeID &TempID,
+ Ctx Context) {
+ ContextualFoldingSetTrait<T, Ctx>::Profile(X, TempID, Context);
+ return TempID == ID;
+}
+template<typename T, typename Ctx>
+inline unsigned
+DefaultContextualFoldingSetTrait<T, Ctx>::ComputeHash(T &X,
+ FoldingSetNodeID &TempID,
+ Ctx Context) {
+ ContextualFoldingSetTrait<T, Ctx>::Profile(X, TempID, Context);
+ return TempID.ComputeHash();
+}
+
+//===----------------------------------------------------------------------===//
+/// FoldingSetImpl - An implementation detail that lets us share code between
+/// FoldingSet and ContextualFoldingSet.
+template <class Derived, class T> class FoldingSetImpl : public FoldingSetBase {
+protected:
+ explicit FoldingSetImpl(unsigned Log2InitSize)
+ : FoldingSetBase(Log2InitSize) {}
+
+ FoldingSetImpl(FoldingSetImpl &&Arg) = default;
+ FoldingSetImpl &operator=(FoldingSetImpl &&RHS) = default;
+ ~FoldingSetImpl() = default;
+
+public:
+ using iterator = FoldingSetIterator<T>;
+
+ iterator begin() { return iterator(Buckets); }
+ iterator end() { return iterator(Buckets+NumBuckets); }
+
+ using const_iterator = FoldingSetIterator<const T>;
+
+ const_iterator begin() const { return const_iterator(Buckets); }
+ const_iterator end() const { return const_iterator(Buckets+NumBuckets); }
+
+ using bucket_iterator = FoldingSetBucketIterator<T>;
+
+ bucket_iterator bucket_begin(unsigned hash) {
+ return bucket_iterator(Buckets + (hash & (NumBuckets-1)));
+ }
+
+ bucket_iterator bucket_end(unsigned hash) {
+ return bucket_iterator(Buckets + (hash & (NumBuckets-1)), true);
+ }
+
+ /// reserve - Increase the number of buckets such that adding the
+ /// EltCount-th node won't cause a rebucket operation. reserve is permitted
+ /// to allocate more space than requested by EltCount.
+ void reserve(unsigned EltCount) {
+ return FoldingSetBase::reserve(EltCount, Derived::getFoldingSetInfo());
+ }
+
+ /// RemoveNode - Remove a node from the folding set, returning true if one
+ /// was removed or false if the node was not in the folding set.
+ bool RemoveNode(T *N) {
+ return FoldingSetBase::RemoveNode(N);
+ }
+
+ /// GetOrInsertNode - If there is an existing simple Node exactly
+ /// equal to the specified node, return it. Otherwise, insert 'N' and
+ /// return it instead.
+ T *GetOrInsertNode(T *N) {
+ return static_cast<T *>(
+ FoldingSetBase::GetOrInsertNode(N, Derived::getFoldingSetInfo()));
+ }
+
+ /// FindNodeOrInsertPos - Look up the node specified by ID. If it exists,
+ /// return it. If not, return the insertion token that will make insertion
+ /// faster.
+ T *FindNodeOrInsertPos(const FoldingSetNodeID &ID, void *&InsertPos) {
+ return static_cast<T *>(FoldingSetBase::FindNodeOrInsertPos(
+ ID, InsertPos, Derived::getFoldingSetInfo()));
+ }
+
+ /// InsertNode - Insert the specified node into the folding set, knowing that
+ /// it is not already in the folding set. InsertPos must be obtained from
+ /// FindNodeOrInsertPos.
+ void InsertNode(T *N, void *InsertPos) {
+ FoldingSetBase::InsertNode(N, InsertPos, Derived::getFoldingSetInfo());
+ }
+
+ /// InsertNode - Insert the specified node into the folding set, knowing that
+ /// it is not already in the folding set.
+ void InsertNode(T *N) {
+ T *Inserted = GetOrInsertNode(N);
+ (void)Inserted;
+ assert(Inserted == N && "Node already inserted!");
+ }
+};
+
+//===----------------------------------------------------------------------===//
+/// FoldingSet - This template class is used to instantiate a specialized
+/// implementation of the folding set to the node class T. T must be a
+/// subclass of FoldingSetNode and implement a Profile function.
+///
+/// Note that this set type is movable and move-assignable. However, its
+/// moved-from state is not a valid state for anything other than
+/// move-assigning and destroying. This is primarily to enable movable APIs
+/// that incorporate these objects.
+template <class T>
+class FoldingSet : public FoldingSetImpl<FoldingSet<T>, T> {
+ using Super = FoldingSetImpl<FoldingSet, T>;
+ using Node = typename Super::Node;
+
+ /// GetNodeProfile - Each instantiation of the FoldingSet needs to provide a
+ /// way to convert nodes into a unique specifier.
+ static void GetNodeProfile(const FoldingSetBase *, Node *N,
+ FoldingSetNodeID &ID) {
+ T *TN = static_cast<T *>(N);
+ FoldingSetTrait<T>::Profile(*TN, ID);
+ }
+
+ /// NodeEquals - Instantiations may optionally provide a way to compare a
+ /// node with a specified ID.
+ static bool NodeEquals(const FoldingSetBase *, Node *N,
+ const FoldingSetNodeID &ID, unsigned IDHash,
+ FoldingSetNodeID &TempID) {
+ T *TN = static_cast<T *>(N);
+ return FoldingSetTrait<T>::Equals(*TN, ID, IDHash, TempID);
+ }
+
+ /// ComputeNodeHash - Instantiations may optionally provide a way to compute a
+ /// hash value directly from a node.
+ static unsigned ComputeNodeHash(const FoldingSetBase *, Node *N,
+ FoldingSetNodeID &TempID) {
+ T *TN = static_cast<T *>(N);
+ return FoldingSetTrait<T>::ComputeHash(*TN, TempID);
+ }
+
+ static const FoldingSetBase::FoldingSetInfo &getFoldingSetInfo() {
+ static constexpr FoldingSetBase::FoldingSetInfo Info = {
+ GetNodeProfile, NodeEquals, ComputeNodeHash};
+ return Info;
+ }
+ friend Super;
+
+public:
+ explicit FoldingSet(unsigned Log2InitSize = 6) : Super(Log2InitSize) {}
+ FoldingSet(FoldingSet &&Arg) = default;
+ FoldingSet &operator=(FoldingSet &&RHS) = default;
+};
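+
+// For illustration only, a minimal sketch of how a node type is defined and
+// interned with a FoldingSet (MyNode and its field are hypothetical, not part
+// of this header):
+//
+//   class MyNode : public FoldingSetNode {
+//     int Value;
+//   public:
+//     explicit MyNode(int V) : Value(V) {}
+//     void Profile(FoldingSetNodeID &ID) const { ID.AddInteger(Value); }
+//   };
+//
+//   FoldingSet<MyNode> Set;
+//   FoldingSetNodeID ID;
+//   ID.AddInteger(42);
+//   void *InsertPos = nullptr;
+//   MyNode *N = Set.FindNodeOrInsertPos(ID, InsertPos);
+//   if (!N) {
+//     N = new MyNode(42);
+//     Set.InsertNode(N, InsertPos);
+//   }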
+
+//===----------------------------------------------------------------------===//
+/// ContextualFoldingSet - This template class is a further refinement
+/// of FoldingSet which provides a context argument when calling
+/// Profile on its nodes. Currently, that argument is fixed at
+/// initialization time.
+///
+/// T must be a subclass of FoldingSetNode and implement a Profile
+/// function with signature
+/// void Profile(FoldingSetNodeID &, Ctx);
+template <class T, class Ctx>
+class ContextualFoldingSet
+ : public FoldingSetImpl<ContextualFoldingSet<T, Ctx>, T> {
+ // Unfortunately, this can't derive from FoldingSet<T> because the
+ // construction of the vtable for FoldingSet<T> requires
+ // FoldingSet<T>::GetNodeProfile to be instantiated, which in turn
+ // requires a single-argument T::Profile().
+
+ using Super = FoldingSetImpl<ContextualFoldingSet, T>;
+ using Node = typename Super::Node;
+
+ Ctx Context;
+
+ static const Ctx &getContext(const FoldingSetBase *Base) {
+ return static_cast<const ContextualFoldingSet*>(Base)->Context;
+ }
+
+  /// GetNodeProfile - Each instantiation of the FoldingSet needs to provide a
+  /// way to convert nodes into a unique specifier.
+ static void GetNodeProfile(const FoldingSetBase *Base, Node *N,
+ FoldingSetNodeID &ID) {
+ T *TN = static_cast<T *>(N);
+ ContextualFoldingSetTrait<T, Ctx>::Profile(*TN, ID, getContext(Base));
+ }
+
+ static bool NodeEquals(const FoldingSetBase *Base, Node *N,
+ const FoldingSetNodeID &ID, unsigned IDHash,
+ FoldingSetNodeID &TempID) {
+ T *TN = static_cast<T *>(N);
+ return ContextualFoldingSetTrait<T, Ctx>::Equals(*TN, ID, IDHash, TempID,
+ getContext(Base));
+ }
+
+ static unsigned ComputeNodeHash(const FoldingSetBase *Base, Node *N,
+ FoldingSetNodeID &TempID) {
+ T *TN = static_cast<T *>(N);
+ return ContextualFoldingSetTrait<T, Ctx>::ComputeHash(*TN, TempID,
+ getContext(Base));
+ }
+
+ static const FoldingSetBase::FoldingSetInfo &getFoldingSetInfo() {
+ static constexpr FoldingSetBase::FoldingSetInfo Info = {
+ GetNodeProfile, NodeEquals, ComputeNodeHash};
+ return Info;
+ }
+ friend Super;
+
+public:
+ explicit ContextualFoldingSet(Ctx Context, unsigned Log2InitSize = 6)
+ : Super(Log2InitSize), Context(Context) {}
+
+ Ctx getContext() const { return Context; }
+};
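+
+// For illustration, a node interned in a ContextualFoldingSet provides a
+// two-argument profile method (sketch with hypothetical names):
+//
+//   void MyNode::Profile(FoldingSetNodeID &ID, MyContext *Ctx) const;
+//
+// and the set itself is declared as ContextualFoldingSet<MyNode, MyContext *>,
+// with the context supplied once at construction time.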
+
+//===----------------------------------------------------------------------===//
+/// FoldingSetVector - This template class combines a FoldingSet and a vector
+/// to provide the interface of FoldingSet but with deterministic iteration
+/// order based on the insertion order. T must be a subclass of FoldingSetNode
+/// and implement a Profile function.
+template <class T, class VectorT = SmallVector<T*, 8>>
+class FoldingSetVector {
+ FoldingSet<T> Set;
+ VectorT Vector;
+
+public:
+ explicit FoldingSetVector(unsigned Log2InitSize = 6) : Set(Log2InitSize) {}
+
+ using iterator = pointee_iterator<typename VectorT::iterator>;
+
+ iterator begin() { return Vector.begin(); }
+ iterator end() { return Vector.end(); }
+
+ using const_iterator = pointee_iterator<typename VectorT::const_iterator>;
+
+ const_iterator begin() const { return Vector.begin(); }
+ const_iterator end() const { return Vector.end(); }
+
+ /// clear - Remove all nodes from the folding set.
+ void clear() { Set.clear(); Vector.clear(); }
+
+ /// FindNodeOrInsertPos - Look up the node specified by ID. If it exists,
+ /// return it. If not, return the insertion token that will make insertion
+ /// faster.
+ T *FindNodeOrInsertPos(const FoldingSetNodeID &ID, void *&InsertPos) {
+ return Set.FindNodeOrInsertPos(ID, InsertPos);
+ }
+
+ /// GetOrInsertNode - If there is an existing simple Node exactly
+ /// equal to the specified node, return it. Otherwise, insert 'N' and
+ /// return it instead.
+ T *GetOrInsertNode(T *N) {
+ T *Result = Set.GetOrInsertNode(N);
+ if (Result == N) Vector.push_back(N);
+ return Result;
+ }
+
+ /// InsertNode - Insert the specified node into the folding set, knowing that
+ /// it is not already in the folding set. InsertPos must be obtained from
+ /// FindNodeOrInsertPos.
+ void InsertNode(T *N, void *InsertPos) {
+ Set.InsertNode(N, InsertPos);
+ Vector.push_back(N);
+ }
+
+ /// InsertNode - Insert the specified node into the folding set, knowing that
+ /// it is not already in the folding set.
+ void InsertNode(T *N) {
+ Set.InsertNode(N);
+ Vector.push_back(N);
+ }
+
+ /// size - Returns the number of nodes in the folding set.
+ unsigned size() const { return Set.size(); }
+
+ /// empty - Returns true if there are no nodes in the folding set.
+ bool empty() const { return Set.empty(); }
+};
+
+//===----------------------------------------------------------------------===//
+/// FoldingSetIteratorImpl - This is the common iterator support shared by all
+/// folding sets, which knows how to walk the folding set hash table.
+class FoldingSetIteratorImpl {
+protected:
+ FoldingSetNode *NodePtr;
+
+ FoldingSetIteratorImpl(void **Bucket);
+
+ void advance();
+
+public:
+ bool operator==(const FoldingSetIteratorImpl &RHS) const {
+ return NodePtr == RHS.NodePtr;
+ }
+ bool operator!=(const FoldingSetIteratorImpl &RHS) const {
+ return NodePtr != RHS.NodePtr;
+ }
+};
+
+template <class T> class FoldingSetIterator : public FoldingSetIteratorImpl {
+public:
+ explicit FoldingSetIterator(void **Bucket) : FoldingSetIteratorImpl(Bucket) {}
+
+ T &operator*() const {
+ return *static_cast<T*>(NodePtr);
+ }
+
+ T *operator->() const {
+ return static_cast<T*>(NodePtr);
+ }
+
+ inline FoldingSetIterator &operator++() { // Preincrement
+ advance();
+ return *this;
+ }
+ FoldingSetIterator operator++(int) { // Postincrement
+ FoldingSetIterator tmp = *this; ++*this; return tmp;
+ }
+};
+
+//===----------------------------------------------------------------------===//
+/// FoldingSetBucketIteratorImpl - This is the common bucket iterator support
+/// shared by all folding sets, which knows how to walk a particular bucket
+/// of a folding set hash table.
+class FoldingSetBucketIteratorImpl {
+protected:
+ void *Ptr;
+
+ explicit FoldingSetBucketIteratorImpl(void **Bucket);
+
+ FoldingSetBucketIteratorImpl(void **Bucket, bool) : Ptr(Bucket) {}
+
+ void advance() {
+ void *Probe = static_cast<FoldingSetNode*>(Ptr)->getNextInBucket();
+ uintptr_t x = reinterpret_cast<uintptr_t>(Probe) & ~0x1;
+ Ptr = reinterpret_cast<void*>(x);
+ }
+
+public:
+ bool operator==(const FoldingSetBucketIteratorImpl &RHS) const {
+ return Ptr == RHS.Ptr;
+ }
+ bool operator!=(const FoldingSetBucketIteratorImpl &RHS) const {
+ return Ptr != RHS.Ptr;
+ }
+};
+
+template <class T>
+class FoldingSetBucketIterator : public FoldingSetBucketIteratorImpl {
+public:
+ explicit FoldingSetBucketIterator(void **Bucket) :
+ FoldingSetBucketIteratorImpl(Bucket) {}
+
+ FoldingSetBucketIterator(void **Bucket, bool) :
+ FoldingSetBucketIteratorImpl(Bucket, true) {}
+
+ T &operator*() const { return *static_cast<T*>(Ptr); }
+ T *operator->() const { return static_cast<T*>(Ptr); }
+
+ inline FoldingSetBucketIterator &operator++() { // Preincrement
+ advance();
+ return *this;
+ }
+ FoldingSetBucketIterator operator++(int) { // Postincrement
+ FoldingSetBucketIterator tmp = *this; ++*this; return tmp;
+ }
+};
+
+//===----------------------------------------------------------------------===//
+/// FoldingSetNodeWrapper - This template class is used to "wrap" arbitrary
+/// types in an enclosing object so that they can be inserted into FoldingSets.
+template <typename T>
+class FoldingSetNodeWrapper : public FoldingSetNode {
+ T data;
+
+public:
+ template <typename... Ts>
+ explicit FoldingSetNodeWrapper(Ts &&... Args)
+ : data(std::forward<Ts>(Args)...) {}
+
+ void Profile(FoldingSetNodeID &ID) { FoldingSetTrait<T>::Profile(data, ID); }
+
+ T &getValue() { return data; }
+ const T &getValue() const { return data; }
+
+ operator T&() { return data; }
+ operator const T&() const { return data; }
+};
+
+//===----------------------------------------------------------------------===//
+/// FastFoldingSetNode - This is a subclass of FoldingSetNode which stores
+/// a FoldingSetNodeID value rather than requiring the node to recompute it
+/// each time it is needed. This trades space for speed (which can be
+/// significant if the ID is long), and it also permits nodes to drop
+/// information that would otherwise only be required for recomputing an ID.
+class FastFoldingSetNode : public FoldingSetNode {
+ FoldingSetNodeID FastID;
+
+protected:
+ explicit FastFoldingSetNode(const FoldingSetNodeID &ID) : FastID(ID) {}
+
+public:
+ void Profile(FoldingSetNodeID &ID) const { ID.AddNodeID(FastID); }
+};
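+
+// For illustration, a hypothetical subclass simply forwards the precomputed
+// ID to this base class (sketch):
+//
+//   class MyCachedNode : public FastFoldingSetNode {
+//   public:
+//     explicit MyCachedNode(const FoldingSetNodeID &ID)
+//         : FastFoldingSetNode(ID) {}
+//   };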
+
+//===----------------------------------------------------------------------===//
+// Partial specializations of FoldingSetTrait.
+
+template<typename T> struct FoldingSetTrait<T*> {
+ static inline void Profile(T *X, FoldingSetNodeID &ID) {
+ ID.AddPointer(X);
+ }
+};
+template <typename T1, typename T2>
+struct FoldingSetTrait<std::pair<T1, T2>> {
+ static inline void Profile(const std::pair<T1, T2> &P,
+ FoldingSetNodeID &ID) {
+ ID.Add(P.first);
+ ID.Add(P.second);
+ }
+};
+
+} // end namespace llvm
+
+#endif // LLVM_ADT_FOLDINGSET_H
+
+#ifdef __GNUC__
+#pragma GCC diagnostic pop
+#endif
diff --git a/contrib/libs/llvm14/include/llvm/ADT/FunctionExtras.h b/contrib/libs/llvm14/include/llvm/ADT/FunctionExtras.h
new file mode 100644
index 0000000000..4792a3f364
--- /dev/null
+++ b/contrib/libs/llvm14/include/llvm/ADT/FunctionExtras.h
@@ -0,0 +1,427 @@
+#pragma once
+
+#ifdef __GNUC__
+#pragma GCC diagnostic push
+#pragma GCC diagnostic ignored "-Wunused-parameter"
+#endif
+
+//===- FunctionExtras.h - Function type erasure utilities -------*- C++ -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+/// \file
+/// This file provides a collection of function (or more generally, callable)
+/// type erasure utilities supplementing those provided by the standard library
+/// in `<functional>`.
+///
+/// It provides `unique_function`, which works like `std::function` but supports
+/// move-only callable objects and const-qualification.
+///
+/// Future plans:
+/// - Add a `function` that provides ref-qualified support, which doesn't work
+/// with `std::function`.
+/// - Provide support for specifying multiple signatures to type erase callable
+/// objects with an overload set, such as those produced by generic lambdas.
+/// - Expand to include a copyable utility that directly replaces std::function
+/// but brings the above improvements.
+///
+/// Note that LLVM's utilities are greatly simplified by not supporting
+/// allocators.
+///
+/// If the standard library ever begins to provide comparable facilities we can
+/// consider switching to those.
+///
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_ADT_FUNCTIONEXTRAS_H
+#define LLVM_ADT_FUNCTIONEXTRAS_H
+
+#include "llvm/ADT/PointerIntPair.h"
+#include "llvm/ADT/PointerUnion.h"
+#include "llvm/ADT/STLForwardCompat.h"
+#include "llvm/Support/MemAlloc.h"
+#include "llvm/Support/type_traits.h"
+#include <cstring>
+#include <memory>
+#include <type_traits>
+
+namespace llvm {
+
+/// unique_function is a type-erasing functor similar to std::function.
+///
+/// It can hold move-only function objects, like lambdas capturing unique_ptrs.
+/// Accordingly, it is movable but not copyable.
+///
+/// It supports const-qualification:
+/// - unique_function<int() const> has a const operator().
+/// It can only hold functions which themselves have a const operator().
+/// - unique_function<int()> has a non-const operator().
+/// It can hold functions with a non-const operator(), like mutable lambdas.
+template <typename FunctionT> class unique_function;
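+
+// For illustration only, a sketch of typical usage; unlike std::function,
+// unique_function can hold a move-only callable:
+//
+//   std::unique_ptr<int> P = std::make_unique<int>(42);
+//   unique_function<int()> F = [P = std::move(P)] { return *P; };
+//   int R = F(); // R == 42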
+
+namespace detail {
+
+template <typename T>
+using EnableIfTrivial =
+ std::enable_if_t<llvm::is_trivially_move_constructible<T>::value &&
+ std::is_trivially_destructible<T>::value>;
+template <typename CallableT, typename ThisT>
+using EnableUnlessSameType =
+ std::enable_if_t<!std::is_same<remove_cvref_t<CallableT>, ThisT>::value>;
+template <typename CallableT, typename Ret, typename... Params>
+using EnableIfCallable = std::enable_if_t<llvm::disjunction<
+ std::is_void<Ret>,
+ std::is_same<decltype(std::declval<CallableT>()(std::declval<Params>()...)),
+ Ret>,
+ std::is_same<const decltype(std::declval<CallableT>()(
+ std::declval<Params>()...)),
+ Ret>,
+ std::is_convertible<decltype(std::declval<CallableT>()(
+ std::declval<Params>()...)),
+ Ret>>::value>;
+
+template <typename ReturnT, typename... ParamTs> class UniqueFunctionBase {
+protected:
+ static constexpr size_t InlineStorageSize = sizeof(void *) * 3;
+
+ template <typename T, class = void>
+ struct IsSizeLessThanThresholdT : std::false_type {};
+
+ template <typename T>
+ struct IsSizeLessThanThresholdT<
+ T, std::enable_if_t<sizeof(T) <= 2 * sizeof(void *)>> : std::true_type {};
+
+  // Provide a type function to compute the parameter types used in our erased
+  // call utility. Parameters that won't observe extra copies or moves and are
+  // small enough to likely be passed in registers map to plain values; all
+  // other types map to l-value references. This minimizes copies and moves
+  // unless doing so would force things unnecessarily into memory.
+ //
+ // The heuristic used is related to common ABI register passing conventions.
+ // It doesn't have to be exact though, and in one way it is more strict
+ // because we want to still be able to observe either moves *or* copies.
+ template <typename T> struct AdjustedParamTBase {
+ static_assert(!std::is_reference<T>::value,
+ "references should be handled by template specialization");
+ using type = typename std::conditional<
+ llvm::is_trivially_copy_constructible<T>::value &&
+ llvm::is_trivially_move_constructible<T>::value &&
+ IsSizeLessThanThresholdT<T>::value,
+ T, T &>::type;
+ };
+
+ // This specialization ensures that 'AdjustedParam<V<T>&>' or
+ // 'AdjustedParam<V<T>&&>' does not trigger a compile-time error when 'T' is
+ // an incomplete type and V a templated type.
+ template <typename T> struct AdjustedParamTBase<T &> { using type = T &; };
+ template <typename T> struct AdjustedParamTBase<T &&> { using type = T &; };
+
+ template <typename T>
+ using AdjustedParamT = typename AdjustedParamTBase<T>::type;
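+
+  // For illustration: under this mapping an `int` parameter is forwarded by
+  // value, while a `std::string` parameter (not trivially copyable) is
+  // forwarded as `std::string &`, avoiding a forced copy into memory.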
+
+ // The type of the erased function pointer we use as a callback to dispatch to
+ // the stored callable when it is trivial to move and destroy.
+ using CallPtrT = ReturnT (*)(void *CallableAddr,
+ AdjustedParamT<ParamTs>... Params);
+ using MovePtrT = void (*)(void *LHSCallableAddr, void *RHSCallableAddr);
+ using DestroyPtrT = void (*)(void *CallableAddr);
+
+ /// A struct to hold a single trivial callback with sufficient alignment for
+ /// our bitpacking.
+ struct alignas(8) TrivialCallback {
+ CallPtrT CallPtr;
+ };
+
+  /// A struct we use to aggregate three callbacks when we need the full set
+  /// of operations.
+ struct alignas(8) NonTrivialCallbacks {
+ CallPtrT CallPtr;
+ MovePtrT MovePtr;
+ DestroyPtrT DestroyPtr;
+ };
+
+  // A pointer union holding either a pointer to a static struct containing
+  // just the trivial call pointer, or a pointer to a static struct of the
+  // call, move, and destroy pointers.
+ using CallbackPointerUnionT =
+ PointerUnion<TrivialCallback *, NonTrivialCallbacks *>;
+
+ // The main storage buffer. This will either have a pointer to out-of-line
+ // storage or an inline buffer storing the callable.
+ union StorageUnionT {
+ // For out-of-line storage we keep a pointer to the underlying storage and
+ // the size. This is enough to deallocate the memory.
+ struct OutOfLineStorageT {
+ void *StoragePtr;
+ size_t Size;
+ size_t Alignment;
+ } OutOfLineStorage;
+ static_assert(
+ sizeof(OutOfLineStorageT) <= InlineStorageSize,
+ "Should always use all of the out-of-line storage for inline storage!");
+
+ // For in-line storage, we just provide an aligned character buffer. We
+ // provide three pointers worth of storage here.
+ // This is mutable as an inlined `const unique_function<void() const>` may
+ // still modify its own mutable members.
+ mutable
+ typename std::aligned_storage<InlineStorageSize, alignof(void *)>::type
+ InlineStorage;
+ } StorageUnion;
+
+ // A compressed pointer to either our dispatching callback or our table of
+ // dispatching callbacks and the flag for whether the callable itself is
+ // stored inline or not.
+ PointerIntPair<CallbackPointerUnionT, 1, bool> CallbackAndInlineFlag;
+
+ bool isInlineStorage() const { return CallbackAndInlineFlag.getInt(); }
+
+ bool isTrivialCallback() const {
+ return CallbackAndInlineFlag.getPointer().template is<TrivialCallback *>();
+ }
+
+ CallPtrT getTrivialCallback() const {
+ return CallbackAndInlineFlag.getPointer().template get<TrivialCallback *>()->CallPtr;
+ }
+
+ NonTrivialCallbacks *getNonTrivialCallbacks() const {
+ return CallbackAndInlineFlag.getPointer()
+ .template get<NonTrivialCallbacks *>();
+ }
+
+ CallPtrT getCallPtr() const {
+ return isTrivialCallback() ? getTrivialCallback()
+ : getNonTrivialCallbacks()->CallPtr;
+ }
+
+ // These three functions are only const in the narrow sense. They return
+ // mutable pointers to function state.
+ // This allows unique_function<T const>::operator() to be const, even if the
+ // underlying functor may be internally mutable.
+ //
+ // const callers must ensure they're only used in const-correct ways.
+ void *getCalleePtr() const {
+ return isInlineStorage() ? getInlineStorage() : getOutOfLineStorage();
+ }
+ void *getInlineStorage() const { return &StorageUnion.InlineStorage; }
+ void *getOutOfLineStorage() const {
+ return StorageUnion.OutOfLineStorage.StoragePtr;
+ }
+
+ size_t getOutOfLineStorageSize() const {
+ return StorageUnion.OutOfLineStorage.Size;
+ }
+ size_t getOutOfLineStorageAlignment() const {
+ return StorageUnion.OutOfLineStorage.Alignment;
+ }
+
+ void setOutOfLineStorage(void *Ptr, size_t Size, size_t Alignment) {
+ StorageUnion.OutOfLineStorage = {Ptr, Size, Alignment};
+ }
+
+ template <typename CalledAsT>
+ static ReturnT CallImpl(void *CallableAddr,
+ AdjustedParamT<ParamTs>... Params) {
+ auto &Func = *reinterpret_cast<CalledAsT *>(CallableAddr);
+ return Func(std::forward<ParamTs>(Params)...);
+ }
+
+ template <typename CallableT>
+ static void MoveImpl(void *LHSCallableAddr, void *RHSCallableAddr) noexcept {
+ new (LHSCallableAddr)
+ CallableT(std::move(*reinterpret_cast<CallableT *>(RHSCallableAddr)));
+ }
+
+ template <typename CallableT>
+ static void DestroyImpl(void *CallableAddr) noexcept {
+ reinterpret_cast<CallableT *>(CallableAddr)->~CallableT();
+ }
+
+ // The pointers to call/move/destroy functions are determined for each
+ // callable type (and called-as type, which determines the overload chosen).
+ // (definitions are out-of-line).
+
+ // By default, we need an object that contains all the different
+ // type erased behaviors needed. Create a static instance of the struct type
+ // here and each instance will contain a pointer to it.
+ // Wrap in a struct to avoid https://gcc.gnu.org/PR71954
+ template <typename CallableT, typename CalledAs, typename Enable = void>
+ struct CallbacksHolder {
+ static NonTrivialCallbacks Callbacks;
+ };
+ // See if we can create a trivial callback. We need the callable to be
+ // trivially moved and trivially destroyed so that we don't have to store
+ // type erased callbacks for those operations.
+ template <typename CallableT, typename CalledAs>
+ struct CallbacksHolder<CallableT, CalledAs, EnableIfTrivial<CallableT>> {
+ static TrivialCallback Callbacks;
+ };
+
+  // A simple tag type allowing the call-as type to be passed to the
+  // constructor.
+ template <typename T> struct CalledAs {};
+
+ // Essentially the "main" unique_function constructor, but subclasses
+ // provide the qualified type to be used for the call.
+ // (We always store a T, even if the call will use a pointer to const T).
+ template <typename CallableT, typename CalledAsT>
+ UniqueFunctionBase(CallableT Callable, CalledAs<CalledAsT>) {
+ bool IsInlineStorage = true;
+ void *CallableAddr = getInlineStorage();
+ if (sizeof(CallableT) > InlineStorageSize ||
+ alignof(CallableT) > alignof(decltype(StorageUnion.InlineStorage))) {
+ IsInlineStorage = false;
+ // Allocate out-of-line storage. FIXME: Use an explicit alignment
+ // parameter in C++17 mode.
+ auto Size = sizeof(CallableT);
+ auto Alignment = alignof(CallableT);
+ CallableAddr = allocate_buffer(Size, Alignment);
+ setOutOfLineStorage(CallableAddr, Size, Alignment);
+ }
+
+ // Now move into the storage.
+ new (CallableAddr) CallableT(std::move(Callable));
+ CallbackAndInlineFlag.setPointerAndInt(
+ &CallbacksHolder<CallableT, CalledAsT>::Callbacks, IsInlineStorage);
+ }
+
+ ~UniqueFunctionBase() {
+ if (!CallbackAndInlineFlag.getPointer())
+ return;
+
+ // Cache this value so we don't re-check it after type-erased operations.
+ bool IsInlineStorage = isInlineStorage();
+
+ if (!isTrivialCallback())
+ getNonTrivialCallbacks()->DestroyPtr(
+ IsInlineStorage ? getInlineStorage() : getOutOfLineStorage());
+
+ if (!IsInlineStorage)
+ deallocate_buffer(getOutOfLineStorage(), getOutOfLineStorageSize(),
+ getOutOfLineStorageAlignment());
+ }
+
+ UniqueFunctionBase(UniqueFunctionBase &&RHS) noexcept {
+ // Copy the callback and inline flag.
+ CallbackAndInlineFlag = RHS.CallbackAndInlineFlag;
+
+ // If the RHS is empty, just copying the above is sufficient.
+ if (!RHS)
+ return;
+
+ if (!isInlineStorage()) {
+ // The out-of-line case is easiest to move.
+ StorageUnion.OutOfLineStorage = RHS.StorageUnion.OutOfLineStorage;
+ } else if (isTrivialCallback()) {
+ // Move is trivial, just memcpy the bytes across.
+ memcpy(getInlineStorage(), RHS.getInlineStorage(), InlineStorageSize);
+ } else {
+ // Non-trivial move, so dispatch to a type-erased implementation.
+ getNonTrivialCallbacks()->MovePtr(getInlineStorage(),
+ RHS.getInlineStorage());
+ }
+
+ // Clear the old callback and inline flag to get back to as-if-null.
+ RHS.CallbackAndInlineFlag = {};
+
+#ifndef NDEBUG
+ // In debug builds, we also scribble across the rest of the storage.
+ memset(RHS.getInlineStorage(), 0xAD, InlineStorageSize);
+#endif
+ }
+
+ UniqueFunctionBase &operator=(UniqueFunctionBase &&RHS) noexcept {
+ if (this == &RHS)
+ return *this;
+
+ // Because we don't try to provide any exception safety guarantees we can
+ // implement move assignment very simply by first destroying the current
+ // object and then move-constructing over top of it.
+ this->~UniqueFunctionBase();
+ new (this) UniqueFunctionBase(std::move(RHS));
+ return *this;
+ }
+
+ UniqueFunctionBase() = default;
+
+public:
+ explicit operator bool() const {
+ return (bool)CallbackAndInlineFlag.getPointer();
+ }
+};
+
+template <typename R, typename... P>
+template <typename CallableT, typename CalledAsT, typename Enable>
+typename UniqueFunctionBase<R, P...>::NonTrivialCallbacks UniqueFunctionBase<
+ R, P...>::CallbacksHolder<CallableT, CalledAsT, Enable>::Callbacks = {
+ &CallImpl<CalledAsT>, &MoveImpl<CallableT>, &DestroyImpl<CallableT>};
+
+template <typename R, typename... P>
+template <typename CallableT, typename CalledAsT>
+typename UniqueFunctionBase<R, P...>::TrivialCallback
+ UniqueFunctionBase<R, P...>::CallbacksHolder<
+ CallableT, CalledAsT, EnableIfTrivial<CallableT>>::Callbacks{
+ &CallImpl<CalledAsT>};
+
+} // namespace detail
+
+template <typename R, typename... P>
+class unique_function<R(P...)> : public detail::UniqueFunctionBase<R, P...> {
+ using Base = detail::UniqueFunctionBase<R, P...>;
+
+public:
+ unique_function() = default;
+ unique_function(std::nullptr_t) {}
+ unique_function(unique_function &&) = default;
+ unique_function(const unique_function &) = delete;
+ unique_function &operator=(unique_function &&) = default;
+ unique_function &operator=(const unique_function &) = delete;
+
+ template <typename CallableT>
+ unique_function(
+ CallableT Callable,
+ detail::EnableUnlessSameType<CallableT, unique_function> * = nullptr,
+ detail::EnableIfCallable<CallableT, R, P...> * = nullptr)
+ : Base(std::forward<CallableT>(Callable),
+ typename Base::template CalledAs<CallableT>{}) {}
+
+ R operator()(P... Params) {
+ return this->getCallPtr()(this->getCalleePtr(), Params...);
+ }
+};
+
+template <typename R, typename... P>
+class unique_function<R(P...) const>
+ : public detail::UniqueFunctionBase<R, P...> {
+ using Base = detail::UniqueFunctionBase<R, P...>;
+
+public:
+ unique_function() = default;
+ unique_function(std::nullptr_t) {}
+ unique_function(unique_function &&) = default;
+ unique_function(const unique_function &) = delete;
+ unique_function &operator=(unique_function &&) = default;
+ unique_function &operator=(const unique_function &) = delete;
+
+ template <typename CallableT>
+ unique_function(
+ CallableT Callable,
+ detail::EnableUnlessSameType<CallableT, unique_function> * = nullptr,
+ detail::EnableIfCallable<const CallableT, R, P...> * = nullptr)
+ : Base(std::forward<CallableT>(Callable),
+ typename Base::template CalledAs<const CallableT>{}) {}
+
+ R operator()(P... Params) const {
+ return this->getCallPtr()(this->getCalleePtr(), Params...);
+ }
+};
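+
+// For illustration, the const-qualified form (sketch):
+//
+//   const unique_function<int() const> F = [] { return 7; };
+//   int R = F(); // OK: operator() is const-qualified.
+//
+// A mutable lambda would be rejected at compile time here, since it lacks a
+// const operator().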
+
+} // end namespace llvm
+
+#endif // LLVM_ADT_FUNCTIONEXTRAS_H
+
+#ifdef __GNUC__
+#pragma GCC diagnostic pop
+#endif
diff --git a/contrib/libs/llvm14/include/llvm/ADT/GenericCycleImpl.h b/contrib/libs/llvm14/include/llvm/ADT/GenericCycleImpl.h
new file mode 100644
index 0000000000..c1c8c0c455
--- /dev/null
+++ b/contrib/libs/llvm14/include/llvm/ADT/GenericCycleImpl.h
@@ -0,0 +1,423 @@
+#pragma once
+
+#ifdef __GNUC__
+#pragma GCC diagnostic push
+#pragma GCC diagnostic ignored "-Wunused-parameter"
+#endif
+
+//===- GenericCycleImpl.h -------------------------------------*- C++ -*---===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+///
+/// \file
+/// This template implementation resides in a separate file so that it
+/// does not get injected into every .cpp file that includes the
+/// generic header.
+///
+/// DO NOT INCLUDE THIS FILE WHEN MERELY USING CYCLEINFO.
+///
+/// This file should only be included by files that implement a
+/// specialization of the relevant templates. Currently these are:
+/// - CycleAnalysis.cpp
+/// - MachineCycleAnalysis.cpp
+///
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_ADT_GENERICCYCLEIMPL_H
+#define LLVM_ADT_GENERICCYCLEIMPL_H
+
+#include "llvm/ADT/DenseSet.h"
+#include "llvm/ADT/DepthFirstIterator.h"
+#include "llvm/ADT/GenericCycleInfo.h"
+
+#define DEBUG_TYPE "generic-cycle-impl"
+
+namespace llvm {
+
+template <typename ContextT>
+bool GenericCycle<ContextT>::contains(const GenericCycle *C) const {
+ if (!C)
+ return false;
+
+ if (Depth > C->Depth)
+ return false;
+ while (Depth < C->Depth)
+ C = C->ParentCycle;
+ return this == C;
+}
+
+template <typename ContextT>
+void GenericCycle<ContextT>::getExitBlocks(
+ SmallVectorImpl<BlockT *> &TmpStorage) const {
+ TmpStorage.clear();
+
+ size_t NumExitBlocks = 0;
+ for (BlockT *Block : blocks()) {
+ llvm::append_range(TmpStorage, successors(Block));
+
+ for (size_t Idx = NumExitBlocks, End = TmpStorage.size(); Idx < End;
+ ++Idx) {
+ BlockT *Succ = TmpStorage[Idx];
+ if (!contains(Succ)) {
+ auto ExitEndIt = TmpStorage.begin() + NumExitBlocks;
+ if (std::find(TmpStorage.begin(), ExitEndIt, Succ) == ExitEndIt)
+ TmpStorage[NumExitBlocks++] = Succ;
+ }
+ }
+
+ TmpStorage.resize(NumExitBlocks);
+ }
+}
+
+/// \brief Helper class for computing cycle information.
+template <typename ContextT> class GenericCycleInfoCompute {
+ using BlockT = typename ContextT::BlockT;
+ using CycleInfoT = GenericCycleInfo<ContextT>;
+ using CycleT = typename CycleInfoT::CycleT;
+
+ CycleInfoT &Info;
+
+ struct DFSInfo {
+ unsigned Start = 0; // DFS start; positive if block is found
+ unsigned End = 0; // DFS end
+
+ DFSInfo() = default;
+ explicit DFSInfo(unsigned Start) : Start(Start) {}
+
+    /// Whether this node is an ancestor of (or equal to) the node \p Other
+    /// in the DFS tree.
+ bool isAncestorOf(const DFSInfo &Other) const {
+ return Start <= Other.Start && Other.End <= End;
+ }
+ };
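+
+  // For illustration: if a DFS opens A, then opens its successor B, closes B,
+  // and finally closes A, then A.Start < B.Start and B.End <= A.End, so
+  // A.isAncestorOf(B) holds while B.isAncestorOf(A) does not.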
+
+ DenseMap<BlockT *, DFSInfo> BlockDFSInfo;
+ SmallVector<BlockT *, 8> BlockPreorder;
+
+ GenericCycleInfoCompute(const GenericCycleInfoCompute &) = delete;
+ GenericCycleInfoCompute &operator=(const GenericCycleInfoCompute &) = delete;
+
+public:
+ GenericCycleInfoCompute(CycleInfoT &Info) : Info(Info) {}
+
+ void run(BlockT *EntryBlock);
+
+ static void updateDepth(CycleT *SubTree);
+
+private:
+ void dfs(BlockT *EntryBlock);
+};
+
+template <typename ContextT>
+auto GenericCycleInfo<ContextT>::getTopLevelParentCycle(
+ const BlockT *Block) const -> CycleT * {
+ auto MapIt = BlockMap.find(Block);
+ if (MapIt == BlockMap.end())
+ return nullptr;
+
+ auto *C = MapIt->second;
+ while (C->ParentCycle)
+ C = C->ParentCycle;
+ return C;
+}
+
+template <typename ContextT>
+void GenericCycleInfo<ContextT>::moveToNewParent(CycleT *NewParent,
+ CycleT *Child) {
+ auto &CurrentContainer =
+ Child->ParentCycle ? Child->ParentCycle->Children : TopLevelCycles;
+ auto Pos = llvm::find_if(CurrentContainer, [=](const auto &Ptr) -> bool {
+ return Child == Ptr.get();
+ });
+ assert(Pos != CurrentContainer.end());
+ NewParent->Children.push_back(std::move(*Pos));
+ *Pos = std::move(CurrentContainer.back());
+ CurrentContainer.pop_back();
+ Child->ParentCycle = NewParent;
+}
+
+/// \brief Main function of the cycle info computations.
+template <typename ContextT>
+void GenericCycleInfoCompute<ContextT>::run(BlockT *EntryBlock) {
+ LLVM_DEBUG(errs() << "Entry block: " << Info.Context.print(EntryBlock)
+ << "\n");
+ dfs(EntryBlock);
+
+ SmallVector<BlockT *, 8> Worklist;
+
+ for (BlockT *HeaderCandidate : llvm::reverse(BlockPreorder)) {
+ const DFSInfo CandidateInfo = BlockDFSInfo.lookup(HeaderCandidate);
+
+ for (BlockT *Pred : predecessors(HeaderCandidate)) {
+ const DFSInfo PredDFSInfo = BlockDFSInfo.lookup(Pred);
+ if (CandidateInfo.isAncestorOf(PredDFSInfo))
+ Worklist.push_back(Pred);
+ }
+ if (Worklist.empty()) {
+ continue;
+ }
+
+ // Found a cycle with the candidate as its header.
+ LLVM_DEBUG(errs() << "Found cycle for header: "
+ << Info.Context.print(HeaderCandidate) << "\n");
+ std::unique_ptr<CycleT> NewCycle = std::make_unique<CycleT>();
+ NewCycle->appendEntry(HeaderCandidate);
+ NewCycle->appendBlock(HeaderCandidate);
+ Info.BlockMap.try_emplace(HeaderCandidate, NewCycle.get());
+
+ // Helper function to process (non-back-edge) predecessors of a discovered
+ // block and either add them to the worklist or recognize that the given
+ // block is an additional cycle entry.
+ auto ProcessPredecessors = [&](BlockT *Block) {
+ LLVM_DEBUG(errs() << " block " << Info.Context.print(Block) << ": ");
+
+ bool IsEntry = false;
+ for (BlockT *Pred : predecessors(Block)) {
+ const DFSInfo PredDFSInfo = BlockDFSInfo.lookup(Pred);
+ if (CandidateInfo.isAncestorOf(PredDFSInfo)) {
+ Worklist.push_back(Pred);
+ } else {
+ IsEntry = true;
+ }
+ }
+ if (IsEntry) {
+ assert(!NewCycle->isEntry(Block));
+ LLVM_DEBUG(errs() << "append as entry\n");
+ NewCycle->appendEntry(Block);
+ } else {
+ LLVM_DEBUG(errs() << "append as child\n");
+ }
+ };
+
+ do {
+ BlockT *Block = Worklist.pop_back_val();
+ if (Block == HeaderCandidate)
+ continue;
+
+ // If the block has already been discovered by some cycle
+      // (possibly by ourselves), then the outermost cycle containing it
+ // should become our child.
+ if (auto *BlockParent = Info.getTopLevelParentCycle(Block)) {
+ LLVM_DEBUG(errs() << " block " << Info.Context.print(Block) << ": ");
+
+ if (BlockParent != NewCycle.get()) {
+ LLVM_DEBUG(errs()
+ << "discovered child cycle "
+ << Info.Context.print(BlockParent->getHeader()) << "\n");
+ // Make BlockParent the child of NewCycle.
+ Info.moveToNewParent(NewCycle.get(), BlockParent);
+ NewCycle->Blocks.insert(NewCycle->Blocks.end(),
+ BlockParent->block_begin(),
+ BlockParent->block_end());
+
+ for (auto *ChildEntry : BlockParent->entries())
+ ProcessPredecessors(ChildEntry);
+ } else {
+ LLVM_DEBUG(errs()
+ << "known child cycle "
+ << Info.Context.print(BlockParent->getHeader()) << "\n");
+ }
+ } else {
+ Info.BlockMap.try_emplace(Block, NewCycle.get());
+ assert(!is_contained(NewCycle->Blocks, Block));
+ NewCycle->Blocks.push_back(Block);
+ ProcessPredecessors(Block);
+ }
+ } while (!Worklist.empty());
+
+ Info.TopLevelCycles.push_back(std::move(NewCycle));
+ }
+
+ // Fix top-level cycle links and compute cycle depths.
+ for (auto *TLC : Info.toplevel_cycles()) {
+ LLVM_DEBUG(errs() << "top-level cycle: "
+ << Info.Context.print(TLC->getHeader()) << "\n");
+
+ TLC->ParentCycle = nullptr;
+ updateDepth(TLC);
+ }
+}
+
+/// \brief Recompute depth values of \p SubTree and all descendants.
+template <typename ContextT>
+void GenericCycleInfoCompute<ContextT>::updateDepth(CycleT *SubTree) {
+ for (CycleT *Cycle : depth_first(SubTree))
+ Cycle->Depth = Cycle->ParentCycle ? Cycle->ParentCycle->Depth + 1 : 1;
+}
+
+/// \brief Compute a DFS of basic blocks starting at the function entry.
+///
+/// Fills BlockDFSInfo with start/end counters and BlockPreorder.
+template <typename ContextT>
+void GenericCycleInfoCompute<ContextT>::dfs(BlockT *EntryBlock) {
+ SmallVector<unsigned, 8> DFSTreeStack;
+ SmallVector<BlockT *, 8> TraverseStack;
+ unsigned Counter = 0;
+ TraverseStack.emplace_back(EntryBlock);
+
+ do {
+ BlockT *Block = TraverseStack.back();
+ LLVM_DEBUG(errs() << "DFS visiting block: " << Info.Context.print(Block)
+ << "\n");
+ if (!BlockDFSInfo.count(Block)) {
+ // We're visiting the block for the first time. Open its DFSInfo, add
+ // successors to the traversal stack, and remember the traversal stack
+ // depth at which the block was opened, so that we can correctly record
+ // its end time.
+ LLVM_DEBUG(errs() << " first encountered at depth "
+ << TraverseStack.size() << "\n");
+
+ DFSTreeStack.emplace_back(TraverseStack.size());
+ llvm::append_range(TraverseStack, successors(Block));
+
+ LLVM_ATTRIBUTE_UNUSED
+ bool Added = BlockDFSInfo.try_emplace(Block, ++Counter).second;
+ assert(Added);
+ BlockPreorder.push_back(Block);
+ LLVM_DEBUG(errs() << " preorder number: " << Counter << "\n");
+ } else {
+ assert(!DFSTreeStack.empty());
+ if (DFSTreeStack.back() == TraverseStack.size()) {
+ LLVM_DEBUG(errs() << " ended at " << Counter << "\n");
+ BlockDFSInfo.find(Block)->second.End = Counter;
+ DFSTreeStack.pop_back();
+ } else {
+ LLVM_DEBUG(errs() << " already done\n");
+ }
+ TraverseStack.pop_back();
+ }
+ } while (!TraverseStack.empty());
+ assert(DFSTreeStack.empty());
+
+ LLVM_DEBUG(
+ errs() << "Preorder:\n";
+ for (int i = 0, e = BlockPreorder.size(); i != e; ++i) {
+ errs() << " " << Info.Context.print(BlockPreorder[i]) << ": " << i << "\n";
+ }
+ );
+}
+
+/// \brief Reset the object to its initial state.
+template <typename ContextT> void GenericCycleInfo<ContextT>::clear() {
+ TopLevelCycles.clear();
+ BlockMap.clear();
+}
+
+/// \brief Compute the cycle info for a function.
+template <typename ContextT>
+void GenericCycleInfo<ContextT>::compute(FunctionT &F) {
+ GenericCycleInfoCompute<ContextT> Compute(*this);
+ Context.setFunction(F);
+
+ LLVM_DEBUG(errs() << "Computing cycles for function: " << F.getName()
+ << "\n");
+ Compute.run(ContextT::getEntryBlock(F));
+
+ assert(validateTree());
+}
+
+/// \brief Find the innermost cycle containing a given block.
+///
+/// \returns the innermost cycle containing \p Block or nullptr if
+/// it is not contained in any cycle.
+template <typename ContextT>
+auto GenericCycleInfo<ContextT>::getCycle(const BlockT *Block) const
+ -> CycleT * {
+ auto MapIt = BlockMap.find(Block);
+ if (MapIt != BlockMap.end())
+ return MapIt->second;
+ return nullptr;
+}
+
+/// \brief Validate the internal consistency of the cycle tree.
+///
+/// Note that this does \em not check that cycles are really cycles in the CFG,
+/// or that the right set of cycles in the CFG were found.
+template <typename ContextT>
+bool GenericCycleInfo<ContextT>::validateTree() const {
+ DenseSet<BlockT *> Blocks;
+ DenseSet<BlockT *> Entries;
+
+ auto reportError = [](const char *File, int Line, const char *Cond) {
+ errs() << File << ':' << Line
+ << ": GenericCycleInfo::validateTree: " << Cond << '\n';
+ };
+#define check(cond) \
+ do { \
+ if (!(cond)) { \
+ reportError(__FILE__, __LINE__, #cond); \
+ return false; \
+ } \
+ } while (false)
+
+ for (const auto *TLC : toplevel_cycles()) {
+ for (const CycleT *Cycle : depth_first(TLC)) {
+ if (Cycle->ParentCycle)
+ check(is_contained(Cycle->ParentCycle->children(), Cycle));
+
+ for (BlockT *Block : Cycle->Blocks) {
+ auto MapIt = BlockMap.find(Block);
+ check(MapIt != BlockMap.end());
+ check(Cycle->contains(MapIt->second));
+ check(Blocks.insert(Block).second); // duplicates in block list?
+ }
+ Blocks.clear();
+
+ check(!Cycle->Entries.empty());
+ for (BlockT *Entry : Cycle->Entries) {
+ check(Entries.insert(Entry).second); // duplicate entry?
+ check(is_contained(Cycle->Blocks, Entry));
+ }
+ Entries.clear();
+
+ unsigned ChildDepth = 0;
+ for (const CycleT *Child : Cycle->children()) {
+ check(Child->Depth > Cycle->Depth);
+ if (!ChildDepth) {
+ ChildDepth = Child->Depth;
+ } else {
+ check(ChildDepth == Child->Depth);
+ }
+ }
+ }
+ }
+
+ for (const auto &Entry : BlockMap) {
+ BlockT *Block = Entry.first;
+ for (const CycleT *Cycle = Entry.second; Cycle;
+ Cycle = Cycle->ParentCycle) {
+ check(is_contained(Cycle->Blocks, Block));
+ }
+ }
+
+#undef check
+
+ return true;
+}
+
+/// \brief Print the cycle info.
+template <typename ContextT>
+void GenericCycleInfo<ContextT>::print(raw_ostream &Out) const {
+ for (const auto *TLC : toplevel_cycles()) {
+ for (const CycleT *Cycle : depth_first(TLC)) {
+ for (unsigned I = 0; I < Cycle->Depth; ++I)
+ Out << " ";
+
+ Out << Cycle->print(Context) << '\n';
+ }
+ }
+}
+
+} // namespace llvm
+
+#undef DEBUG_TYPE
+
+#endif // LLVM_ADT_GENERICCYCLEIMPL_H
+
+#ifdef __GNUC__
+#pragma GCC diagnostic pop
+#endif
diff --git a/contrib/libs/llvm14/include/llvm/ADT/GenericCycleInfo.h b/contrib/libs/llvm14/include/llvm/ADT/GenericCycleInfo.h
new file mode 100644
index 0000000000..fc2df758ee
--- /dev/null
+++ b/contrib/libs/llvm14/include/llvm/ADT/GenericCycleInfo.h
@@ -0,0 +1,345 @@
+#pragma once
+
+#ifdef __GNUC__
+#pragma GCC diagnostic push
+#pragma GCC diagnostic ignored "-Wunused-parameter"
+#endif
+
+//===- GenericCycleInfo.h - Info for Cycles in any IR ------*- C++ -*------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+///
+/// \file
+/// \brief Find all cycles in a control-flow graph, including irreducible loops.
+///
+/// See docs/CycleTerminology.rst for a formal definition of cycles.
+///
+/// Briefly:
+/// - A cycle is a generalization of a loop which can represent
+/// irreducible control flow.
+/// - Cycles identified in a program are implementation defined,
+/// depending on the DFS traversal chosen.
+/// - Cycles are well-nested, and form a forest with a parent-child
+/// relationship.
+/// - In any choice of DFS, every natural loop L is represented by a
+/// unique cycle C which is a superset of L.
+/// - In the absence of irreducible control flow, the cycles are
+/// exactly the natural loops in the program.
+///
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_ADT_GENERICCYCLEINFO_H
+#define LLVM_ADT_GENERICCYCLEINFO_H
+
+#include "llvm/ADT/ArrayRef.h"
+#include "llvm/ADT/DenseMap.h"
+#include "llvm/ADT/GenericSSAContext.h"
+#include "llvm/ADT/GraphTraits.h"
+#include "llvm/ADT/SmallVector.h"
+#include "llvm/ADT/iterator.h"
+#include "llvm/Support/Debug.h"
+#include "llvm/Support/Printable.h"
+#include "llvm/Support/raw_ostream.h"
+#include <vector>
+
+namespace llvm {
+
+template <typename ContextT> class GenericCycleInfo;
+template <typename ContextT> class GenericCycleInfoCompute;
+
+/// A possibly irreducible generalization of a \ref Loop.
+template <typename ContextT> class GenericCycle {
+public:
+ using BlockT = typename ContextT::BlockT;
+ using FunctionT = typename ContextT::FunctionT;
+ template <typename> friend class GenericCycleInfo;
+ template <typename> friend class GenericCycleInfoCompute;
+
+private:
+ /// The parent cycle. Is null for the root "cycle". Top-level cycles point
+ /// at the root.
+ GenericCycle *ParentCycle = nullptr;
+
+ /// The entry block(s) of the cycle. The header is the only entry if
+ /// this is a loop. Is empty for the root "cycle", to avoid
+ /// unnecessary memory use.
+ SmallVector<BlockT *, 1> Entries;
+
+ /// Child cycles, if any.
+ std::vector<std::unique_ptr<GenericCycle>> Children;
+
+ /// Basic blocks that are contained in the cycle, including entry blocks,
+ /// and including blocks that are part of a child cycle.
+ std::vector<BlockT *> Blocks;
+
+ /// Depth of the cycle in the tree. The root "cycle" is at depth 0.
+ ///
+  /// \note Depths are not necessarily contiguous. However, child cycles always
+  ///       have strictly greater depth than their parents, and sibling cycles
+  ///       always have the same depth.
+ unsigned Depth = 0;
+
+ void clear() {
+ Entries.clear();
+ Children.clear();
+ Blocks.clear();
+ Depth = 0;
+ ParentCycle = nullptr;
+ }
+
+ void appendEntry(BlockT *Block) { Entries.push_back(Block); }
+ void appendBlock(BlockT *Block) { Blocks.push_back(Block); }
+
+ GenericCycle(const GenericCycle &) = delete;
+ GenericCycle &operator=(const GenericCycle &) = delete;
+ GenericCycle(GenericCycle &&Rhs) = delete;
+ GenericCycle &operator=(GenericCycle &&Rhs) = delete;
+
+public:
+ GenericCycle() = default;
+
+ /// \brief Whether the cycle is a natural loop.
+ bool isReducible() const { return Entries.size() == 1; }
+
+ BlockT *getHeader() const { return Entries[0]; }
+
+ /// \brief Return whether \p Block is an entry block of the cycle.
+ bool isEntry(BlockT *Block) const { return is_contained(Entries, Block); }
+
+ /// \brief Return whether \p Block is contained in the cycle.
+ bool contains(const BlockT *Block) const {
+ return is_contained(Blocks, Block);
+ }
+
+ /// \brief Returns true iff this cycle contains \p C.
+ ///
+ /// Note: Non-strict containment check, i.e. returns true if C is the
+ /// same cycle.
+ bool contains(const GenericCycle *C) const;
+
+ const GenericCycle *getParentCycle() const { return ParentCycle; }
+ GenericCycle *getParentCycle() { return ParentCycle; }
+ unsigned getDepth() const { return Depth; }
+
+ /// Return all of the successor blocks of this cycle.
+ ///
+ /// These are the blocks _outside of the current cycle_ which are
+ /// branched to.
+ void getExitBlocks(SmallVectorImpl<BlockT *> &TmpStorage) const;
+
+ /// Iteration over child cycles.
+ //@{
+ using const_child_iterator_base =
+ typename std::vector<std::unique_ptr<GenericCycle>>::const_iterator;
+ struct const_child_iterator
+ : iterator_adaptor_base<const_child_iterator, const_child_iterator_base> {
+ using Base =
+ iterator_adaptor_base<const_child_iterator, const_child_iterator_base>;
+
+ const_child_iterator() = default;
+ explicit const_child_iterator(const_child_iterator_base I) : Base(I) {}
+
+ const const_child_iterator_base &wrapped() { return Base::wrapped(); }
+ GenericCycle *operator*() const { return Base::I->get(); }
+ };
+
+ const_child_iterator child_begin() const {
+ return const_child_iterator{Children.begin()};
+ }
+ const_child_iterator child_end() const {
+ return const_child_iterator{Children.end()};
+ }
+ size_t getNumChildren() const { return Children.size(); }
+ iterator_range<const_child_iterator> children() const {
+ return llvm::make_range(const_child_iterator{Children.begin()},
+ const_child_iterator{Children.end()});
+ }
+ //@}
+
+ /// Iteration over blocks in the cycle (including entry blocks).
+ //@{
+ using const_block_iterator = typename std::vector<BlockT *>::const_iterator;
+
+ const_block_iterator block_begin() const {
+ return const_block_iterator{Blocks.begin()};
+ }
+ const_block_iterator block_end() const {
+ return const_block_iterator{Blocks.end()};
+ }
+ size_t getNumBlocks() const { return Blocks.size(); }
+ iterator_range<const_block_iterator> blocks() const {
+ return llvm::make_range(block_begin(), block_end());
+ }
+ //@}
+
+ /// Iteration over entry blocks.
+ //@{
+ using const_entry_iterator =
+ typename SmallVectorImpl<BlockT *>::const_iterator;
+
+ size_t getNumEntries() const { return Entries.size(); }
+ iterator_range<const_entry_iterator> entries() const {
+ return llvm::make_range(Entries.begin(), Entries.end());
+ }
+
+ Printable printEntries(const ContextT &Ctx) const {
+ return Printable([this, &Ctx](raw_ostream &Out) {
+ bool First = true;
+ for (auto *Entry : Entries) {
+ if (!First)
+ Out << ' ';
+ First = false;
+ Out << Ctx.print(Entry);
+ }
+ });
+ }
+
+ Printable print(const ContextT &Ctx) const {
+ return Printable([this, &Ctx](raw_ostream &Out) {
+ Out << "depth=" << Depth << ": entries(" << printEntries(Ctx) << ')';
+
+ for (auto *Block : Blocks) {
+ if (isEntry(Block))
+ continue;
+
+ Out << ' ' << Ctx.print(Block);
+ }
+ });
+ }
+};
+
+/// \brief Cycle information for a function.
+template <typename ContextT> class GenericCycleInfo {
+public:
+ using BlockT = typename ContextT::BlockT;
+ using CycleT = GenericCycle<ContextT>;
+ using FunctionT = typename ContextT::FunctionT;
+ template <typename> friend class GenericCycle;
+ template <typename> friend class GenericCycleInfoCompute;
+
+private:
+ ContextT Context;
+
+  /// Map basic blocks to their innermost containing cycle.
+ DenseMap<BlockT *, CycleT *> BlockMap;
+
+ /// Outermost cycles discovered by any DFS.
+ ///
+ /// Note: The implementation treats the nullptr as the parent of
+ /// every top-level cycle. See \ref contains for an example.
+ std::vector<std::unique_ptr<CycleT>> TopLevelCycles;
+
+public:
+ GenericCycleInfo() = default;
+ GenericCycleInfo(GenericCycleInfo &&) = default;
+ GenericCycleInfo &operator=(GenericCycleInfo &&) = default;
+
+ void clear();
+ void compute(FunctionT &F);
+
+ FunctionT *getFunction() const { return Context.getFunction(); }
+ const ContextT &getSSAContext() const { return Context; }
+
+ CycleT *getCycle(const BlockT *Block) const;
+ CycleT *getTopLevelParentCycle(const BlockT *Block) const;
+
+ /// Move \p Child to \p NewParent by manipulating Children vectors.
+ ///
+ /// Note: This is an incomplete operation that does not update the
+ /// list of blocks in the new parent or the depth of the subtree.
+ void moveToNewParent(CycleT *NewParent, CycleT *Child);
+
+ /// Methods for debug and self-test.
+ //@{
+ bool validateTree() const;
+ void print(raw_ostream &Out) const;
+ void dump() const { print(dbgs()); }
+ //@}
+
+ /// Iteration over top-level cycles.
+ //@{
+ using const_toplevel_iterator_base =
+ typename std::vector<std::unique_ptr<CycleT>>::const_iterator;
+ struct const_toplevel_iterator
+ : iterator_adaptor_base<const_toplevel_iterator,
+ const_toplevel_iterator_base> {
+ using Base = iterator_adaptor_base<const_toplevel_iterator,
+ const_toplevel_iterator_base>;
+
+ const_toplevel_iterator() = default;
+ explicit const_toplevel_iterator(const_toplevel_iterator_base I)
+ : Base(I) {}
+
+ const const_toplevel_iterator_base &wrapped() { return Base::wrapped(); }
+ CycleT *operator*() const { return Base::I->get(); }
+ };
+
+ const_toplevel_iterator toplevel_begin() const {
+ return const_toplevel_iterator{TopLevelCycles.begin()};
+ }
+ const_toplevel_iterator toplevel_end() const {
+ return const_toplevel_iterator{TopLevelCycles.end()};
+ }
+
+ iterator_range<const_toplevel_iterator> toplevel_cycles() const {
+ return llvm::make_range(const_toplevel_iterator{TopLevelCycles.begin()},
+ const_toplevel_iterator{TopLevelCycles.end()});
+ }
+ //@}
+};
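+
+// For illustration, a sketch of walking a computed cycle tree (SomeContextT is
+// hypothetical; assumes the GraphTraits specializations below and
+// llvm/ADT/DepthFirstIterator.h):
+//
+//   GenericCycleInfo<SomeContextT> CI;
+//   CI.compute(F);
+//   for (auto *TopCycle : CI.toplevel_cycles())
+//     for (auto *C : depth_first(TopCycle))
+//       dbgs() << C->print(CI.getSSAContext()) << '\n';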
+
+/// \brief GraphTraits for iterating over a sub-tree of the CycleT tree.
+template <typename CycleRefT, typename ChildIteratorT> struct CycleGraphTraits {
+ using NodeRef = CycleRefT;
+
+ using nodes_iterator = ChildIteratorT;
+ using ChildIteratorType = nodes_iterator;
+
+ static NodeRef getEntryNode(NodeRef Graph) { return Graph; }
+
+ static ChildIteratorType child_begin(NodeRef Ref) {
+ return Ref->child_begin();
+ }
+ static ChildIteratorType child_end(NodeRef Ref) { return Ref->child_end(); }
+
+ // Not implemented:
+ // static nodes_iterator nodes_begin(GraphType *G)
+ // static nodes_iterator nodes_end (GraphType *G)
+ // nodes_iterator/begin/end - Allow iteration over all nodes in the graph
+
+ // typedef EdgeRef - Type of Edge token in the graph, which should
+ // be cheap to copy.
+ // typedef ChildEdgeIteratorType - Type used to iterate over children edges in
+ // graph, dereference to a EdgeRef.
+
+ // static ChildEdgeIteratorType child_edge_begin(NodeRef)
+ // static ChildEdgeIteratorType child_edge_end(NodeRef)
+ // Return iterators that point to the beginning and ending of the
+ // edge list for the given callgraph node.
+ //
+ // static NodeRef edge_dest(EdgeRef)
+ // Return the destination node of an edge.
+ // static unsigned size (GraphType *G)
+ // Return total number of nodes in the graph
+};
+
+template <typename BlockT>
+struct GraphTraits<const GenericCycle<BlockT> *>
+ : CycleGraphTraits<const GenericCycle<BlockT> *,
+ typename GenericCycle<BlockT>::const_child_iterator> {};
+template <typename BlockT>
+struct GraphTraits<GenericCycle<BlockT> *>
+ : CycleGraphTraits<GenericCycle<BlockT> *,
+ typename GenericCycle<BlockT>::const_child_iterator> {};
+
+} // namespace llvm
+
+#endif // LLVM_ADT_GENERICCYCLEINFO_H
+
+#ifdef __GNUC__
+#pragma GCC diagnostic pop
+#endif
diff --git a/contrib/libs/llvm14/include/llvm/ADT/GenericSSAContext.h b/contrib/libs/llvm14/include/llvm/ADT/GenericSSAContext.h
new file mode 100644
index 0000000000..845e92df80
--- /dev/null
+++ b/contrib/libs/llvm14/include/llvm/ADT/GenericSSAContext.h
@@ -0,0 +1,85 @@
+#pragma once
+
+#ifdef __GNUC__
+#pragma GCC diagnostic push
+#pragma GCC diagnostic ignored "-Wunused-parameter"
+#endif
+
+//===- GenericSSAContext.h --------------------------------------*- C++ -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+/// \file
+///
+/// This file defines the little GenericSSAContext<X> template class
+/// that can be used to implement IR analyses as templates.
+/// Specializing these templates allows the analyses to be used over
+/// both LLVM IR and Machine IR.
+///
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_ADT_GENERICSSACONTEXT_H
+#define LLVM_ADT_GENERICSSACONTEXT_H
+
+#include "llvm/Support/Printable.h"
+
+namespace llvm {
+
+template <typename _FunctionT> class GenericSSAContext {
+public:
+ // Specializations should provide the following types that are similar to how
+ // LLVM IR is structured:
+
+ // The smallest unit of the IR is a ValueT. The SSA context uses a ValueRefT,
+ // which is a pointer to a ValueT, since Machine IR does not have the
+ // equivalent of a ValueT.
+ //
+ // using ValueRefT = ...
+
+ // An InstT is a subclass of ValueT that itself defines one or more ValueT
+ // objects.
+ //
+ // using InstT = ... must be a subclass of Value
+
+ // A BlockT is a sequence of InstT, and forms a node of the CFG. It
+  // has global functions predecessors() and successors() that return
+  // the lists of incoming and outgoing CFG edges, respectively.
+ //
+ // using BlockT = ...
+
+ // A FunctionT represents a CFG along with arguments and return values. It is
+ // the smallest complete unit of code in a Module.
+ //
+ // The compiler produces an error here if this class is implicitly
+ // specialized due to an instantiation. An explicit specialization
+ // of this template needs to be added before the instantiation point
+ // indicated by the compiler.
+ using FunctionT = typename _FunctionT::invalidTemplateInstanceError;
+
+ // Every FunctionT has a unique BlockT marked as its entry.
+ //
+ // static BlockT* getEntryBlock(FunctionT &F);
+
+ // Initialize the SSA context with information about the FunctionT being
+ // processed.
+ //
+ // void setFunction(FunctionT &function);
+ // FunctionT* getFunction() const;
+
+ // Methods to print various objects.
+ //
+ // Printable print(BlockT *block) const;
+ // Printable print(InstructionT *inst) const;
+ // Printable print(ValueRefT value) const;
+};
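+
+// For illustration only, the rough shape of an explicit specialization
+// (sketch; the actual specializations for LLVM IR and Machine IR live in
+// other headers):
+//
+//   template <> class GenericSSAContext<Function> {
+//   public:
+//     using BlockT = BasicBlock;
+//     using FunctionT = Function;
+//     static BasicBlock *getEntryBlock(Function &F);
+//     void setFunction(Function &F);
+//     Function *getFunction() const;
+//     Printable print(BasicBlock *Block) const;
+//     // ...
+//   };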
+} // namespace llvm
+
+#endif // LLVM_ADT_GENERICSSACONTEXT_H
+
+#ifdef __GNUC__
+#pragma GCC diagnostic pop
+#endif
diff --git a/contrib/libs/llvm14/include/llvm/ADT/GraphTraits.h b/contrib/libs/llvm14/include/llvm/ADT/GraphTraits.h
new file mode 100644
index 0000000000..b0edc6adc4
--- /dev/null
+++ b/contrib/libs/llvm14/include/llvm/ADT/GraphTraits.h
@@ -0,0 +1,155 @@
+#pragma once
+
+#ifdef __GNUC__
+#pragma GCC diagnostic push
+#pragma GCC diagnostic ignored "-Wunused-parameter"
+#endif
+
+//===- llvm/ADT/GraphTraits.h - Graph traits template -----------*- C++ -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+///
+/// \file
+/// This file defines the little GraphTraits<X> template class that should be
+/// specialized by classes that want to be iterable by generic graph
+/// iterators.
+///
+/// This file also defines the marker class Inverse that is used to iterate over
+/// graphs in a graph-defined inverse ordering.
+///
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_ADT_GRAPHTRAITS_H
+#define LLVM_ADT_GRAPHTRAITS_H
+
+#include "llvm/ADT/iterator_range.h"
+
+namespace llvm {
+
+// GraphTraits - This class should be specialized by different graph types...
+// which is why the default version is empty.
+//
+// This template evolved from supporting `BasicBlock` to also later supporting
+// more complex types (e.g. CFG and DomTree).
+//
+// GraphTraits can be used to create a view over a graph interpreting it
+// differently without requiring a copy of the original graph. This could
+// be achieved by carrying more data in NodeRef. See LoopBodyTraits for one
+// example.
+template<class GraphType>
+struct GraphTraits {
+ // Elements to provide:
+
+ // typedef NodeRef - Type of Node token in the graph, which should
+ // be cheap to copy.
+ // typedef ChildIteratorType - Type used to iterate over children in graph,
+ // dereference to a NodeRef.
+
+ // static NodeRef getEntryNode(const GraphType &)
+ // Return the entry node of the graph
+
+ // static ChildIteratorType child_begin(NodeRef)
+ // static ChildIteratorType child_end (NodeRef)
+ // Return iterators that point to the beginning and ending of the child
+ // node list for the specified node.
+
+ // typedef ...iterator nodes_iterator; - dereference to a NodeRef
+ // static nodes_iterator nodes_begin(GraphType *G)
+ // static nodes_iterator nodes_end (GraphType *G)
+ // nodes_iterator/begin/end - Allow iteration over all nodes in the graph
+
+ // typedef EdgeRef - Type of Edge token in the graph, which should
+ // be cheap to copy.
+ // typedef ChildEdgeIteratorType - Type used to iterate over children edges in
+ // graph, dereference to a EdgeRef.
+
+ // static ChildEdgeIteratorType child_edge_begin(NodeRef)
+ // static ChildEdgeIteratorType child_edge_end(NodeRef)
+ // Return iterators that point to the beginning and ending of the
+ // edge list for the given graph node.
+ //
+ // static NodeRef edge_dest(EdgeRef)
+ // Return the destination node of an edge.
+
+ // static unsigned size (GraphType *G)
+ // Return total number of nodes in the graph
+
+ // If anyone tries to use this class without having an appropriate
+ // specialization, the compiler emits an error. If you get this error, it's because you
+ // need to include the appropriate specialization of GraphTraits<> for your
+ // graph, or you need to define it for a new graph type. Either that or
+ // your argument to XXX_begin(...) is unknown or needs to have the proper .h
+ // file #include'd.
+ using NodeRef = typename GraphType::UnknownGraphTypeError;
+};
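+
+// For illustration (a sketch, not part of LLVM; SimpleNode and SimpleGraph
+// are assumed example types), a specialization for an adjacency-list graph
+// could look like:
+//
+//   struct SimpleNode { std::vector<SimpleNode *> Succs; };
+//   struct SimpleGraph { SimpleNode *Entry; };
+//
+//   template <> struct GraphTraits<SimpleGraph *> {
+//     using NodeRef = SimpleNode *;
+//     using ChildIteratorType = std::vector<SimpleNode *>::iterator;
+//     static NodeRef getEntryNode(SimpleGraph *G) { return G->Entry; }
+//     static ChildIteratorType child_begin(NodeRef N) { return N->Succs.begin(); }
+//     static ChildIteratorType child_end(NodeRef N) { return N->Succs.end(); }
+//   };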
+
+// Inverse - This class is used as a little marker class to tell the graph
+// iterator to iterate over the graph in a graph defined "Inverse" ordering.
+// Not all graphs define an inverse ordering, and if they do, it depends on
+// the graph exactly what that is. Here's an example of usage with the
+// df_iterator:
+//
+// idf_iterator<Method*> I = idf_begin(M), E = idf_end(M);
+// for (; I != E; ++I) { ... }
+//
+// Which is equivalent to:
+// df_iterator<Inverse<Method*>> I = idf_begin(M), E = idf_end(M);
+// for (; I != E; ++I) { ... }
+//
+template <class GraphType>
+struct Inverse {
+ const GraphType &Graph;
+
+ inline Inverse(const GraphType &G) : Graph(G) {}
+};
+
+// Provide a partial specialization of GraphTraits so that the inverse of an
+// inverse falls back to the original graph.
+template <class T> struct GraphTraits<Inverse<Inverse<T>>> : GraphTraits<T> {};
+
+// Provide iterator ranges for the graph traits nodes and children
+template <class GraphType>
+iterator_range<typename GraphTraits<GraphType>::nodes_iterator>
+nodes(const GraphType &G) {
+ return make_range(GraphTraits<GraphType>::nodes_begin(G),
+ GraphTraits<GraphType>::nodes_end(G));
+}
+template <class GraphType>
+iterator_range<typename GraphTraits<Inverse<GraphType>>::nodes_iterator>
+inverse_nodes(const GraphType &G) {
+ return make_range(GraphTraits<Inverse<GraphType>>::nodes_begin(G),
+ GraphTraits<Inverse<GraphType>>::nodes_end(G));
+}
+
+template <class GraphType>
+iterator_range<typename GraphTraits<GraphType>::ChildIteratorType>
+children(const typename GraphTraits<GraphType>::NodeRef &G) {
+ return make_range(GraphTraits<GraphType>::child_begin(G),
+ GraphTraits<GraphType>::child_end(G));
+}
+
+template <class GraphType>
+iterator_range<typename GraphTraits<Inverse<GraphType>>::ChildIteratorType>
+inverse_children(const typename GraphTraits<GraphType>::NodeRef &G) {
+ return make_range(GraphTraits<Inverse<GraphType>>::child_begin(G),
+ GraphTraits<Inverse<GraphType>>::child_end(G));
+}
+
+template <class GraphType>
+iterator_range<typename GraphTraits<GraphType>::ChildEdgeIteratorType>
+children_edges(const typename GraphTraits<GraphType>::NodeRef &G) {
+ return make_range(GraphTraits<GraphType>::child_edge_begin(G),
+ GraphTraits<GraphType>::child_edge_end(G));
+}
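+
+// Usage sketch (illustrative; assumes a GraphTraits specialization like the
+// SimpleGraph example above). GraphType cannot be deduced from a NodeRef, so
+// children<> needs an explicit template argument; nodes(G)/inverse_nodes(G)
+// additionally require nodes_begin/nodes_end in the specialization:
+//
+//   SimpleGraph *G = ...;
+//   SimpleNode *Entry = GraphTraits<SimpleGraph *>::getEntryNode(G);
+//   for (SimpleNode *Succ : children<SimpleGraph *>(Entry))
+//     visit(Succ);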
+
+} // end namespace llvm
+
+#endif // LLVM_ADT_GRAPHTRAITS_H
+
+#ifdef __GNUC__
+#pragma GCC diagnostic pop
+#endif
diff --git a/contrib/libs/llvm14/include/llvm/ADT/Hashing.h b/contrib/libs/llvm14/include/llvm/ADT/Hashing.h
new file mode 100644
index 0000000000..4d61819a24
--- /dev/null
+++ b/contrib/libs/llvm14/include/llvm/ADT/Hashing.h
@@ -0,0 +1,701 @@
+#pragma once
+
+#ifdef __GNUC__
+#pragma GCC diagnostic push
+#pragma GCC diagnostic ignored "-Wunused-parameter"
+#endif
+
+//===-- llvm/ADT/Hashing.h - Utilities for hashing --------------*- C++ -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// This file implements the newly proposed standard C++ interfaces for hashing
+// arbitrary data and building hash functions for user-defined types. This
+// interface was originally proposed in N3333[1] and is currently under review
+// for inclusion in a future TR and/or standard.
+//
+// The primary interfaces provided comprise one type and three functions:
+//
+// -- 'hash_code' class is an opaque type representing the hash code for some
+// data. It is the intended product of hashing, and can be used to implement
+// hash tables, checksumming, and other common uses of hashes. It is not an
+// integer type (although it can be converted to one) because it is risky
+// to assume much about the internals of a hash_code. In particular, each
+// execution of the program has a high probability of producing a different
+// hash_code for a given input. Thus hash_code values are not stable to
+// save or persist, and should only be used during a single execution for
+// the construction of hashing data structures.
+//
+// -- 'hash_value' is a function designed to be overloaded for each
+// user-defined type which wishes to be used within a hashing context. It
+// should be overloaded within the user-defined type's namespace and found
+// via ADL. Overloads for primitive types are provided by this library.
+//
+// -- 'hash_combine' and 'hash_combine_range' are functions designed to aid
+// programmers in easily and intuitively combining a set of data into
+// a single hash_code for their object. They should only logically be used
+// within the implementation of a 'hash_value' routine or similar context.
+//
+// Note that 'hash_combine_range' contains very special logic for hashing
+// a contiguous array of integers or pointers. This logic is *extremely* fast:
+// on a modern Intel "Gainestown" Xeon (Nehalem uarch) @2.2 GHz, these were
+// benchmarked at over 6.5 GiB/s for large keys, and <20 cycles/hash for keys
+// under 32 bytes.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_ADT_HASHING_H
+#define LLVM_ADT_HASHING_H
+
+#include "llvm/Support/DataTypes.h"
+#include "llvm/Support/ErrorHandling.h"
+#include "llvm/Support/SwapByteOrder.h"
+#include "llvm/Support/type_traits.h"
+#include <algorithm>
+#include <cassert>
+#include <cstring>
+#include <string>
+#include <tuple>
+#include <utility>
+
+namespace llvm {
+template <typename T, typename Enable> struct DenseMapInfo;
+
+/// An opaque object representing a hash code.
+///
+/// This object represents the result of hashing some entity. It is intended to
+/// be used to implement hashtables or other hashing-based data structures.
+/// While it wraps and exposes a numeric value, this value should not be
+/// trusted to be stable or predictable across processes or executions.
+///
+/// In order to obtain the hash_code for an object 'x':
+/// \code
+/// using llvm::hash_value;
+/// llvm::hash_code code = hash_value(x);
+/// \endcode
+class hash_code {
+ size_t value;
+
+public:
+ /// Default construct a hash_code.
+ /// Note that this leaves the value uninitialized.
+ hash_code() = default;
+
+ /// Form a hash code directly from a numerical value.
+ hash_code(size_t value) : value(value) {}
+
+ /// Convert the hash code to its numerical value for use.
+ /*explicit*/ operator size_t() const { return value; }
+
+ friend bool operator==(const hash_code &lhs, const hash_code &rhs) {
+ return lhs.value == rhs.value;
+ }
+ friend bool operator!=(const hash_code &lhs, const hash_code &rhs) {
+ return lhs.value != rhs.value;
+ }
+
+ /// Allow a hash_code to be directly run through hash_value.
+ friend size_t hash_value(const hash_code &code) { return code.value; }
+};
+
+/// Compute a hash_code for any integer value.
+///
+/// Note that this function is intended to compute the same hash_code for
+/// a particular value without regard to the pre-promotion type. This is in
+/// contrast to hash_combine which may produce different hash_codes for
+/// differing argument types even if they would implicit promote to a common
+/// type without changing the value.
+template <typename T>
+std::enable_if_t<is_integral_or_enum<T>::value, hash_code> hash_value(T value);
+
+/// Compute a hash_code for a pointer's address.
+///
+/// N.B.: This hashes the *address*. Not the value and not the type.
+template <typename T> hash_code hash_value(const T *ptr);
+
+/// Compute a hash_code for a pair of objects.
+template <typename T, typename U>
+hash_code hash_value(const std::pair<T, U> &arg);
+
+/// Compute a hash_code for a tuple.
+template <typename... Ts>
+hash_code hash_value(const std::tuple<Ts...> &arg);
+
+/// Compute a hash_code for a standard string.
+template <typename T>
+hash_code hash_value(const std::basic_string<T> &arg);
+
+
+/// Override the execution seed with a fixed value.
+///
+/// This hashing library uses a per-execution seed designed to change on each
+/// run with high probability in order to ensure that the hash codes are not
+/// attackable and to ensure that output which is intended to be stable does
+/// not rely on the particulars of the hash codes produced.
+///
+/// That said, there are use cases where it is important to be able to
+/// reproduce *exactly* a specific behavior. To that end, we provide a function
+/// which will forcibly set the seed to a fixed value. This must be done at the
+/// start of the program, before any hashes are computed. Also, it cannot be
+/// undone. This makes it thread-hostile and very hard to use outside of
+/// immediately on start of a simple program designed for reproducible
+/// behavior.
+void set_fixed_execution_hash_seed(uint64_t fixed_value);
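+
+// For example (illustrative sketch): to make hashes reproducible in a test
+// binary, call this once at the very top of main, before any hashing:
+//
+//   int main(int argc, char **argv) {
+//     llvm::set_fixed_execution_hash_seed(0); // must precede all hashing
+//     ...
+//   }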
+
+
+// All of the implementation details of actually computing the various hash
+// code values are held within this namespace. These routines are included in
+// the header file mainly to allow inlining and constant propagation.
+namespace hashing {
+namespace detail {
+
+inline uint64_t fetch64(const char *p) {
+ uint64_t result;
+ memcpy(&result, p, sizeof(result));
+ if (sys::IsBigEndianHost)
+ sys::swapByteOrder(result);
+ return result;
+}
+
+inline uint32_t fetch32(const char *p) {
+ uint32_t result;
+ memcpy(&result, p, sizeof(result));
+ if (sys::IsBigEndianHost)
+ sys::swapByteOrder(result);
+ return result;
+}
+
+/// Some primes between 2^63 and 2^64 for various uses.
+static constexpr uint64_t k0 = 0xc3a5c85c97cb3127ULL;
+static constexpr uint64_t k1 = 0xb492b66fbe98f273ULL;
+static constexpr uint64_t k2 = 0x9ae16a3b2f90404fULL;
+static constexpr uint64_t k3 = 0xc949d7c7509e6557ULL;
+
+/// Bitwise right rotate.
+/// Normally this will compile to a single instruction, especially if the
+/// shift is a manifest constant.
+inline uint64_t rotate(uint64_t val, size_t shift) {
+ // Avoid shifting by 64: doing so yields an undefined result.
+ return shift == 0 ? val : ((val >> shift) | (val << (64 - shift)));
+}
+
+inline uint64_t shift_mix(uint64_t val) {
+ return val ^ (val >> 47);
+}
+
+inline uint64_t hash_16_bytes(uint64_t low, uint64_t high) {
+ // Murmur-inspired hashing.
+ const uint64_t kMul = 0x9ddfea08eb382d69ULL;
+ uint64_t a = (low ^ high) * kMul;
+ a ^= (a >> 47);
+ uint64_t b = (high ^ a) * kMul;
+ b ^= (b >> 47);
+ b *= kMul;
+ return b;
+}
+
+inline uint64_t hash_1to3_bytes(const char *s, size_t len, uint64_t seed) {
+ uint8_t a = s[0];
+ uint8_t b = s[len >> 1];
+ uint8_t c = s[len - 1];
+ uint32_t y = static_cast<uint32_t>(a) + (static_cast<uint32_t>(b) << 8);
+ uint32_t z = static_cast<uint32_t>(len) + (static_cast<uint32_t>(c) << 2);
+ return shift_mix(y * k2 ^ z * k3 ^ seed) * k2;
+}
+
+inline uint64_t hash_4to8_bytes(const char *s, size_t len, uint64_t seed) {
+ uint64_t a = fetch32(s);
+ return hash_16_bytes(len + (a << 3), seed ^ fetch32(s + len - 4));
+}
+
+inline uint64_t hash_9to16_bytes(const char *s, size_t len, uint64_t seed) {
+ uint64_t a = fetch64(s);
+ uint64_t b = fetch64(s + len - 8);
+ return hash_16_bytes(seed ^ a, rotate(b + len, len)) ^ b;
+}
+
+inline uint64_t hash_17to32_bytes(const char *s, size_t len, uint64_t seed) {
+ uint64_t a = fetch64(s) * k1;
+ uint64_t b = fetch64(s + 8);
+ uint64_t c = fetch64(s + len - 8) * k2;
+ uint64_t d = fetch64(s + len - 16) * k0;
+ return hash_16_bytes(rotate(a - b, 43) + rotate(c ^ seed, 30) + d,
+ a + rotate(b ^ k3, 20) - c + len + seed);
+}
+
+inline uint64_t hash_33to64_bytes(const char *s, size_t len, uint64_t seed) {
+ uint64_t z = fetch64(s + 24);
+ uint64_t a = fetch64(s) + (len + fetch64(s + len - 16)) * k0;
+ uint64_t b = rotate(a + z, 52);
+ uint64_t c = rotate(a, 37);
+ a += fetch64(s + 8);
+ c += rotate(a, 7);
+ a += fetch64(s + 16);
+ uint64_t vf = a + z;
+ uint64_t vs = b + rotate(a, 31) + c;
+ a = fetch64(s + 16) + fetch64(s + len - 32);
+ z = fetch64(s + len - 8);
+ b = rotate(a + z, 52);
+ c = rotate(a, 37);
+ a += fetch64(s + len - 24);
+ c += rotate(a, 7);
+ a += fetch64(s + len - 16);
+ uint64_t wf = a + z;
+ uint64_t ws = b + rotate(a, 31) + c;
+ uint64_t r = shift_mix((vf + ws) * k2 + (wf + vs) * k0);
+ return shift_mix((seed ^ (r * k0)) + vs) * k2;
+}
+
+inline uint64_t hash_short(const char *s, size_t length, uint64_t seed) {
+ if (length >= 4 && length <= 8)
+ return hash_4to8_bytes(s, length, seed);
+ if (length > 8 && length <= 16)
+ return hash_9to16_bytes(s, length, seed);
+ if (length > 16 && length <= 32)
+ return hash_17to32_bytes(s, length, seed);
+ if (length > 32)
+ return hash_33to64_bytes(s, length, seed);
+ if (length != 0)
+ return hash_1to3_bytes(s, length, seed);
+
+ return k2 ^ seed;
+}
+
+/// The intermediate state used during hashing.
+/// Currently, the algorithm for computing hash codes is based on CityHash and
+/// keeps 56 bytes of arbitrary state.
+struct hash_state {
+ uint64_t h0 = 0, h1 = 0, h2 = 0, h3 = 0, h4 = 0, h5 = 0, h6 = 0;
+
+ /// Create a new hash_state structure and initialize it based on the
+ /// seed and the first 64-byte chunk.
+ /// This effectively performs the initial mix.
+ static hash_state create(const char *s, uint64_t seed) {
+ hash_state state = {
+ 0, seed, hash_16_bytes(seed, k1), rotate(seed ^ k1, 49),
+ seed * k1, shift_mix(seed), 0 };
+ state.h6 = hash_16_bytes(state.h4, state.h5);
+ state.mix(s);
+ return state;
+ }
+
+ /// Mix 32-bytes from the input sequence into the 16-bytes of 'a'
+ /// and 'b', including whatever is already in 'a' and 'b'.
+ static void mix_32_bytes(const char *s, uint64_t &a, uint64_t &b) {
+ a += fetch64(s);
+ uint64_t c = fetch64(s + 24);
+ b = rotate(b + a + c, 21);
+ uint64_t d = a;
+ a += fetch64(s + 8) + fetch64(s + 16);
+ b += rotate(a, 44) + d;
+ a += c;
+ }
+
+ /// Mix in a 64-byte buffer of data.
+ /// We mix all 64 bytes even when the chunk length is smaller, but we
+ /// record the actual length.
+ void mix(const char *s) {
+ h0 = rotate(h0 + h1 + h3 + fetch64(s + 8), 37) * k1;
+ h1 = rotate(h1 + h4 + fetch64(s + 48), 42) * k1;
+ h0 ^= h6;
+ h1 += h3 + fetch64(s + 40);
+ h2 = rotate(h2 + h5, 33) * k1;
+ h3 = h4 * k1;
+ h4 = h0 + h5;
+ mix_32_bytes(s, h3, h4);
+ h5 = h2 + h6;
+ h6 = h1 + fetch64(s + 16);
+ mix_32_bytes(s + 32, h5, h6);
+ std::swap(h2, h0);
+ }
+
+ /// Compute the final 64-bit hash code value based on the current
+ /// state and the length of bytes hashed.
+ uint64_t finalize(size_t length) {
+ return hash_16_bytes(hash_16_bytes(h3, h5) + shift_mix(h1) * k1 + h2,
+ hash_16_bytes(h4, h6) + shift_mix(length) * k1 + h0);
+ }
+};
+
+
+/// A global, fixed seed-override variable.
+///
+/// This variable can be set using the \see llvm::set_fixed_execution_hash_seed
+/// function. See that function for details. Do not, under any circumstances,
+/// set or read this variable.
+extern uint64_t fixed_seed_override;
+
+inline uint64_t get_execution_seed() {
+ // FIXME: This needs to be a per-execution seed. This is just a placeholder
+ // implementation. Switching to a per-execution seed is likely to flush out
+ // instability bugs and so will happen as its own commit.
+ //
+ // However, if there is a fixed seed override set the first time this is
+ // called, return that instead of the per-execution seed.
+ const uint64_t seed_prime = 0xff51afd7ed558ccdULL;
+ static uint64_t seed = fixed_seed_override ? fixed_seed_override : seed_prime;
+ return seed;
+}
+
+
+/// Trait to indicate whether a type's bits can be hashed directly.
+///
+/// A type trait which is true if we want to combine values for hashing by
+/// reading the underlying data. It is false if values of this type must
+/// first be passed to hash_value, and the resulting hash_codes combined.
+//
+// FIXME: We want to replace is_integral_or_enum and is_pointer here with
+// a predicate which asserts that comparing the underlying storage of two
+// values of the type for equality is equivalent to comparing the two values
+// for equality. For all the platforms we care about, this holds for integers
+// and pointers, but there are platforms where it doesn't and we would like to
+// support user-defined types which happen to satisfy this property.
+template <typename T> struct is_hashable_data
+ : std::integral_constant<bool, ((is_integral_or_enum<T>::value ||
+ std::is_pointer<T>::value) &&
+ 64 % sizeof(T) == 0)> {};
+
+// Special case std::pair to detect when both types are viable and when there
+// is no alignment-derived padding in the pair. This is a bit of a lie because
+// std::pair isn't truly POD, but it's close enough in all reasonable
+// implementations for our use case of hashing the underlying data.
+template <typename T, typename U> struct is_hashable_data<std::pair<T, U> >
+ : std::integral_constant<bool, (is_hashable_data<T>::value &&
+ is_hashable_data<U>::value &&
+ (sizeof(T) + sizeof(U)) ==
+ sizeof(std::pair<T, U>))> {};
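+
+// For illustration (a sketch, not part of the original header), the trait
+// resolves as one would expect:
+//
+//   static_assert(is_hashable_data<int>::value, "hashed byte-wise");
+//   static_assert(!is_hashable_data<std::string>::value,
+//                 "routed through hash_value instead");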
+
+/// Helper to get the hashable data representation for a type.
+/// This variant is enabled when the type itself can be used.
+template <typename T>
+std::enable_if_t<is_hashable_data<T>::value, T>
+get_hashable_data(const T &value) {
+ return value;
+}
+/// Helper to get the hashable data representation for a type.
+/// This variant is enabled when we must first call hash_value and use the
+/// result as our data.
+template <typename T>
+std::enable_if_t<!is_hashable_data<T>::value, size_t>
+get_hashable_data(const T &value) {
+ using ::llvm::hash_value;
+ return hash_value(value);
+}
+
+/// Helper to store data from a value into a buffer and advance the
+/// pointer into that buffer.
+///
+/// This routine first checks whether there is enough space in the provided
+/// buffer, and if not immediately returns false. If there is space, it
+/// copies the underlying bytes of value into the buffer, advances the
+/// buffer_ptr past the copied bytes, and returns true.
+template <typename T>
+bool store_and_advance(char *&buffer_ptr, char *buffer_end, const T& value,
+ size_t offset = 0) {
+ size_t store_size = sizeof(value) - offset;
+ if (buffer_ptr + store_size > buffer_end)
+ return false;
+ const char *value_data = reinterpret_cast<const char *>(&value);
+ memcpy(buffer_ptr, value_data + offset, store_size);
+ buffer_ptr += store_size;
+ return true;
+}
+
+/// Implement the combining of integral values into a hash_code.
+///
+/// This overload is selected when the value type of the iterator is
+/// integral. Rather than computing a hash_code for each object and then
+/// combining them, this (as an optimization) directly combines the integers.
+template <typename InputIteratorT>
+hash_code hash_combine_range_impl(InputIteratorT first, InputIteratorT last) {
+ const uint64_t seed = get_execution_seed();
+ char buffer[64], *buffer_ptr = buffer;
+ char *const buffer_end = std::end(buffer);
+ while (first != last && store_and_advance(buffer_ptr, buffer_end,
+ get_hashable_data(*first)))
+ ++first;
+ if (first == last)
+ return hash_short(buffer, buffer_ptr - buffer, seed);
+ assert(buffer_ptr == buffer_end);
+
+ hash_state state = hash_state::create(buffer, seed);
+ size_t length = 64;
+ while (first != last) {
+ // Fill up the buffer. We don't clear it, which re-mixes the last round
+ // when only a partial 64-byte chunk is left.
+ buffer_ptr = buffer;
+ while (first != last && store_and_advance(buffer_ptr, buffer_end,
+ get_hashable_data(*first)))
+ ++first;
+
+ // Rotate the buffer if we did a partial fill in order to simulate doing
+ // a mix of the last 64 bytes. That is how the algorithm works when we
+ // have a contiguous byte sequence, and we want to emulate that here.
+ std::rotate(buffer, buffer_ptr, buffer_end);
+
+ // Mix this chunk into the current state.
+ state.mix(buffer);
+ length += buffer_ptr - buffer;
+ }
+
+ return state.finalize(length);
+}
+
+/// Implement the combining of integral values into a hash_code.
+///
+/// This overload is selected when the value type of the iterator is integral
+/// and when the input iterator is actually a pointer. Rather than computing
+/// a hash_code for each object and then combining them, this (as an
+/// optimization) directly combines the integers. Also, because the integers
+/// are stored in contiguous memory, this routine avoids copying each value
+/// and directly reads from the underlying memory.
+template <typename ValueT>
+std::enable_if_t<is_hashable_data<ValueT>::value, hash_code>
+hash_combine_range_impl(ValueT *first, ValueT *last) {
+ const uint64_t seed = get_execution_seed();
+ const char *s_begin = reinterpret_cast<const char *>(first);
+ const char *s_end = reinterpret_cast<const char *>(last);
+ const size_t length = std::distance(s_begin, s_end);
+ if (length <= 64)
+ return hash_short(s_begin, length, seed);
+
+ const char *s_aligned_end = s_begin + (length & ~63);
+ hash_state state = hash_state::create(s_begin, seed);
+ s_begin += 64;
+ while (s_begin != s_aligned_end) {
+ state.mix(s_begin);
+ s_begin += 64;
+ }
+ if (length & 63)
+ state.mix(s_end - 64);
+
+ return state.finalize(length);
+}
+
+} // namespace detail
+} // namespace hashing
+
+
+/// Compute a hash_code for a sequence of values.
+///
+/// This hashes a sequence of values. It produces the same hash_code as
+/// 'hash_combine(a, b, c, ...)', but can run over arbitrary sized sequences
+/// and is significantly faster given pointers and types which can be hashed as
+/// a sequence of bytes.
+template <typename InputIteratorT>
+hash_code hash_combine_range(InputIteratorT first, InputIteratorT last) {
+ return ::llvm::hashing::detail::hash_combine_range_impl(first, last);
+}
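+
+// Usage sketch (illustrative): hashing a contiguous container takes the fast
+// byte-wise path because the iterators are pointers to hashable data:
+//
+//   std::vector<int> V = {1, 2, 3};
+//   llvm::hash_code H = llvm::hash_combine_range(V.data(), V.data() + V.size());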
+
+
+// Implementation details for hash_combine.
+namespace hashing {
+namespace detail {
+
+/// Helper class to manage the recursive combining of hash_combine
+/// arguments.
+///
+/// This class exists to manage the state and various calls involved in the
+/// recursive combining of arguments used in hash_combine. It is particularly
+/// useful at minimizing the code in the recursive calls to ease the pain
+/// caused by a lack of variadic functions.
+struct hash_combine_recursive_helper {
+ char buffer[64] = {};
+ hash_state state;
+ const uint64_t seed;
+
+public:
+ /// Construct a recursive hash combining helper.
+ ///
+ /// This sets up the state for a recursive hash combine, including getting
+ /// the seed and buffer setup.
+ hash_combine_recursive_helper()
+ : seed(get_execution_seed()) {}
+
+ /// Combine one chunk of data into the current in-flight hash.
+ ///
+ /// This merges one chunk of data into the hash. First it tries to buffer
+ /// the data. If the buffer is full, it hashes the buffer into its
+ /// hash_state, empties it, and then merges the new chunk in. This also
+ /// handles cases where the data straddles the end of the buffer.
+ template <typename T>
+ char *combine_data(size_t &length, char *buffer_ptr, char *buffer_end, T data) {
+ if (!store_and_advance(buffer_ptr, buffer_end, data)) {
+ // Check for skew which prevents the buffer from being packed, and do
+ // a partial store into the buffer to fill it. This is only a concern
+ // with the variadic combine because that formation can have varying
+ // argument types.
+ size_t partial_store_size = buffer_end - buffer_ptr;
+ memcpy(buffer_ptr, &data, partial_store_size);
+
+ // If the store fails, our buffer is full and ready to hash. We have to
+ // either initialize the hash state (on the first full buffer) or mix
+ // this buffer into the existing hash state. Length tracks the *hashed*
+ // length, not the buffered length.
+ if (length == 0) {
+ state = hash_state::create(buffer, seed);
+ length = 64;
+ } else {
+ // Mix this chunk into the current state and bump length up by 64.
+ state.mix(buffer);
+ length += 64;
+ }
+ // Reset the buffer_ptr to the head of the buffer for the next chunk of
+ // data.
+ buffer_ptr = buffer;
+
+ // Try again to store into the buffer -- this cannot fail as we only
+ // store types smaller than the buffer.
+ if (!store_and_advance(buffer_ptr, buffer_end, data,
+ partial_store_size))
+ llvm_unreachable("buffer smaller than stored type");
+ }
+ return buffer_ptr;
+ }
+
+ /// Recursive, variadic combining method.
+ ///
+ /// This function recurses through each argument, combining that argument
+ /// into a single hash.
+ template <typename T, typename ...Ts>
+ hash_code combine(size_t length, char *buffer_ptr, char *buffer_end,
+ const T &arg, const Ts &...args) {
+ buffer_ptr = combine_data(length, buffer_ptr, buffer_end, get_hashable_data(arg));
+
+ // Recurse to the next argument.
+ return combine(length, buffer_ptr, buffer_end, args...);
+ }
+
+ /// Base case for recursive, variadic combining.
+ ///
+ /// The base case when combining arguments recursively is reached when all
+ /// arguments have been handled. It flushes the remaining buffer and
+ /// constructs a hash_code.
+ hash_code combine(size_t length, char *buffer_ptr, char *buffer_end) {
+ // Check whether the entire set of values fit in the buffer. If so, we'll
+ // use the optimized short hashing routine and skip state entirely.
+ if (length == 0)
+ return hash_short(buffer, buffer_ptr - buffer, seed);
+
+ // Mix the final buffer, rotating it if we did a partial fill in order to
+ // simulate doing a mix of the last 64 bytes. That is how the algorithm
+ // works when we have a contiguous byte sequence, and we want to emulate
+ // that here.
+ std::rotate(buffer, buffer_ptr, buffer_end);
+
+ // Mix this chunk into the current state.
+ state.mix(buffer);
+ length += buffer_ptr - buffer;
+
+ return state.finalize(length);
+ }
+};
+
+} // namespace detail
+} // namespace hashing
+
+/// Combine values into a single hash_code.
+///
+/// This routine accepts a varying number of arguments of any type. It will
+/// attempt to combine them into a single hash_code. For user-defined types it
+/// attempts to call a \see hash_value overload (via ADL) for the type. For
+/// integer and pointer types it directly combines their data into the
+/// resulting hash_code.
+///
+/// The result is suitable for returning from a user's hash_value
+/// *implementation* for their user-defined type. Consumers of a type should
+/// *not* call this routine, they should instead call 'hash_value'.
+template <typename ...Ts> hash_code hash_combine(const Ts &...args) {
+ // Recursively hash each argument using a helper class.
+ ::llvm::hashing::detail::hash_combine_recursive_helper helper;
+ return helper.combine(0, helper.buffer, helper.buffer + 64, args...);
+}
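+
+// A typical use (sketch; 'Point' is a hypothetical user type) is inside the
+// hash_value overload for a user-defined type, found via ADL:
+//
+//   namespace myns {
+//   struct Point { int x, y; };
+//   inline llvm::hash_code hash_value(const Point &P) {
+//     return llvm::hash_combine(P.x, P.y);
+//   }
+//   } // namespace myns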
+
+// Implementation details for implementations of hash_value overloads provided
+// here.
+namespace hashing {
+namespace detail {
+
+/// Helper to hash the value of a single integer.
+///
+/// Overloads for smaller integer types are not provided to ensure consistent
+/// behavior in the presence of integral promotions. Essentially,
+/// "hash_value('4')" and "hash_value('0' + 4)" should be the same.
+inline hash_code hash_integer_value(uint64_t value) {
+ // Similar to hash_4to8_bytes but using a seed instead of length.
+ const uint64_t seed = get_execution_seed();
+ const char *s = reinterpret_cast<const char *>(&value);
+ const uint64_t a = fetch32(s);
+ return hash_16_bytes(seed + (a << 3), fetch32(s + 4));
+}
+
+} // namespace detail
+} // namespace hashing
+
+// Declared and documented above, but defined here so that any of the hashing
+// infrastructure is available.
+template <typename T>
+std::enable_if_t<is_integral_or_enum<T>::value, hash_code> hash_value(T value) {
+ return ::llvm::hashing::detail::hash_integer_value(
+ static_cast<uint64_t>(value));
+}
+
+// Declared and documented above, but defined here so that any of the hashing
+// infrastructure is available.
+template <typename T> hash_code hash_value(const T *ptr) {
+ return ::llvm::hashing::detail::hash_integer_value(
+ reinterpret_cast<uintptr_t>(ptr));
+}
+
+// Declared and documented above, but defined here so that any of the hashing
+// infrastructure is available.
+template <typename T, typename U>
+hash_code hash_value(const std::pair<T, U> &arg) {
+ return hash_combine(arg.first, arg.second);
+}
+
+// Implementation details for the hash_value overload for std::tuple<...>(...).
+namespace hashing {
+namespace detail {
+
+template <typename... Ts, std::size_t... Indices>
+hash_code hash_value_tuple_helper(const std::tuple<Ts...> &arg,
+ std::index_sequence<Indices...>) {
+ return hash_combine(std::get<Indices>(arg)...);
+}
+
+} // namespace detail
+} // namespace hashing
+
+template <typename... Ts>
+hash_code hash_value(const std::tuple<Ts...> &arg) {
+ // TODO: Use std::apply when LLVM starts using C++17.
+ return ::llvm::hashing::detail::hash_value_tuple_helper(
+ arg, typename std::index_sequence_for<Ts...>());
+}
+
+// Declared and documented above, but defined here so that any of the hashing
+// infrastructure is available.
+template <typename T>
+hash_code hash_value(const std::basic_string<T> &arg) {
+ return hash_combine_range(arg.begin(), arg.end());
+}
+
+template <> struct DenseMapInfo<hash_code, void> {
+ static inline hash_code getEmptyKey() { return hash_code(-1); }
+ static inline hash_code getTombstoneKey() { return hash_code(-2); }
+ static unsigned getHashValue(hash_code val) { return val; }
+ static bool isEqual(hash_code LHS, hash_code RHS) { return LHS == RHS; }
+};
+
+} // namespace llvm
+
+#endif
+
+#ifdef __GNUC__
+#pragma GCC diagnostic pop
+#endif
diff --git a/contrib/libs/llvm14/include/llvm/ADT/ImmutableList.h b/contrib/libs/llvm14/include/llvm/ADT/ImmutableList.h
new file mode 100644
index 0000000000..0be8d88a92
--- /dev/null
+++ b/contrib/libs/llvm14/include/llvm/ADT/ImmutableList.h
@@ -0,0 +1,257 @@
+#pragma once
+
+#ifdef __GNUC__
+#pragma GCC diagnostic push
+#pragma GCC diagnostic ignored "-Wunused-parameter"
+#endif
+
+//==--- ImmutableList.h - Immutable (functional) list interface --*- C++ -*-==//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+///
+/// \file
+/// This file defines the ImmutableList class.
+///
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_ADT_IMMUTABLELIST_H
+#define LLVM_ADT_IMMUTABLELIST_H
+
+#include "llvm/ADT/FoldingSet.h"
+#include "llvm/Support/Allocator.h"
+#include <cassert>
+#include <cstdint>
+#include <new>
+
+namespace llvm {
+
+template <typename T> class ImmutableListFactory;
+
+template <typename T>
+class ImmutableListImpl : public FoldingSetNode {
+ friend class ImmutableListFactory<T>;
+
+ T Head;
+ const ImmutableListImpl* Tail;
+
+ template <typename ElemT>
+ ImmutableListImpl(ElemT &&head, const ImmutableListImpl *tail = nullptr)
+ : Head(std::forward<ElemT>(head)), Tail(tail) {}
+
+public:
+ ImmutableListImpl(const ImmutableListImpl &) = delete;
+ ImmutableListImpl &operator=(const ImmutableListImpl &) = delete;
+
+ const T& getHead() const { return Head; }
+ const ImmutableListImpl* getTail() const { return Tail; }
+
+ static inline void Profile(FoldingSetNodeID& ID, const T& H,
+ const ImmutableListImpl* L){
+ ID.AddPointer(L);
+ ID.Add(H);
+ }
+
+ void Profile(FoldingSetNodeID& ID) {
+ Profile(ID, Head, Tail);
+ }
+};
+
+/// ImmutableList - This class represents an immutable (functional) list.
+/// It is implemented as a smart pointer (wraps ImmutableListImpl), so it
+/// is intended to always be copied by value as if it were a pointer.
+/// This interface matches ImmutableSet and ImmutableMap. ImmutableList
+/// objects should almost never be created directly, and instead should
+/// be created by ImmutableListFactory objects that manage the lifetime
+/// of a group of lists. When the factory object is reclaimed, all lists
+/// created by that factory are released as well.
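+///
+/// A minimal usage sketch (illustrative only):
+/// \code
+///   ImmutableList<int>::Factory F;
+///   ImmutableList<int> L = F.create(1);   // [1]
+///   ImmutableList<int> L2 = F.add(2, L);  // [2, 1]; L is unchanged
+/// \endcode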
+template <typename T>
+class ImmutableList {
+public:
+ using value_type = T;
+ using Factory = ImmutableListFactory<T>;
+
+ static_assert(std::is_trivially_destructible<T>::value,
+ "T must be trivially destructible!");
+
+private:
+ const ImmutableListImpl<T>* X;
+
+public:
+ // This constructor should normally only be called by ImmutableListFactory<T>.
+ // There may be cases, however, when one needs to extract the internal pointer
+ // and reconstruct a list object from that pointer.
+ ImmutableList(const ImmutableListImpl<T>* x = nullptr) : X(x) {}
+
+ const ImmutableListImpl<T>* getInternalPointer() const {
+ return X;
+ }
+
+ class iterator {
+ const ImmutableListImpl<T>* L = nullptr;
+
+ public:
+ iterator() = default;
+ iterator(ImmutableList l) : L(l.getInternalPointer()) {}
+
+ iterator& operator++() { L = L->getTail(); return *this; }
+ bool operator==(const iterator& I) const { return L == I.L; }
+ bool operator!=(const iterator& I) const { return L != I.L; }
+ const value_type& operator*() const { return L->getHead(); }
+ const typename std::remove_reference<value_type>::type* operator->() const {
+ return &L->getHead();
+ }
+
+ ImmutableList getList() const { return L; }
+ };
+
+ /// begin - Returns an iterator referring to the head of the list, or
+ /// an iterator denoting the end of the list if the list is empty.
+ iterator begin() const { return iterator(X); }
+
+ /// end - Returns an iterator denoting the end of the list. This iterator
+ /// does not refer to a valid list element.
+ iterator end() const { return iterator(); }
+
+ /// isEmpty - Returns true if the list is empty.
+ bool isEmpty() const { return !X; }
+
+ bool contains(const T& V) const {
+ for (iterator I = begin(), E = end(); I != E; ++I) {
+ if (*I == V)
+ return true;
+ }
+ return false;
+ }
+
+ /// isEqual - Returns true if two lists are equal. Because all lists created
+ /// from the same ImmutableListFactory are uniqued, this has O(1) complexity
+/// because the contents of the list do not need to be compared. Note
+ /// that you should only compare two lists created from the same
+ /// ImmutableListFactory.
+ bool isEqual(const ImmutableList& L) const { return X == L.X; }
+
+ bool operator==(const ImmutableList& L) const { return isEqual(L); }
+
+ /// getHead - Returns the head of the list.
+ const T& getHead() const {
+ assert(!isEmpty() && "Cannot get the head of an empty list.");
+ return X->getHead();
+ }
+
+ /// getTail - Returns the tail of the list, which is another (possibly empty)
+ /// ImmutableList.
+ ImmutableList getTail() const {
+ return X ? X->getTail() : nullptr;
+ }
+
+ void Profile(FoldingSetNodeID& ID) const {
+ ID.AddPointer(X);
+ }
+};
+
+template <typename T>
+class ImmutableListFactory {
+ using ListTy = ImmutableListImpl<T>;
+ using CacheTy = FoldingSet<ListTy>;
+
+ CacheTy Cache;
+ uintptr_t Allocator;
+
+ bool ownsAllocator() const {
+ return (Allocator & 0x1) == 0;
+ }
+
+ BumpPtrAllocator& getAllocator() const {
+ return *reinterpret_cast<BumpPtrAllocator*>(Allocator & ~0x1);
+ }
+
+public:
+ ImmutableListFactory()
+ : Allocator(reinterpret_cast<uintptr_t>(new BumpPtrAllocator())) {}
+
+ ImmutableListFactory(BumpPtrAllocator& Alloc)
+ : Allocator(reinterpret_cast<uintptr_t>(&Alloc) | 0x1) {}
+
+ ~ImmutableListFactory() {
+ if (ownsAllocator()) delete &getAllocator();
+ }
+
+ template <typename ElemT>
+ LLVM_NODISCARD ImmutableList<T> concat(ElemT &&Head, ImmutableList<T> Tail) {
+ // Profile the new list to see if it already exists in our cache.
+ FoldingSetNodeID ID;
+ void* InsertPos;
+
+ const ListTy* TailImpl = Tail.getInternalPointer();
+ ListTy::Profile(ID, Head, TailImpl);
+ ListTy* L = Cache.FindNodeOrInsertPos(ID, InsertPos);
+
+ if (!L) {
+ // The list does not exist in our cache. Create it.
+ BumpPtrAllocator& A = getAllocator();
+ L = (ListTy*) A.Allocate<ListTy>();
+ new (L) ListTy(std::forward<ElemT>(Head), TailImpl);
+
+ // Insert the new list into the cache.
+ Cache.InsertNode(L, InsertPos);
+ }
+
+ return L;
+ }
+
+ template <typename ElemT>
+ LLVM_NODISCARD ImmutableList<T> add(ElemT &&Data, ImmutableList<T> L) {
+ return concat(std::forward<ElemT>(Data), L);
+ }
+
+ template <typename ...CtorArgs>
+ LLVM_NODISCARD ImmutableList<T> emplace(ImmutableList<T> Tail,
+ CtorArgs &&...Args) {
+ return concat(T(std::forward<CtorArgs>(Args)...), Tail);
+ }
+
+ ImmutableList<T> getEmptyList() const {
+ return ImmutableList<T>(nullptr);
+ }
+
+ template <typename ElemT>
+ ImmutableList<T> create(ElemT &&Data) {
+ return concat(std::forward<ElemT>(Data), getEmptyList());
+ }
+};
+
+//===----------------------------------------------------------------------===//
+// Partially-specialized Traits.
+//===----------------------------------------------------------------------===//
+
+template <typename T> struct DenseMapInfo<ImmutableList<T>, void> {
+ static inline ImmutableList<T> getEmptyKey() {
+ return reinterpret_cast<ImmutableListImpl<T>*>(-1);
+ }
+
+ static inline ImmutableList<T> getTombstoneKey() {
+ return reinterpret_cast<ImmutableListImpl<T>*>(-2);
+ }
+
+ static unsigned getHashValue(ImmutableList<T> X) {
+ uintptr_t PtrVal = reinterpret_cast<uintptr_t>(X.getInternalPointer());
+ return (unsigned(PtrVal) >> 4) ^
+ (unsigned(PtrVal) >> 9);
+ }
+
+ static bool isEqual(ImmutableList<T> X1, ImmutableList<T> X2) {
+ return X1 == X2;
+ }
+};
+
+} // end namespace llvm
+
+#endif // LLVM_ADT_IMMUTABLELIST_H
+
+#ifdef __GNUC__
+#pragma GCC diagnostic pop
+#endif
diff --git a/contrib/libs/llvm14/include/llvm/ADT/ImmutableMap.h b/contrib/libs/llvm14/include/llvm/ADT/ImmutableMap.h
new file mode 100644
index 0000000000..0ab26f6b2a
--- /dev/null
+++ b/contrib/libs/llvm14/include/llvm/ADT/ImmutableMap.h
@@ -0,0 +1,341 @@
+#pragma once
+
+#ifdef __GNUC__
+#pragma GCC diagnostic push
+#pragma GCC diagnostic ignored "-Wunused-parameter"
+#endif
+
+//===--- ImmutableMap.h - Immutable (functional) map interface --*- C++ -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+///
+/// \file
+/// This file defines the ImmutableMap class.
+///
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_ADT_IMMUTABLEMAP_H
+#define LLVM_ADT_IMMUTABLEMAP_H
+
+#include "llvm/ADT/FoldingSet.h"
+#include "llvm/ADT/ImmutableSet.h"
+#include "llvm/Support/Allocator.h"
+#include <utility>
+
+namespace llvm {
+
+/// ImutKeyValueInfo - Traits class used by ImmutableMap. While both the first
+/// and second elements in a pair are used to generate profile information,
+/// only the first element (the key) is used by isEqual and isLess.
+template <typename T, typename S>
+struct ImutKeyValueInfo {
+ using value_type = const std::pair<T,S>;
+ using value_type_ref = const value_type&;
+ using key_type = const T;
+ using key_type_ref = const T&;
+ using data_type = const S;
+ using data_type_ref = const S&;
+
+ static inline key_type_ref KeyOfValue(value_type_ref V) {
+ return V.first;
+ }
+
+ static inline data_type_ref DataOfValue(value_type_ref V) {
+ return V.second;
+ }
+
+ static inline bool isEqual(key_type_ref L, key_type_ref R) {
+ return ImutContainerInfo<T>::isEqual(L,R);
+ }
+ static inline bool isLess(key_type_ref L, key_type_ref R) {
+ return ImutContainerInfo<T>::isLess(L,R);
+ }
+
+ static inline bool isDataEqual(data_type_ref L, data_type_ref R) {
+ return ImutContainerInfo<S>::isEqual(L,R);
+ }
+
+ static inline void Profile(FoldingSetNodeID& ID, value_type_ref V) {
+ ImutContainerInfo<T>::Profile(ID, V.first);
+ ImutContainerInfo<S>::Profile(ID, V.second);
+ }
+};
+
+template <typename KeyT, typename ValT,
+ typename ValInfo = ImutKeyValueInfo<KeyT,ValT>>
+class ImmutableMap {
+public:
+ using value_type = typename ValInfo::value_type;
+ using value_type_ref = typename ValInfo::value_type_ref;
+ using key_type = typename ValInfo::key_type;
+ using key_type_ref = typename ValInfo::key_type_ref;
+ using data_type = typename ValInfo::data_type;
+ using data_type_ref = typename ValInfo::data_type_ref;
+ using TreeTy = ImutAVLTree<ValInfo>;
+
+protected:
+ IntrusiveRefCntPtr<TreeTy> Root;
+
+public:
+ /// Constructs a map from a pointer to a tree root. In general one
+ /// should use a Factory object to create maps instead of directly
+ /// invoking the constructor, but there are cases where making this
+ /// constructor public is useful.
+ explicit ImmutableMap(const TreeTy *R) : Root(const_cast<TreeTy *>(R)) {}
+
+ class Factory {
+ typename TreeTy::Factory F;
+ const bool Canonicalize;
+
+ public:
+ Factory(bool canonicalize = true) : Canonicalize(canonicalize) {}
+
+ Factory(BumpPtrAllocator &Alloc, bool canonicalize = true)
+ : F(Alloc), Canonicalize(canonicalize) {}
+
+ Factory(const Factory &) = delete;
+ Factory &operator=(const Factory &) = delete;
+
+ ImmutableMap getEmptyMap() { return ImmutableMap(F.getEmptyTree()); }
+
+ LLVM_NODISCARD ImmutableMap add(ImmutableMap Old, key_type_ref K,
+ data_type_ref D) {
+ TreeTy *T = F.add(Old.Root.get(), std::pair<key_type, data_type>(K, D));
+ return ImmutableMap(Canonicalize ? F.getCanonicalTree(T): T);
+ }
+
+ LLVM_NODISCARD ImmutableMap remove(ImmutableMap Old, key_type_ref K) {
+ TreeTy *T = F.remove(Old.Root.get(), K);
+ return ImmutableMap(Canonicalize ? F.getCanonicalTree(T): T);
+ }
+
+ typename TreeTy::Factory *getTreeFactory() const {
+ return const_cast<typename TreeTy::Factory *>(&F);
+ }
+ };
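+
+ // Usage sketch (illustrative only): maps are persistent, so add/remove
+ // return new maps and never modify their argument:
+ //
+ //   ImmutableMap<int, int>::Factory F;
+ //   ImmutableMap<int, int> M = F.getEmptyMap();
+ //   M = F.add(M, 1, 10);
+ //   if (const int *V = M.lookup(1)) { /* *V == 10 */ }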
+
+ bool contains(key_type_ref K) const {
+ return Root ? Root->contains(K) : false;
+ }
+
+ bool operator==(const ImmutableMap &RHS) const {
+ return Root && RHS.Root ? Root->isEqual(*RHS.Root.get()) : Root == RHS.Root;
+ }
+
+ bool operator!=(const ImmutableMap &RHS) const {
+ return Root && RHS.Root ? Root->isNotEqual(*RHS.Root.get())
+ : Root != RHS.Root;
+ }
+
+ TreeTy *getRoot() const {
+ if (Root) { Root->retain(); }
+ return Root.get();
+ }
+
+ TreeTy *getRootWithoutRetain() const { return Root.get(); }
+
+ void manualRetain() {
+ if (Root) Root->retain();
+ }
+
+ void manualRelease() {
+ if (Root) Root->release();
+ }
+
+ bool isEmpty() const { return !Root; }
+
+public:
+ //===--------------------------------------------------===//
+ // For testing.
+ //===--------------------------------------------------===//
+
+ void verify() const { if (Root) Root->verify(); }
+
+ //===--------------------------------------------------===//
+ // Iterators.
+ //===--------------------------------------------------===//
+
+ class iterator : public ImutAVLValueIterator<ImmutableMap> {
+ friend class ImmutableMap;
+
+ iterator() = default;
+ explicit iterator(TreeTy *Tree) : iterator::ImutAVLValueIterator(Tree) {}
+
+ public:
+ key_type_ref getKey() const { return (*this)->first; }
+ data_type_ref getData() const { return (*this)->second; }
+ };
+
+ iterator begin() const { return iterator(Root.get()); }
+ iterator end() const { return iterator(); }
+
+ data_type* lookup(key_type_ref K) const {
+ if (Root) {
+ TreeTy* T = Root->find(K);
+ if (T) return &T->getValue().second;
+ }
+
+ return nullptr;
+ }
+
+ /// getMaxElement - Returns the <key,value> pair in the ImmutableMap for
+ /// which key is the highest in the ordering of keys in the map. This
+ /// method returns NULL if the map is empty.
+ value_type* getMaxElement() const {
+ return Root ? &(Root->getMaxElement()->getValue()) : nullptr;
+ }
+
+ //===--------------------------------------------------===//
+ // Utility methods.
+ //===--------------------------------------------------===//
+
+ unsigned getHeight() const { return Root ? Root->getHeight() : 0; }
+
+ static inline void Profile(FoldingSetNodeID& ID, const ImmutableMap& M) {
+ ID.AddPointer(M.Root.get());
+ }
+
+ inline void Profile(FoldingSetNodeID& ID) const {
+ return Profile(ID,*this);
+ }
+};
+
+// NOTE: This will possibly become the new implementation of ImmutableMap some day.
+template <typename KeyT, typename ValT,
+typename ValInfo = ImutKeyValueInfo<KeyT,ValT>>
+class ImmutableMapRef {
+public:
+ using value_type = typename ValInfo::value_type;
+ using value_type_ref = typename ValInfo::value_type_ref;
+ using key_type = typename ValInfo::key_type;
+ using key_type_ref = typename ValInfo::key_type_ref;
+ using data_type = typename ValInfo::data_type;
+ using data_type_ref = typename ValInfo::data_type_ref;
+ using TreeTy = ImutAVLTree<ValInfo>;
+ using FactoryTy = typename TreeTy::Factory;
+
+protected:
+ IntrusiveRefCntPtr<TreeTy> Root;
+ FactoryTy *Factory;
+
+public:
+ /// Constructs a map from a pointer to a tree root. In general one
+ /// should use a Factory object to create maps instead of directly
+ /// invoking the constructor, but there are cases where make this
+ /// constructor public is useful.
+ ImmutableMapRef(const TreeTy *R, FactoryTy *F)
+ : Root(const_cast<TreeTy *>(R)), Factory(F) {}
+
+ ImmutableMapRef(const ImmutableMap<KeyT, ValT> &X,
+ typename ImmutableMap<KeyT, ValT>::Factory &F)
+ : Root(X.getRootWithoutRetain()), Factory(F.getTreeFactory()) {}
+
+ static inline ImmutableMapRef getEmptyMap(FactoryTy *F) {
+ return ImmutableMapRef(nullptr, F);
+ }
+
+ void manualRetain() {
+ if (Root) Root->retain();
+ }
+
+ void manualRelease() {
+ if (Root) Root->release();
+ }
+
+ ImmutableMapRef add(key_type_ref K, data_type_ref D) const {
+ TreeTy *NewT =
+ Factory->add(Root.get(), std::pair<key_type, data_type>(K, D));
+ return ImmutableMapRef(NewT, Factory);
+ }
+
+ ImmutableMapRef remove(key_type_ref K) const {
+ TreeTy *NewT = Factory->remove(Root.get(), K);
+ return ImmutableMapRef(NewT, Factory);
+ }
+
+ bool contains(key_type_ref K) const {
+ return Root ? Root->contains(K) : false;
+ }
+
+ ImmutableMap<KeyT, ValT> asImmutableMap() const {
+ return ImmutableMap<KeyT, ValT>(Factory->getCanonicalTree(Root.get()));
+ }
+
+ bool operator==(const ImmutableMapRef &RHS) const {
+ return Root && RHS.Root ? Root->isEqual(*RHS.Root.get()) : Root == RHS.Root;
+ }
+
+ bool operator!=(const ImmutableMapRef &RHS) const {
+ return Root && RHS.Root ? Root->isNotEqual(*RHS.Root.get())
+ : Root != RHS.Root;
+ }
+
+ bool isEmpty() const { return !Root; }
+
+ //===--------------------------------------------------===//
+ // For testing.
+ //===--------------------------------------------------===//
+
+ void verify() const {
+ if (Root)
+ Root->verify();
+ }
+
+ //===--------------------------------------------------===//
+ // Iterators.
+ //===--------------------------------------------------===//
+
+ class iterator : public ImutAVLValueIterator<ImmutableMapRef> {
+ friend class ImmutableMapRef;
+
+ iterator() = default;
+ explicit iterator(TreeTy *Tree) : iterator::ImutAVLValueIterator(Tree) {}
+
+ public:
+ key_type_ref getKey() const { return (*this)->first; }
+ data_type_ref getData() const { return (*this)->second; }
+ };
+
+ iterator begin() const { return iterator(Root.get()); }
+ iterator end() const { return iterator(); }
+
+ data_type *lookup(key_type_ref K) const {
+ if (Root) {
+ TreeTy* T = Root->find(K);
+ if (T) return &T->getValue().second;
+ }
+
+ return nullptr;
+ }
+
+ /// getMaxElement - Returns the <key,value> pair in the ImmutableMap for
+ /// which key is the highest in the ordering of keys in the map. This
+ /// method returns NULL if the map is empty.
+ value_type* getMaxElement() const {
+ return Root ? &(Root->getMaxElement()->getValue()) : nullptr;
+ }
+
+ //===--------------------------------------------------===//
+ // Utility methods.
+ //===--------------------------------------------------===//
+
+ unsigned getHeight() const { return Root ? Root->getHeight() : 0; }
+
+ static inline void Profile(FoldingSetNodeID &ID, const ImmutableMapRef &M) {
+ ID.AddPointer(M.Root.get());
+ }
+
+ inline void Profile(FoldingSetNodeID &ID) const { return Profile(ID, *this); }
+};
+
+} // end namespace llvm
+
+#endif // LLVM_ADT_IMMUTABLEMAP_H
+
+#ifdef __GNUC__
+#pragma GCC diagnostic pop
+#endif
diff --git a/contrib/libs/llvm14/include/llvm/ADT/ImmutableSet.h b/contrib/libs/llvm14/include/llvm/ADT/ImmutableSet.h
new file mode 100644
index 0000000000..54100fcfbc
--- /dev/null
+++ b/contrib/libs/llvm14/include/llvm/ADT/ImmutableSet.h
@@ -0,0 +1,1182 @@
+#pragma once
+
+#ifdef __GNUC__
+#pragma GCC diagnostic push
+#pragma GCC diagnostic ignored "-Wunused-parameter"
+#endif
+
+//===--- ImmutableSet.h - Immutable (functional) set interface --*- C++ -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+///
+/// \file
+/// This file defines the ImutAVLTree and ImmutableSet classes.
+///
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_ADT_IMMUTABLESET_H
+#define LLVM_ADT_IMMUTABLESET_H
+
+#include "llvm/ADT/DenseMap.h"
+#include "llvm/ADT/FoldingSet.h"
+#include "llvm/ADT/IntrusiveRefCntPtr.h"
+#include "llvm/ADT/SmallVector.h"
+#include "llvm/ADT/iterator.h"
+#include "llvm/Support/Allocator.h"
+#include "llvm/Support/ErrorHandling.h"
+#include <cassert>
+#include <cstdint>
+#include <functional>
+#include <iterator>
+#include <new>
+#include <vector>
+
+namespace llvm {
+
+//===----------------------------------------------------------------------===//
+// Immutable AVL-Tree Definition.
+//===----------------------------------------------------------------------===//
+
+template <typename ImutInfo> class ImutAVLFactory;
+template <typename ImutInfo> class ImutIntervalAVLFactory;
+template <typename ImutInfo> class ImutAVLTreeInOrderIterator;
+template <typename ImutInfo> class ImutAVLTreeGenericIterator;
+
+template <typename ImutInfo >
+class ImutAVLTree {
+public:
+ using key_type_ref = typename ImutInfo::key_type_ref;
+ using value_type = typename ImutInfo::value_type;
+ using value_type_ref = typename ImutInfo::value_type_ref;
+ using Factory = ImutAVLFactory<ImutInfo>;
+ using iterator = ImutAVLTreeInOrderIterator<ImutInfo>;
+
+ friend class ImutAVLFactory<ImutInfo>;
+ friend class ImutIntervalAVLFactory<ImutInfo>;
+ friend class ImutAVLTreeGenericIterator<ImutInfo>;
+
+ //===----------------------------------------------------===//
+ // Public Interface.
+ //===----------------------------------------------------===//
+
+ /// Return a pointer to the left subtree. This value
+ /// is NULL if there is no left subtree.
+ ImutAVLTree *getLeft() const { return left; }
+
+ /// Return a pointer to the right subtree. This value is
+ /// NULL if there is no right subtree.
+ ImutAVLTree *getRight() const { return right; }
+
+ /// getHeight - Returns the height of the tree. A tree with no subtrees
+ /// has a height of 1.
+ unsigned getHeight() const { return height; }
+
+ /// getValue - Returns the data value associated with the tree node.
+ const value_type& getValue() const { return value; }
+
+ /// find - Finds the subtree associated with the specified key value.
+ /// This method returns NULL if no matching subtree is found.
+ ImutAVLTree* find(key_type_ref K) {
+ ImutAVLTree *T = this;
+ while (T) {
+ key_type_ref CurrentKey = ImutInfo::KeyOfValue(T->getValue());
+ if (ImutInfo::isEqual(K,CurrentKey))
+ return T;
+ else if (ImutInfo::isLess(K,CurrentKey))
+ T = T->getLeft();
+ else
+ T = T->getRight();
+ }
+ return nullptr;
+ }
+
+ /// getMaxElement - Find the subtree associated with the highest ranked
+ /// key value.
+ ImutAVLTree* getMaxElement() {
+ ImutAVLTree *T = this;
+ ImutAVLTree *Right = T->getRight();
+ while (Right) { T = Right; Right = T->getRight(); }
+ return T;
+ }
+
+ /// size - Returns the number of nodes in the tree, which includes
+ /// both leaves and non-leaf nodes.
+ unsigned size() const {
+ unsigned n = 1;
+ if (const ImutAVLTree* L = getLeft())
+ n += L->size();
+ if (const ImutAVLTree* R = getRight())
+ n += R->size();
+ return n;
+ }
+
+ /// begin - Returns an iterator that iterates over the nodes of the tree
+ /// in an inorder traversal. The returned iterator thus refers to the
+ /// tree node with the minimum data element.
+ iterator begin() const { return iterator(this); }
+
+ /// end - Returns an iterator for the tree that denotes the end of an
+ /// inorder traversal.
+ iterator end() const { return iterator(); }
+
+ bool isElementEqual(value_type_ref V) const {
+ // Compare the keys.
+ if (!ImutInfo::isEqual(ImutInfo::KeyOfValue(getValue()),
+ ImutInfo::KeyOfValue(V)))
+ return false;
+
+ // Also compare the data values.
+ if (!ImutInfo::isDataEqual(ImutInfo::DataOfValue(getValue()),
+ ImutInfo::DataOfValue(V)))
+ return false;
+
+ return true;
+ }
+
+ bool isElementEqual(const ImutAVLTree* RHS) const {
+ return isElementEqual(RHS->getValue());
+ }
+
+ /// isEqual - Compares two trees for structural equality and returns true
+ /// if they are equal. The worst case performance of this operation is
+ /// linear in the sizes of the trees.
+ bool isEqual(const ImutAVLTree& RHS) const {
+ if (&RHS == this)
+ return true;
+
+ iterator LItr = begin(), LEnd = end();
+ iterator RItr = RHS.begin(), REnd = RHS.end();
+
+ while (LItr != LEnd && RItr != REnd) {
+ if (&*LItr == &*RItr) {
+ LItr.skipSubTree();
+ RItr.skipSubTree();
+ continue;
+ }
+
+ if (!LItr->isElementEqual(&*RItr))
+ return false;
+
+ ++LItr;
+ ++RItr;
+ }
+
+ return LItr == LEnd && RItr == REnd;
+ }
+
+ /// isNotEqual - Compares two trees for structural inequality. Performance
+ /// is the same as isEqual.
+ bool isNotEqual(const ImutAVLTree& RHS) const { return !isEqual(RHS); }
+
+ /// contains - Returns true if this tree contains a subtree (node) that
+ /// has a data element that matches the specified key. Complexity
+ /// is logarithmic in the size of the tree.
+ bool contains(key_type_ref K) { return (bool) find(K); }
+
+ /// validateTree - A utility method that checks that the balancing and
+ /// ordering invariants of the tree are satisfied. It is a recursive
+ /// method that returns the height of the tree, which is then consumed
+ /// by the enclosing validateTree call. External callers should ignore the
+ /// return value. An invalid tree will cause an assertion to fire in
+ /// a debug build.
+ unsigned validateTree() const {
+ unsigned HL = getLeft() ? getLeft()->validateTree() : 0;
+ unsigned HR = getRight() ? getRight()->validateTree() : 0;
+ (void) HL;
+ (void) HR;
+
+ assert(getHeight() == ( HL > HR ? HL : HR ) + 1
+ && "Height calculation wrong");
+
+ assert((HL > HR ? HL-HR : HR-HL) <= 2
+ && "Balancing invariant violated");
+
+ assert((!getLeft() ||
+ ImutInfo::isLess(ImutInfo::KeyOfValue(getLeft()->getValue()),
+ ImutInfo::KeyOfValue(getValue()))) &&
+ "Value in left child is not less that current value");
+
+ assert((!getRight() ||
+ ImutInfo::isLess(ImutInfo::KeyOfValue(getValue()),
+ ImutInfo::KeyOfValue(getRight()->getValue()))) &&
+ "Current value is not less that value of right child");
+
+ return getHeight();
+ }
+
+ //===----------------------------------------------------===//
+ // Internal values.
+ //===----------------------------------------------------===//
+
+private:
+ Factory *factory;
+ ImutAVLTree *left;
+ ImutAVLTree *right;
+ ImutAVLTree *prev = nullptr;
+ ImutAVLTree *next = nullptr;
+
+ unsigned height : 28;
+ bool IsMutable : 1;
+ bool IsDigestCached : 1;
+ bool IsCanonicalized : 1;
+
+ value_type value;
+ uint32_t digest = 0;
+ uint32_t refCount = 0;
+
+ //===----------------------------------------------------===//
+ // Internal methods (node manipulation; used by Factory).
+ //===----------------------------------------------------===//
+
+private:
+ /// ImutAVLTree - Internal constructor that is only called by
+ /// ImutAVLFactory.
+ ImutAVLTree(Factory *f, ImutAVLTree* l, ImutAVLTree* r, value_type_ref v,
+ unsigned height)
+ : factory(f), left(l), right(r), height(height), IsMutable(true),
+ IsDigestCached(false), IsCanonicalized(false), value(v)
+ {
+ if (left) left->retain();
+ if (right) right->retain();
+ }
+
+ /// isMutable - Returns true if the left and right subtree references
+ /// (as well as height) can be changed. If this method returns false,
+ /// the tree is truly immutable. Trees returned from an ImutAVLFactory
+  /// object should always have this method return false. Further, if this
+ /// method returns false for an instance of ImutAVLTree, all subtrees
+ /// will also have this method return false. The converse is not true.
+ bool isMutable() const { return IsMutable; }
+
+ /// hasCachedDigest - Returns true if the digest for this tree is cached.
+ /// This can only be true if the tree is immutable.
+ bool hasCachedDigest() const { return IsDigestCached; }
+
+ //===----------------------------------------------------===//
+ // Mutating operations. A tree root can be manipulated as
+ // long as its reference has not "escaped" from internal
+ // methods of a factory object (see below). When a tree
+ // pointer is externally viewable by client code, the
+ // internal "mutable bit" is cleared to mark the tree
+ // immutable. Note that a tree that still has its mutable
+ // bit set may have children (subtrees) that are themselves
+ // immutable.
+ //===----------------------------------------------------===//
+
+ /// markImmutable - Clears the mutable flag for a tree. After this happens,
+ /// it is an error to call setLeft(), setRight(), and setHeight().
+ void markImmutable() {
+ assert(isMutable() && "Mutable flag already removed.");
+ IsMutable = false;
+ }
+
+  /// markedCachedDigest - Marks the digest for this tree as cached.
+  void markedCachedDigest() {
+    assert(!hasCachedDigest() && "Digest is already cached.");
+ IsDigestCached = true;
+ }
+
+ /// setHeight - Changes the height of the tree. Used internally by
+ /// ImutAVLFactory.
+ void setHeight(unsigned h) {
+ assert(isMutable() && "Only a mutable tree can have its height changed.");
+ height = h;
+ }
+
+ static uint32_t computeDigest(ImutAVLTree *L, ImutAVLTree *R,
+ value_type_ref V) {
+ uint32_t digest = 0;
+
+ if (L)
+ digest += L->computeDigest();
+
+ // Compute digest of stored data.
+ FoldingSetNodeID ID;
+ ImutInfo::Profile(ID,V);
+ digest += ID.ComputeHash();
+
+ if (R)
+ digest += R->computeDigest();
+
+ return digest;
+ }
+
+ uint32_t computeDigest() {
+ // Check the lowest bit to determine if digest has actually been
+ // pre-computed.
+ if (hasCachedDigest())
+ return digest;
+
+ uint32_t X = computeDigest(getLeft(), getRight(), getValue());
+ digest = X;
+ markedCachedDigest();
+ return X;
+ }
+
+ //===----------------------------------------------------===//
+ // Reference count operations.
+ //===----------------------------------------------------===//
+
+public:
+ void retain() { ++refCount; }
+
+ void release() {
+ assert(refCount > 0);
+ if (--refCount == 0)
+ destroy();
+ }
+
+ void destroy() {
+ if (left)
+ left->release();
+ if (right)
+ right->release();
+ if (IsCanonicalized) {
+ if (next)
+ next->prev = prev;
+
+ if (prev)
+ prev->next = next;
+ else
+ factory->Cache[factory->maskCacheIndex(computeDigest())] = next;
+ }
+
+ // We need to clear the mutability bit in case we are
+ // destroying the node as part of a sweep in ImutAVLFactory::recoverNodes().
+ IsMutable = false;
+ factory->freeNodes.push_back(this);
+ }
+};
+
+template <typename ImutInfo>
+struct IntrusiveRefCntPtrInfo<ImutAVLTree<ImutInfo>> {
+ static void retain(ImutAVLTree<ImutInfo> *Tree) { Tree->retain(); }
+ static void release(ImutAVLTree<ImutInfo> *Tree) { Tree->release(); }
+};
+
+//===----------------------------------------------------------------------===//
+// Immutable AVL-Tree Factory class.
+//===----------------------------------------------------------------------===//
+
+template <typename ImutInfo >
+class ImutAVLFactory {
+ friend class ImutAVLTree<ImutInfo>;
+
+ using TreeTy = ImutAVLTree<ImutInfo>;
+ using value_type_ref = typename TreeTy::value_type_ref;
+ using key_type_ref = typename TreeTy::key_type_ref;
+ using CacheTy = DenseMap<unsigned, TreeTy*>;
+
+ CacheTy Cache;
+ uintptr_t Allocator;
+ std::vector<TreeTy*> createdNodes;
+ std::vector<TreeTy*> freeNodes;
+
+ bool ownsAllocator() const {
+ return (Allocator & 0x1) == 0;
+ }
+
+ BumpPtrAllocator& getAllocator() const {
+ return *reinterpret_cast<BumpPtrAllocator*>(Allocator & ~0x1);
+ }
+
+ //===--------------------------------------------------===//
+ // Public interface.
+ //===--------------------------------------------------===//
+
+public:
+ ImutAVLFactory()
+ : Allocator(reinterpret_cast<uintptr_t>(new BumpPtrAllocator())) {}
+
+ ImutAVLFactory(BumpPtrAllocator& Alloc)
+ : Allocator(reinterpret_cast<uintptr_t>(&Alloc) | 0x1) {}
+
+ ~ImutAVLFactory() {
+ if (ownsAllocator()) delete &getAllocator();
+ }
+
+ TreeTy* add(TreeTy* T, value_type_ref V) {
+ T = add_internal(V,T);
+ markImmutable(T);
+ recoverNodes();
+ return T;
+ }
+
+ TreeTy* remove(TreeTy* T, key_type_ref V) {
+ T = remove_internal(V,T);
+ markImmutable(T);
+ recoverNodes();
+ return T;
+ }
+
+ TreeTy* getEmptyTree() const { return nullptr; }
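+
+  // A minimal usage sketch (illustrative only; clients normally go through
+  // ImmutableSet/ImmutableMap rather than calling the factory directly):
+  //
+  //   ImutAVLFactory<ImutContainerInfo<int>> F;
+  //   ImutAVLTree<ImutContainerInfo<int>> *T = F.getEmptyTree();
+  //   T = F.add(T, 3);    // new tree containing {3}
+  //   T = F.remove(T, 3); // back to the empty tree (nullptr)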
+
+protected:
+ //===--------------------------------------------------===//
+ // A bunch of quick helper functions used for reasoning
+ // about the properties of trees and their children.
+ // These have succinct names so that the balancing code
+ // is as terse (and readable) as possible.
+ //===--------------------------------------------------===//
+
+ bool isEmpty(TreeTy* T) const { return !T; }
+ unsigned getHeight(TreeTy* T) const { return T ? T->getHeight() : 0; }
+ TreeTy* getLeft(TreeTy* T) const { return T->getLeft(); }
+ TreeTy* getRight(TreeTy* T) const { return T->getRight(); }
+ value_type_ref getValue(TreeTy* T) const { return T->value; }
+
+  // Make sure the index is not the Tombstone or Empty key of the DenseMap.
+ static unsigned maskCacheIndex(unsigned I) { return (I & ~0x02); }
+
+ unsigned incrementHeight(TreeTy* L, TreeTy* R) const {
+ unsigned hl = getHeight(L);
+ unsigned hr = getHeight(R);
+ return (hl > hr ? hl : hr) + 1;
+ }
+
+ static bool compareTreeWithSection(TreeTy* T,
+ typename TreeTy::iterator& TI,
+ typename TreeTy::iterator& TE) {
+ typename TreeTy::iterator I = T->begin(), E = T->end();
+ for ( ; I!=E ; ++I, ++TI) {
+ if (TI == TE || !I->isElementEqual(&*TI))
+ return false;
+ }
+ return true;
+ }
+
+ //===--------------------------------------------------===//
+ // "createNode" is used to generate new tree roots that link
+ // to other trees. The function may also simply move links
+ // in an existing root if that root is still marked mutable.
+ // This is necessary because otherwise our balancing code
+ // would leak memory as it would create nodes that are
+ // then discarded later before the finished tree is
+ // returned to the caller.
+ //===--------------------------------------------------===//
+
+ TreeTy* createNode(TreeTy* L, value_type_ref V, TreeTy* R) {
+ BumpPtrAllocator& A = getAllocator();
+ TreeTy* T;
+ if (!freeNodes.empty()) {
+ T = freeNodes.back();
+ freeNodes.pop_back();
+ assert(T != L);
+ assert(T != R);
+ } else {
+ T = (TreeTy*) A.Allocate<TreeTy>();
+ }
+ new (T) TreeTy(this, L, R, V, incrementHeight(L,R));
+ createdNodes.push_back(T);
+ return T;
+ }
+
+ TreeTy* createNode(TreeTy* newLeft, TreeTy* oldTree, TreeTy* newRight) {
+ return createNode(newLeft, getValue(oldTree), newRight);
+ }
+
+ void recoverNodes() {
+ for (unsigned i = 0, n = createdNodes.size(); i < n; ++i) {
+ TreeTy *N = createdNodes[i];
+ if (N->isMutable() && N->refCount == 0)
+ N->destroy();
+ }
+ createdNodes.clear();
+ }
+
+ /// balanceTree - Used by add_internal and remove_internal to
+ /// balance a newly created tree.
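+  /// A single rotation suffices when the outer grandchild subtree is at
+  /// least as tall as the inner one; otherwise a double rotation is
+  /// performed (the two nested createNode cases below).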
+ TreeTy* balanceTree(TreeTy* L, value_type_ref V, TreeTy* R) {
+ unsigned hl = getHeight(L);
+ unsigned hr = getHeight(R);
+
+ if (hl > hr + 2) {
+ assert(!isEmpty(L) && "Left tree cannot be empty to have a height >= 2");
+
+ TreeTy *LL = getLeft(L);
+ TreeTy *LR = getRight(L);
+
+ if (getHeight(LL) >= getHeight(LR))
+ return createNode(LL, L, createNode(LR,V,R));
+
+ assert(!isEmpty(LR) && "LR cannot be empty because it has a height >= 1");
+
+ TreeTy *LRL = getLeft(LR);
+ TreeTy *LRR = getRight(LR);
+
+ return createNode(createNode(LL,L,LRL), LR, createNode(LRR,V,R));
+ }
+
+ if (hr > hl + 2) {
+ assert(!isEmpty(R) && "Right tree cannot be empty to have a height >= 2");
+
+ TreeTy *RL = getLeft(R);
+ TreeTy *RR = getRight(R);
+
+ if (getHeight(RR) >= getHeight(RL))
+ return createNode(createNode(L,V,RL), R, RR);
+
+ assert(!isEmpty(RL) && "RL cannot be empty because it has a height >= 1");
+
+ TreeTy *RLL = getLeft(RL);
+ TreeTy *RLR = getRight(RL);
+
+ return createNode(createNode(L,V,RLL), RL, createNode(RLR,R,RR));
+ }
+
+ return createNode(L,V,R);
+ }
+
+ /// add_internal - Creates a new tree that includes the specified
+ /// data and the data from the original tree. If the original tree
+ /// already contained the data item, the original tree is returned.
+ TreeTy* add_internal(value_type_ref V, TreeTy* T) {
+ if (isEmpty(T))
+ return createNode(T, V, T);
+ assert(!T->isMutable());
+
+ key_type_ref K = ImutInfo::KeyOfValue(V);
+ key_type_ref KCurrent = ImutInfo::KeyOfValue(getValue(T));
+
+ if (ImutInfo::isEqual(K,KCurrent))
+ return createNode(getLeft(T), V, getRight(T));
+ else if (ImutInfo::isLess(K,KCurrent))
+ return balanceTree(add_internal(V, getLeft(T)), getValue(T), getRight(T));
+ else
+ return balanceTree(getLeft(T), getValue(T), add_internal(V, getRight(T)));
+ }
+
+ /// remove_internal - Creates a new tree that includes all the data
+ /// from the original tree except the specified data. If the
+ /// specified data did not exist in the original tree, the original
+ /// tree is returned.
+ TreeTy* remove_internal(key_type_ref K, TreeTy* T) {
+ if (isEmpty(T))
+ return T;
+
+ assert(!T->isMutable());
+
+ key_type_ref KCurrent = ImutInfo::KeyOfValue(getValue(T));
+
+ if (ImutInfo::isEqual(K,KCurrent)) {
+ return combineTrees(getLeft(T), getRight(T));
+ } else if (ImutInfo::isLess(K,KCurrent)) {
+ return balanceTree(remove_internal(K, getLeft(T)),
+ getValue(T), getRight(T));
+ } else {
+ return balanceTree(getLeft(T), getValue(T),
+ remove_internal(K, getRight(T)));
+ }
+ }
+
+ TreeTy* combineTrees(TreeTy* L, TreeTy* R) {
+ if (isEmpty(L))
+ return R;
+ if (isEmpty(R))
+ return L;
+ TreeTy* OldNode;
+ TreeTy* newRight = removeMinBinding(R,OldNode);
+ return balanceTree(L, getValue(OldNode), newRight);
+ }
+
+ TreeTy* removeMinBinding(TreeTy* T, TreeTy*& Noderemoved) {
+ assert(!isEmpty(T));
+ if (isEmpty(getLeft(T))) {
+ Noderemoved = T;
+ return getRight(T);
+ }
+ return balanceTree(removeMinBinding(getLeft(T), Noderemoved),
+ getValue(T), getRight(T));
+ }
+
+ /// markImmutable - Clears the mutable bits of a root and all of its
+ /// descendants.
+ void markImmutable(TreeTy* T) {
+ if (!T || !T->isMutable())
+ return;
+ T->markImmutable();
+ markImmutable(getLeft(T));
+ markImmutable(getRight(T));
+ }
+
+public:
+ TreeTy *getCanonicalTree(TreeTy *TNew) {
+ if (!TNew)
+ return nullptr;
+
+ if (TNew->IsCanonicalized)
+ return TNew;
+
+    // Search the hashtable for another tree with the same digest, and
+    // if a collision is found, compare those trees by their contents.
+ unsigned digest = TNew->computeDigest();
+ TreeTy *&entry = Cache[maskCacheIndex(digest)];
+ do {
+ if (!entry)
+ break;
+ for (TreeTy *T = entry ; T != nullptr; T = T->next) {
+ // Compare the Contents('T') with Contents('TNew')
+ typename TreeTy::iterator TI = T->begin(), TE = T->end();
+ if (!compareTreeWithSection(TNew, TI, TE))
+ continue;
+ if (TI != TE)
+ continue; // T has more contents than TNew.
+ // Trees did match! Return 'T'.
+ if (TNew->refCount == 0)
+ TNew->destroy();
+ return T;
+ }
+ entry->prev = TNew;
+ TNew->next = entry;
+    } while (false);
+
+ entry = TNew;
+ TNew->IsCanonicalized = true;
+ return TNew;
+ }
+};
+
+//===----------------------------------------------------------------------===//
+// Immutable AVL-Tree Iterators.
+//===----------------------------------------------------------------------===//
+
+template <typename ImutInfo> class ImutAVLTreeGenericIterator {
+ SmallVector<uintptr_t,20> stack;
+
+public:
+ using iterator_category = std::bidirectional_iterator_tag;
+ using value_type = ImutAVLTree<ImutInfo>;
+ using difference_type = std::ptrdiff_t;
+ using pointer = value_type *;
+ using reference = value_type &;
+
+ enum VisitFlag { VisitedNone=0x0, VisitedLeft=0x1, VisitedRight=0x3,
+ Flags=0x3 };
+
+ using TreeTy = ImutAVLTree<ImutInfo>;
+
+ ImutAVLTreeGenericIterator() = default;
+ ImutAVLTreeGenericIterator(const TreeTy *Root) {
+ if (Root) stack.push_back(reinterpret_cast<uintptr_t>(Root));
+ }
+
+ TreeTy &operator*() const {
+ assert(!stack.empty());
+ return *reinterpret_cast<TreeTy *>(stack.back() & ~Flags);
+ }
+ TreeTy *operator->() const { return &*this; }
+
+ uintptr_t getVisitState() const {
+ assert(!stack.empty());
+ return stack.back() & Flags;
+ }
+
+ bool atEnd() const { return stack.empty(); }
+
+ bool atBeginning() const {
+ return stack.size() == 1 && getVisitState() == VisitedNone;
+ }
+
+ void skipToParent() {
+ assert(!stack.empty());
+ stack.pop_back();
+ if (stack.empty())
+ return;
+ switch (getVisitState()) {
+ case VisitedNone:
+ stack.back() |= VisitedLeft;
+ break;
+ case VisitedLeft:
+ stack.back() |= VisitedRight;
+ break;
+ default:
+ llvm_unreachable("Unreachable.");
+ }
+ }
+
+ bool operator==(const ImutAVLTreeGenericIterator &x) const {
+ return stack == x.stack;
+ }
+
+ bool operator!=(const ImutAVLTreeGenericIterator &x) const {
+ return !(*this == x);
+ }
+
+ ImutAVLTreeGenericIterator &operator++() {
+ assert(!stack.empty());
+ TreeTy* Current = reinterpret_cast<TreeTy*>(stack.back() & ~Flags);
+ assert(Current);
+ switch (getVisitState()) {
+ case VisitedNone:
+ if (TreeTy* L = Current->getLeft())
+ stack.push_back(reinterpret_cast<uintptr_t>(L));
+ else
+ stack.back() |= VisitedLeft;
+ break;
+ case VisitedLeft:
+ if (TreeTy* R = Current->getRight())
+ stack.push_back(reinterpret_cast<uintptr_t>(R));
+ else
+ stack.back() |= VisitedRight;
+ break;
+ case VisitedRight:
+ skipToParent();
+ break;
+ default:
+ llvm_unreachable("Unreachable.");
+ }
+ return *this;
+ }
+
+ ImutAVLTreeGenericIterator &operator--() {
+ assert(!stack.empty());
+ TreeTy* Current = reinterpret_cast<TreeTy*>(stack.back() & ~Flags);
+ assert(Current);
+ switch (getVisitState()) {
+ case VisitedNone:
+ stack.pop_back();
+ break;
+ case VisitedLeft:
+ stack.back() &= ~Flags; // Set state to "VisitedNone."
+ if (TreeTy* L = Current->getLeft())
+ stack.push_back(reinterpret_cast<uintptr_t>(L) | VisitedRight);
+ break;
+ case VisitedRight:
+ stack.back() &= ~Flags;
+ stack.back() |= VisitedLeft;
+ if (TreeTy* R = Current->getRight())
+ stack.push_back(reinterpret_cast<uintptr_t>(R) | VisitedRight);
+ break;
+ default:
+ llvm_unreachable("Unreachable.");
+ }
+ return *this;
+ }
+};
+
+template <typename ImutInfo> class ImutAVLTreeInOrderIterator {
+ using InternalIteratorTy = ImutAVLTreeGenericIterator<ImutInfo>;
+
+ InternalIteratorTy InternalItr;
+
+public:
+ using iterator_category = std::bidirectional_iterator_tag;
+ using value_type = ImutAVLTree<ImutInfo>;
+ using difference_type = std::ptrdiff_t;
+ using pointer = value_type *;
+ using reference = value_type &;
+
+ using TreeTy = ImutAVLTree<ImutInfo>;
+
+ ImutAVLTreeInOrderIterator(const TreeTy* Root) : InternalItr(Root) {
+ if (Root)
+ ++*this; // Advance to first element.
+ }
+
+ ImutAVLTreeInOrderIterator() : InternalItr() {}
+
+ bool operator==(const ImutAVLTreeInOrderIterator &x) const {
+ return InternalItr == x.InternalItr;
+ }
+
+ bool operator!=(const ImutAVLTreeInOrderIterator &x) const {
+ return !(*this == x);
+ }
+
+ TreeTy &operator*() const { return *InternalItr; }
+ TreeTy *operator->() const { return &*InternalItr; }
+
+ ImutAVLTreeInOrderIterator &operator++() {
+ do ++InternalItr;
+ while (!InternalItr.atEnd() &&
+ InternalItr.getVisitState() != InternalIteratorTy::VisitedLeft);
+
+ return *this;
+ }
+
+ ImutAVLTreeInOrderIterator &operator--() {
+ do --InternalItr;
+ while (!InternalItr.atBeginning() &&
+ InternalItr.getVisitState() != InternalIteratorTy::VisitedLeft);
+
+ return *this;
+ }
+
+ void skipSubTree() {
+ InternalItr.skipToParent();
+
+ while (!InternalItr.atEnd() &&
+ InternalItr.getVisitState() != InternalIteratorTy::VisitedLeft)
+ ++InternalItr;
+ }
+};
+
+/// Generic iterator that wraps a T::TreeTy::iterator and exposes
+/// iterator::getValue() on dereference.
+template <typename T>
+struct ImutAVLValueIterator
+ : iterator_adaptor_base<
+ ImutAVLValueIterator<T>, typename T::TreeTy::iterator,
+ typename std::iterator_traits<
+ typename T::TreeTy::iterator>::iterator_category,
+ const typename T::value_type> {
+ ImutAVLValueIterator() = default;
+ explicit ImutAVLValueIterator(typename T::TreeTy *Tree)
+ : ImutAVLValueIterator::iterator_adaptor_base(Tree) {}
+
+ typename ImutAVLValueIterator::reference operator*() const {
+ return this->I->getValue();
+ }
+};
+
+//===----------------------------------------------------------------------===//
+// Trait classes for Profile information.
+//===----------------------------------------------------------------------===//
+
+/// Generic profile template. The default behavior is to invoke the
+/// profile method of an object. Specializations for primitive integers
+/// and generic handling of pointers are provided below.
+template <typename T>
+struct ImutProfileInfo {
+ using value_type = const T;
+ using value_type_ref = const T&;
+
+ static void Profile(FoldingSetNodeID &ID, value_type_ref X) {
+ FoldingSetTrait<T>::Profile(X,ID);
+ }
+};
+
+/// Profile traits for integers.
+template <typename T>
+struct ImutProfileInteger {
+ using value_type = const T;
+ using value_type_ref = const T&;
+
+ static void Profile(FoldingSetNodeID &ID, value_type_ref X) {
+ ID.AddInteger(X);
+ }
+};
+
+#define PROFILE_INTEGER_INFO(X)\
+template<> struct ImutProfileInfo<X> : ImutProfileInteger<X> {};
+
+PROFILE_INTEGER_INFO(char)
+PROFILE_INTEGER_INFO(unsigned char)
+PROFILE_INTEGER_INFO(short)
+PROFILE_INTEGER_INFO(unsigned short)
+PROFILE_INTEGER_INFO(unsigned)
+PROFILE_INTEGER_INFO(signed)
+PROFILE_INTEGER_INFO(long)
+PROFILE_INTEGER_INFO(unsigned long)
+PROFILE_INTEGER_INFO(long long)
+PROFILE_INTEGER_INFO(unsigned long long)
+
+#undef PROFILE_INTEGER_INFO
+
+/// Profile traits for booleans.
+template <>
+struct ImutProfileInfo<bool> {
+ using value_type = const bool;
+ using value_type_ref = const bool&;
+
+ static void Profile(FoldingSetNodeID &ID, value_type_ref X) {
+ ID.AddBoolean(X);
+ }
+};
+
+/// Generic profile trait for pointer types. We treat pointers as
+/// references to unique objects.
+template <typename T>
+struct ImutProfileInfo<T*> {
+ using value_type = const T*;
+ using value_type_ref = value_type;
+
+ static void Profile(FoldingSetNodeID &ID, value_type_ref X) {
+ ID.AddPointer(X);
+ }
+};
+
+//===----------------------------------------------------------------------===//
+// Trait classes that contain element comparison operators and type
+// definitions used by ImutAVLTree, ImmutableSet, and ImmutableMap. These
+// inherit from the profile traits (ImutProfileInfo) to include operations
+// for element profiling.
+//===----------------------------------------------------------------------===//
+
+/// ImutContainerInfo - Generic definition of comparison operations for
+/// elements of immutable containers that defaults to using
+/// std::equal_to<> and std::less<> to perform comparison of elements.
+template <typename T>
+struct ImutContainerInfo : public ImutProfileInfo<T> {
+ using value_type = typename ImutProfileInfo<T>::value_type;
+ using value_type_ref = typename ImutProfileInfo<T>::value_type_ref;
+ using key_type = value_type;
+ using key_type_ref = value_type_ref;
+ using data_type = bool;
+ using data_type_ref = bool;
+
+ static key_type_ref KeyOfValue(value_type_ref D) { return D; }
+ static data_type_ref DataOfValue(value_type_ref) { return true; }
+
+ static bool isEqual(key_type_ref LHS, key_type_ref RHS) {
+ return std::equal_to<key_type>()(LHS,RHS);
+ }
+
+ static bool isLess(key_type_ref LHS, key_type_ref RHS) {
+ return std::less<key_type>()(LHS,RHS);
+ }
+
+ static bool isDataEqual(data_type_ref, data_type_ref) { return true; }
+};
+
+/// ImutContainerInfo - Specialization for pointer values to treat pointers
+/// as references to unique objects. Pointers are thus compared by
+/// their addresses.
+template <typename T>
+struct ImutContainerInfo<T*> : public ImutProfileInfo<T*> {
+ using value_type = typename ImutProfileInfo<T*>::value_type;
+ using value_type_ref = typename ImutProfileInfo<T*>::value_type_ref;
+ using key_type = value_type;
+ using key_type_ref = value_type_ref;
+ using data_type = bool;
+ using data_type_ref = bool;
+
+ static key_type_ref KeyOfValue(value_type_ref D) { return D; }
+ static data_type_ref DataOfValue(value_type_ref) { return true; }
+
+ static bool isEqual(key_type_ref LHS, key_type_ref RHS) { return LHS == RHS; }
+
+ static bool isLess(key_type_ref LHS, key_type_ref RHS) { return LHS < RHS; }
+
+ static bool isDataEqual(data_type_ref, data_type_ref) { return true; }
+};
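+
+// A hedged sketch of a user-provided trait: reuse ImutContainerInfo but
+// invert the ordering. The name ReverseOrderInfo is hypothetical.
+//
+//   struct ReverseOrderInfo : ImutContainerInfo<unsigned> {
+//     static bool isLess(key_type_ref LHS, key_type_ref RHS) {
+//       return RHS < LHS;
+//     }
+//   };
+//
+// Such a trait can then be plugged into the containers defined below,
+// e.g. ImmutableSet<unsigned, ReverseOrderInfo>.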
+
+//===----------------------------------------------------------------------===//
+// Immutable Set
+//===----------------------------------------------------------------------===//
+
+template <typename ValT, typename ValInfo = ImutContainerInfo<ValT>>
+class ImmutableSet {
+public:
+ using value_type = typename ValInfo::value_type;
+ using value_type_ref = typename ValInfo::value_type_ref;
+ using TreeTy = ImutAVLTree<ValInfo>;
+
+private:
+ IntrusiveRefCntPtr<TreeTy> Root;
+
+public:
+ /// Constructs a set from a pointer to a tree root. In general one
+ /// should use a Factory object to create sets instead of directly
+  /// invoking the constructor, but there are cases where making this
+ /// constructor public is useful.
+ explicit ImmutableSet(TreeTy *R) : Root(R) {}
+
+ class Factory {
+ typename TreeTy::Factory F;
+ const bool Canonicalize;
+
+ public:
+ Factory(bool canonicalize = true)
+ : Canonicalize(canonicalize) {}
+
+ Factory(BumpPtrAllocator& Alloc, bool canonicalize = true)
+ : F(Alloc), Canonicalize(canonicalize) {}
+
+ Factory(const Factory& RHS) = delete;
+ void operator=(const Factory& RHS) = delete;
+
+ /// getEmptySet - Returns an immutable set that contains no elements.
+ ImmutableSet getEmptySet() {
+ return ImmutableSet(F.getEmptyTree());
+ }
+
+ /// add - Creates a new immutable set that contains all of the values
+ /// of the original set with the addition of the specified value. If
+ /// the original set already included the value, then the original set is
+ /// returned and no memory is allocated. The time and space complexity
+ /// of this operation is logarithmic in the size of the original set.
+ /// The memory allocated to represent the set is released when the
+ /// factory object that created the set is destroyed.
+ LLVM_NODISCARD ImmutableSet add(ImmutableSet Old, value_type_ref V) {
+ TreeTy *NewT = F.add(Old.Root.get(), V);
+ return ImmutableSet(Canonicalize ? F.getCanonicalTree(NewT) : NewT);
+ }
+
+ /// remove - Creates a new immutable set that contains all of the values
+ /// of the original set with the exception of the specified value. If
+ /// the original set did not contain the value, the original set is
+ /// returned and no memory is allocated. The time and space complexity
+ /// of this operation is logarithmic in the size of the original set.
+ /// The memory allocated to represent the set is released when the
+ /// factory object that created the set is destroyed.
+ LLVM_NODISCARD ImmutableSet remove(ImmutableSet Old, value_type_ref V) {
+ TreeTy *NewT = F.remove(Old.Root.get(), V);
+ return ImmutableSet(Canonicalize ? F.getCanonicalTree(NewT) : NewT);
+ }
+
+ BumpPtrAllocator& getAllocator() { return F.getAllocator(); }
+
+ typename TreeTy::Factory *getTreeFactory() const {
+ return const_cast<typename TreeTy::Factory *>(&F);
+ }
+ };
+
+ friend class Factory;
+
+ /// Returns true if the set contains the specified value.
+ bool contains(value_type_ref V) const {
+ return Root ? Root->contains(V) : false;
+ }
+
+ bool operator==(const ImmutableSet &RHS) const {
+ return Root && RHS.Root ? Root->isEqual(*RHS.Root.get()) : Root == RHS.Root;
+ }
+
+ bool operator!=(const ImmutableSet &RHS) const {
+ return Root && RHS.Root ? Root->isNotEqual(*RHS.Root.get())
+ : Root != RHS.Root;
+ }
+
+ TreeTy *getRoot() {
+ if (Root) { Root->retain(); }
+ return Root.get();
+ }
+
+ TreeTy *getRootWithoutRetain() const { return Root.get(); }
+
+ /// isEmpty - Return true if the set contains no elements.
+ bool isEmpty() const { return !Root; }
+
+ /// isSingleton - Return true if the set contains exactly one element.
+ /// This method runs in constant time.
+ bool isSingleton() const { return getHeight() == 1; }
+
+ //===--------------------------------------------------===//
+ // Iterators.
+ //===--------------------------------------------------===//
+
+ using iterator = ImutAVLValueIterator<ImmutableSet>;
+
+ iterator begin() const { return iterator(Root.get()); }
+ iterator end() const { return iterator(); }
+
+ //===--------------------------------------------------===//
+ // Utility methods.
+ //===--------------------------------------------------===//
+
+ unsigned getHeight() const { return Root ? Root->getHeight() : 0; }
+
+ static void Profile(FoldingSetNodeID &ID, const ImmutableSet &S) {
+ ID.AddPointer(S.Root.get());
+ }
+
+ void Profile(FoldingSetNodeID &ID) const { return Profile(ID, *this); }
+
+ //===--------------------------------------------------===//
+ // For testing.
+ //===--------------------------------------------------===//
+
+ void validateTree() const { if (Root) Root->validateTree(); }
+};
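+
+// A minimal usage sketch (assumes nothing beyond the API above):
+//
+//   ImmutableSet<int>::Factory F;
+//   ImmutableSet<int> S = F.getEmptySet();
+//   S = F.add(S, 3);
+//   S = F.add(S, 5);
+//   assert(S.contains(3) && !S.contains(7));
+//
+// Every add() returns a new set; previously obtained sets stay valid
+// until the factory itself is destroyed.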
+
+// NOTE: This may some day replace the current ImmutableSet.
+template <typename ValT, typename ValInfo = ImutContainerInfo<ValT>>
+class ImmutableSetRef {
+public:
+ using value_type = typename ValInfo::value_type;
+ using value_type_ref = typename ValInfo::value_type_ref;
+ using TreeTy = ImutAVLTree<ValInfo>;
+ using FactoryTy = typename TreeTy::Factory;
+
+private:
+ IntrusiveRefCntPtr<TreeTy> Root;
+ FactoryTy *Factory;
+
+public:
+ /// Constructs a set from a pointer to a tree root. In general one
+ /// should use a Factory object to create sets instead of directly
+  /// invoking the constructor, but there are cases where making this
+ /// constructor public is useful.
+ ImmutableSetRef(TreeTy *R, FactoryTy *F) : Root(R), Factory(F) {}
+
+ static ImmutableSetRef getEmptySet(FactoryTy *F) {
+    return ImmutableSetRef(nullptr, F);
+ }
+
+ ImmutableSetRef add(value_type_ref V) {
+ return ImmutableSetRef(Factory->add(Root.get(), V), Factory);
+ }
+
+ ImmutableSetRef remove(value_type_ref V) {
+ return ImmutableSetRef(Factory->remove(Root.get(), V), Factory);
+ }
+
+ /// Returns true if the set contains the specified value.
+ bool contains(value_type_ref V) const {
+ return Root ? Root->contains(V) : false;
+ }
+
+ ImmutableSet<ValT> asImmutableSet(bool canonicalize = true) const {
+ return ImmutableSet<ValT>(
+ canonicalize ? Factory->getCanonicalTree(Root.get()) : Root.get());
+ }
+
+ TreeTy *getRootWithoutRetain() const { return Root.get(); }
+
+ bool operator==(const ImmutableSetRef &RHS) const {
+ return Root && RHS.Root ? Root->isEqual(*RHS.Root.get()) : Root == RHS.Root;
+ }
+
+ bool operator!=(const ImmutableSetRef &RHS) const {
+ return Root && RHS.Root ? Root->isNotEqual(*RHS.Root.get())
+ : Root != RHS.Root;
+ }
+
+ /// isEmpty - Return true if the set contains no elements.
+ bool isEmpty() const { return !Root; }
+
+ /// isSingleton - Return true if the set contains exactly one element.
+ /// This method runs in constant time.
+ bool isSingleton() const { return getHeight() == 1; }
+
+ //===--------------------------------------------------===//
+ // Iterators.
+ //===--------------------------------------------------===//
+
+ using iterator = ImutAVLValueIterator<ImmutableSetRef>;
+
+ iterator begin() const { return iterator(Root.get()); }
+ iterator end() const { return iterator(); }
+
+ //===--------------------------------------------------===//
+ // Utility methods.
+ //===--------------------------------------------------===//
+
+ unsigned getHeight() const { return Root ? Root->getHeight() : 0; }
+
+ static void Profile(FoldingSetNodeID &ID, const ImmutableSetRef &S) {
+ ID.AddPointer(S.Root.get());
+ }
+
+ void Profile(FoldingSetNodeID &ID) const { return Profile(ID, *this); }
+
+ //===--------------------------------------------------===//
+ // For testing.
+ //===--------------------------------------------------===//
+
+ void validateTree() const { if (Root) Root->validateTree(); }
+};
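+
+// ImmutableSetRef carries its factory pointer, so elements can be added
+// without naming the factory on every call. A short sketch:
+//
+//   ImmutableSetRef<int>::FactoryTy F;
+//   auto S = ImmutableSetRef<int>::getEmptySet(&F);
+//   S = S.add(1);
+//   S = S.add(2);
+//   assert(S.contains(2));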
+
+} // end namespace llvm
+
+#endif // LLVM_ADT_IMMUTABLESET_H
+
+#ifdef __GNUC__
+#pragma GCC diagnostic pop
+#endif
diff --git a/contrib/libs/llvm14/include/llvm/ADT/IndexedMap.h b/contrib/libs/llvm14/include/llvm/ADT/IndexedMap.h
new file mode 100644
index 0000000000..d45d968819
--- /dev/null
+++ b/contrib/libs/llvm14/include/llvm/ADT/IndexedMap.h
@@ -0,0 +1,96 @@
+#pragma once
+
+#ifdef __GNUC__
+#pragma GCC diagnostic push
+#pragma GCC diagnostic ignored "-Wunused-parameter"
+#endif
+
+//===- llvm/ADT/IndexedMap.h - An index map implementation ------*- C++ -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+///
+/// \file
+/// This file implements an indexed map. The index map template takes two
+/// types. The first is the mapped type and the second is a functor
+/// that maps its argument to a size_t. On instantiation a "null" value
+/// can be provided to be used as a "does not exist" indicator in the
+/// map. A member function grow() is provided that, given the value of the
+/// maximally indexed key (the argument of the functor), makes sure the map
+/// has enough space for it.
+///
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_ADT_INDEXEDMAP_H
+#define LLVM_ADT_INDEXEDMAP_H
+
+#include "llvm/ADT/SmallVector.h"
+#include "llvm/ADT/STLExtras.h"
+#include <cassert>
+
+namespace llvm {
+
+template <typename T, typename ToIndexT = identity<unsigned>>
+ class IndexedMap {
+ using IndexT = typename ToIndexT::argument_type;
+ // Prefer SmallVector with zero inline storage over std::vector. IndexedMaps
+ // can grow very large and SmallVector grows more efficiently as long as T
+ // is trivially copyable.
+ using StorageT = SmallVector<T, 0>;
+
+ StorageT storage_;
+ T nullVal_;
+ ToIndexT toIndex_;
+
+ public:
+ IndexedMap() : nullVal_(T()) {}
+
+ explicit IndexedMap(const T& val) : nullVal_(val) {}
+
+ typename StorageT::reference operator[](IndexT n) {
+ assert(toIndex_(n) < storage_.size() && "index out of bounds!");
+ return storage_[toIndex_(n)];
+ }
+
+ typename StorageT::const_reference operator[](IndexT n) const {
+ assert(toIndex_(n) < storage_.size() && "index out of bounds!");
+ return storage_[toIndex_(n)];
+ }
+
+ void reserve(typename StorageT::size_type s) {
+ storage_.reserve(s);
+ }
+
+ void resize(typename StorageT::size_type s) {
+ storage_.resize(s, nullVal_);
+ }
+
+ void clear() {
+ storage_.clear();
+ }
+
+ void grow(IndexT n) {
+ unsigned NewSize = toIndex_(n) + 1;
+ if (NewSize > storage_.size())
+ resize(NewSize);
+ }
+
+ bool inBounds(IndexT n) const {
+ return toIndex_(n) < storage_.size();
+ }
+
+ typename StorageT::size_type size() const {
+ return storage_.size();
+ }
+ };
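+
+// A short usage sketch (the null value -1 is arbitrary):
+//
+//   IndexedMap<int> Map(-1);
+//   Map.grow(9);  // room for indices 0..9, new slots filled with -1
+//   Map[3] = 42;
+//   assert(Map.inBounds(3) && Map[7] == -1);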
+
+} // end namespace llvm
+
+#endif // LLVM_ADT_INDEXEDMAP_H
+
+#ifdef __GNUC__
+#pragma GCC diagnostic pop
+#endif
diff --git a/contrib/libs/llvm14/include/llvm/ADT/IntEqClasses.h b/contrib/libs/llvm14/include/llvm/ADT/IntEqClasses.h
new file mode 100644
index 0000000000..9358a87912
--- /dev/null
+++ b/contrib/libs/llvm14/include/llvm/ADT/IntEqClasses.h
@@ -0,0 +1,99 @@
+#pragma once
+
+#ifdef __GNUC__
+#pragma GCC diagnostic push
+#pragma GCC diagnostic ignored "-Wunused-parameter"
+#endif
+
+//===-- llvm/ADT/IntEqClasses.h - Equiv. Classes of Integers ----*- C++ -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+///
+/// \file
+/// Equivalence classes for small integers. This is a mapping of the integers
+/// 0 .. N-1 into M equivalence classes numbered 0 .. M-1.
+///
+/// Initially each integer has its own equivalence class. Classes are joined by
+/// passing a representative member of each class to join().
+///
+/// Once the classes are built, compress() will number them 0 .. M-1 and prevent
+/// further changes.
+///
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_ADT_INTEQCLASSES_H
+#define LLVM_ADT_INTEQCLASSES_H
+
+#include "llvm/ADT/SmallVector.h"
+
+namespace llvm {
+
+class IntEqClasses {
+ /// EC - When uncompressed, map each integer to a smaller member of its
+ /// equivalence class. The class leader is the smallest member and maps to
+ /// itself.
+ ///
+ /// When compressed, EC[i] is the equivalence class of i.
+ SmallVector<unsigned, 8> EC;
+
+ /// NumClasses - The number of equivalence classes when compressed, or 0 when
+ /// uncompressed.
+ unsigned NumClasses;
+
+public:
+ /// IntEqClasses - Create an equivalence class mapping for 0 .. N-1.
+ IntEqClasses(unsigned N = 0) : NumClasses(0) { grow(N); }
+
+ /// grow - Increase capacity to hold 0 .. N-1, putting new integers in unique
+ /// equivalence classes.
+ /// This requires an uncompressed map.
+ void grow(unsigned N);
+
+ /// clear - Clear all classes so that grow() will assign a unique class to
+ /// every integer.
+ void clear() {
+ EC.clear();
+ NumClasses = 0;
+ }
+
+ /// Join the equivalence classes of a and b. After joining classes,
+ /// findLeader(a) == findLeader(b). This requires an uncompressed map.
+ /// Returns the new leader.
+ unsigned join(unsigned a, unsigned b);
+
+ /// findLeader - Compute the leader of a's equivalence class. This is the
+ /// smallest member of the class.
+ /// This requires an uncompressed map.
+ unsigned findLeader(unsigned a) const;
+
+  /// compress - Compress equivalence classes by numbering them 0 .. M-1.
+ /// This makes the equivalence class map immutable.
+ void compress();
+
+ /// getNumClasses - Return the number of equivalence classes after compress()
+ /// was called.
+ unsigned getNumClasses() const { return NumClasses; }
+
+ /// operator[] - Return a's equivalence class number, 0 .. getNumClasses()-1.
+ /// This requires a compressed map.
+ unsigned operator[](unsigned a) const {
+ assert(NumClasses && "operator[] called before compress()");
+ return EC[a];
+ }
+
+ /// uncompress - Change back to the uncompressed representation that allows
+ /// editing.
+ void uncompress();
+};
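+
+// A short sketch of the intended lifecycle:
+//
+//   IntEqClasses EC(4);            // classes: {0} {1} {2} {3}
+//   EC.join(0, 3);                 // classes: {0,3} {1} {2}
+//   unsigned L = EC.findLeader(3); // == 0, the smallest member
+//   EC.compress();                 // renumber 0..2; map is now immutable
+//   unsigned C = EC[3];            // equals EC[0]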
+
+} // End llvm namespace
+
+#endif
+
+#ifdef __GNUC__
+#pragma GCC diagnostic pop
+#endif
diff --git a/contrib/libs/llvm14/include/llvm/ADT/IntervalMap.h b/contrib/libs/llvm14/include/llvm/ADT/IntervalMap.h
new file mode 100644
index 0000000000..d2493c266e
--- /dev/null
+++ b/contrib/libs/llvm14/include/llvm/ADT/IntervalMap.h
@@ -0,0 +1,2185 @@
+#pragma once
+
+#ifdef __GNUC__
+#pragma GCC diagnostic push
+#pragma GCC diagnostic ignored "-Wunused-parameter"
+#endif
+
+//===- llvm/ADT/IntervalMap.h - A sorted interval map -----------*- C++ -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+///
+/// \file
+/// This file implements a coalescing interval map for small objects.
+///
+/// KeyT objects are mapped to ValT objects. Intervals of keys that map to the
+/// same value are represented in a compressed form.
+///
+/// Iterators provide ordered access to the compressed intervals rather than the
+/// individual keys, and insert and erase operations use key intervals as well.
+///
+/// Like SmallVector, IntervalMap will store the first N intervals in the map
+/// object itself without any allocations. When space is exhausted it switches
+/// to a B+-tree representation with very small overhead for small key and
+/// value objects.
+///
+/// A Traits class specifies how keys are compared. It also allows IntervalMap
+/// to work with both closed and half-open intervals.
+///
+/// Keys and values are not stored next to each other in a std::pair, so we
+/// don't provide such a value_type. Dereferencing iterators only returns the
+/// mapped value. The interval bounds are accessible through the start() and
+/// stop() iterator methods.
+///
+/// IntervalMap is optimized for small key and value objects; 4 or 8 bytes
+/// each is the optimal size. For large objects use std::map instead.
+//
+//===----------------------------------------------------------------------===//
+//
+// Synopsis:
+//
+// template <typename KeyT, typename ValT, unsigned N, typename Traits>
+// class IntervalMap {
+// public:
+// typedef KeyT key_type;
+// typedef ValT mapped_type;
+// typedef RecyclingAllocator<...> Allocator;
+// class iterator;
+// class const_iterator;
+//
+// explicit IntervalMap(Allocator&);
+//   ~IntervalMap();
+//
+// bool empty() const;
+// KeyT start() const;
+// KeyT stop() const;
+// ValT lookup(KeyT x, Value NotFound = Value()) const;
+//
+// const_iterator begin() const;
+// const_iterator end() const;
+// iterator begin();
+// iterator end();
+// const_iterator find(KeyT x) const;
+// iterator find(KeyT x);
+//
+// void insert(KeyT a, KeyT b, ValT y);
+// void clear();
+// };
+//
+// template <typename KeyT, typename ValT, unsigned N, typename Traits>
+// class IntervalMap::const_iterator {
+// public:
+// using iterator_category = std::bidirectional_iterator_tag;
+// using value_type = ValT;
+// using difference_type = std::ptrdiff_t;
+// using pointer = value_type *;
+// using reference = value_type &;
+//
+// bool operator==(const const_iterator &) const;
+// bool operator!=(const const_iterator &) const;
+// bool valid() const;
+//
+// const KeyT &start() const;
+// const KeyT &stop() const;
+// const ValT &value() const;
+// const ValT &operator*() const;
+// const ValT *operator->() const;
+//
+// const_iterator &operator++();
+// const_iterator &operator++(int);
+// const_iterator &operator--();
+// const_iterator &operator--(int);
+// void goToBegin();
+// void goToEnd();
+// void find(KeyT x);
+// void advanceTo(KeyT x);
+// };
+//
+// template <typename KeyT, typename ValT, unsigned N, typename Traits>
+// class IntervalMap::iterator : public const_iterator {
+// public:
+// void insert(KeyT a, KeyT b, Value y);
+// void erase();
+// };
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_ADT_INTERVALMAP_H
+#define LLVM_ADT_INTERVALMAP_H
+
+#include "llvm/ADT/PointerIntPair.h"
+#include "llvm/ADT/SmallVector.h"
+#include "llvm/ADT/bit.h"
+#include "llvm/Support/AlignOf.h"
+#include "llvm/Support/Allocator.h"
+#include "llvm/Support/RecyclingAllocator.h"
+#include <algorithm>
+#include <cassert>
+#include <cstdint>
+#include <iterator>
+#include <new>
+#include <utility>
+
+namespace llvm {
+
+//===----------------------------------------------------------------------===//
+//--- Key traits ---//
+//===----------------------------------------------------------------------===//
+//
+// The IntervalMap works with closed or half-open intervals.
+// Adjacent intervals that map to the same value are coalesced.
+//
+// The IntervalMapInfo traits class is used to determine if a key is contained
+// in an interval, and if two intervals are adjacent so they can be coalesced.
+// The provided implementation works for closed integer intervals, other keys
+// probably need a specialized version.
+//
+// The point x is contained in [a;b] when !startLess(x, a) && !stopLess(b, x).
+//
+// It is assumed that (a;b] half-open intervals are not used, only [a;b) is
+// allowed. This is so that stopLess(a, b) can be used to determine if two
+// intervals overlap.
+//
+//===----------------------------------------------------------------------===//
+
+template <typename T>
+struct IntervalMapInfo {
+ /// startLess - Return true if x is not in [a;b].
+ /// This is x < a both for closed intervals and for [a;b) half-open intervals.
+ static inline bool startLess(const T &x, const T &a) {
+ return x < a;
+ }
+
+ /// stopLess - Return true if x is not in [a;b].
+ /// This is b < x for a closed interval, b <= x for [a;b) half-open intervals.
+ static inline bool stopLess(const T &b, const T &x) {
+ return b < x;
+ }
+
+ /// adjacent - Return true when the intervals [x;a] and [b;y] can coalesce.
+ /// This is a+1 == b for closed intervals, a == b for half-open intervals.
+ static inline bool adjacent(const T &a, const T &b) {
+ return a+1 == b;
+ }
+
+ /// nonEmpty - Return true if [a;b] is non-empty.
+ /// This is a <= b for a closed interval, a < b for [a;b) half-open intervals.
+ static inline bool nonEmpty(const T &a, const T &b) {
+ return a <= b;
+ }
+};
+
+template <typename T>
+struct IntervalMapHalfOpenInfo {
+ /// startLess - Return true if x is not in [a;b).
+ static inline bool startLess(const T &x, const T &a) {
+ return x < a;
+ }
+
+ /// stopLess - Return true if x is not in [a;b).
+ static inline bool stopLess(const T &b, const T &x) {
+ return b <= x;
+ }
+
+ /// adjacent - Return true when the intervals [x;a) and [b;y) can coalesce.
+ static inline bool adjacent(const T &a, const T &b) {
+ return a == b;
+ }
+
+ /// nonEmpty - Return true if [a;b) is non-empty.
+ static inline bool nonEmpty(const T &a, const T &b) {
+ return a < b;
+ }
+};
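+
+// A hedged sketch of selecting half-open semantics (the branching factor
+// 16 is arbitrary; see the synopsis above for the full interface):
+//
+//   using HalfOpenMap =
+//       IntervalMap<unsigned, char, 16, IntervalMapHalfOpenInfo<unsigned>>;
+//   HalfOpenMap::Allocator A;
+//   HalfOpenMap M(A);
+//   M.insert(10, 20, 'x'); // covers keys 10..19; 20 itself is excluded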
+
+/// IntervalMapImpl - Namespace used for IntervalMap implementation details.
+/// It should be considered private to the implementation.
+namespace IntervalMapImpl {
+
+using IdxPair = std::pair<unsigned,unsigned>;
+
+//===----------------------------------------------------------------------===//
+//--- IntervalMapImpl::NodeBase ---//
+//===----------------------------------------------------------------------===//
+//
+// Both leaf and branch nodes store vectors of pairs.
+// Leaves store ((KeyT, KeyT), ValT) pairs, branches use (NodeRef, KeyT).
+//
+// Keys and values are stored in separate arrays to avoid padding caused by
+// different object alignments. This also helps improve locality of reference
+// when searching the keys.
+//
+// The nodes don't know how many elements they contain - that information is
+// stored elsewhere. Omitting the size field prevents padding and allows a node
+// to fill the allocated cache lines completely.
+//
+// These are typical key and value sizes, the node branching factor (N), and
+// wasted space when nodes are sized to fit in three cache lines (192 bytes):
+//
+// T1 T2 N Waste Used by
+// 4 4 24 0 Branch<4> (32-bit pointers)
+// 8 4 16 0 Leaf<4,4>, Branch<4>
+// 8 8 12 0 Leaf<4,8>, Branch<8>
+// 16 4 9 12 Leaf<8,4>
+// 16 8 8 0 Leaf<8,8>
+//
+//===----------------------------------------------------------------------===//
+
+template <typename T1, typename T2, unsigned N>
+class NodeBase {
+public:
+ enum { Capacity = N };
+
+ T1 first[N];
+ T2 second[N];
+
+ /// copy - Copy elements from another node.
+ /// @param Other Node elements are copied from.
+ /// @param i Beginning of the source range in other.
+ /// @param j Beginning of the destination range in this.
+ /// @param Count Number of elements to copy.
+ template <unsigned M>
+ void copy(const NodeBase<T1, T2, M> &Other, unsigned i,
+ unsigned j, unsigned Count) {
+ assert(i + Count <= M && "Invalid source range");
+ assert(j + Count <= N && "Invalid dest range");
+ for (unsigned e = i + Count; i != e; ++i, ++j) {
+ first[j] = Other.first[i];
+ second[j] = Other.second[i];
+ }
+ }
+
+ /// moveLeft - Move elements to the left.
+ /// @param i Beginning of the source range.
+ /// @param j Beginning of the destination range.
+ /// @param Count Number of elements to copy.
+ void moveLeft(unsigned i, unsigned j, unsigned Count) {
+    assert(j <= i && "Use moveRight to shift elements right");
+ copy(*this, i, j, Count);
+ }
+
+ /// moveRight - Move elements to the right.
+ /// @param i Beginning of the source range.
+ /// @param j Beginning of the destination range.
+ /// @param Count Number of elements to copy.
+ void moveRight(unsigned i, unsigned j, unsigned Count) {
+    assert(i <= j && "Use moveLeft to shift elements left");
+ assert(j + Count <= N && "Invalid range");
+ while (Count--) {
+ first[j + Count] = first[i + Count];
+ second[j + Count] = second[i + Count];
+ }
+ }
+
+ /// erase - Erase elements [i;j).
+ /// @param i Beginning of the range to erase.
+ /// @param j End of the range. (Exclusive).
+ /// @param Size Number of elements in node.
+ void erase(unsigned i, unsigned j, unsigned Size) {
+ moveLeft(j, i, Size - j);
+ }
+
+ /// erase - Erase element at i.
+ /// @param i Index of element to erase.
+ /// @param Size Number of elements in node.
+ void erase(unsigned i, unsigned Size) {
+ erase(i, i+1, Size);
+ }
+
+ /// shift - Shift elements [i;size) 1 position to the right.
+ /// @param i Beginning of the range to move.
+ /// @param Size Number of elements in node.
+ void shift(unsigned i, unsigned Size) {
+ moveRight(i, i + 1, Size - i);
+ }
+
+ /// transferToLeftSib - Transfer elements to a left sibling node.
+ /// @param Size Number of elements in this.
+ /// @param Sib Left sibling node.
+ /// @param SSize Number of elements in sib.
+ /// @param Count Number of elements to transfer.
+ void transferToLeftSib(unsigned Size, NodeBase &Sib, unsigned SSize,
+ unsigned Count) {
+ Sib.copy(*this, 0, SSize, Count);
+ erase(0, Count, Size);
+ }
+
+ /// transferToRightSib - Transfer elements to a right sibling node.
+ /// @param Size Number of elements in this.
+ /// @param Sib Right sibling node.
+ /// @param SSize Number of elements in sib.
+ /// @param Count Number of elements to transfer.
+ void transferToRightSib(unsigned Size, NodeBase &Sib, unsigned SSize,
+ unsigned Count) {
+ Sib.moveRight(0, Count, SSize);
+ Sib.copy(*this, Size-Count, 0, Count);
+ }
+
+  /// adjustFromLeftSib - Adjust the number of elements in this node by moving
+  /// elements to or from a left sibling node.
+  /// @param Size Number of elements in this.
+  /// @param Sib Left sibling node.
+ /// @param SSize Number of elements in sib.
+ /// @param Add The number of elements to add to this node, possibly < 0.
+ /// @return Number of elements added to this node, possibly negative.
+ int adjustFromLeftSib(unsigned Size, NodeBase &Sib, unsigned SSize, int Add) {
+ if (Add > 0) {
+ // We want to grow, copy from sib.
+ unsigned Count = std::min(std::min(unsigned(Add), SSize), N - Size);
+ Sib.transferToRightSib(SSize, *this, Size, Count);
+ return Count;
+ } else {
+ // We want to shrink, copy to sib.
+ unsigned Count = std::min(std::min(unsigned(-Add), Size), N - SSize);
+ transferToLeftSib(Size, Sib, SSize, Count);
+ return -Count;
+ }
+ }
+};
+
+/// IntervalMapImpl::adjustSiblingSizes - Move elements between sibling nodes.
+/// @param Node Array of pointers to sibling nodes.
+/// @param Nodes Number of nodes.
+/// @param CurSize Array of current node sizes, will be overwritten.
+/// @param NewSize Array of desired node sizes.
+template <typename NodeT>
+void adjustSiblingSizes(NodeT *Node[], unsigned Nodes,
+                        unsigned CurSize[], const unsigned NewSize[]) {
+  // Guard first: with Nodes == 0 the loops below must not run at all.
+  if (Nodes == 0)
+    return;
+
+  // Move elements right.
+  for (int n = Nodes - 1; n; --n) {
+ if (CurSize[n] == NewSize[n])
+ continue;
+ for (int m = n - 1; m != -1; --m) {
+ int d = Node[n]->adjustFromLeftSib(CurSize[n], *Node[m], CurSize[m],
+ NewSize[n] - CurSize[n]);
+ CurSize[m] -= d;
+ CurSize[n] += d;
+ // Keep going if the current node was exhausted.
+ if (CurSize[n] >= NewSize[n])
+ break;
+ }
+ }
+
+
+ // Move elements left.
+ for (unsigned n = 0; n != Nodes - 1; ++n) {
+ if (CurSize[n] == NewSize[n])
+ continue;
+ for (unsigned m = n + 1; m != Nodes; ++m) {
+ int d = Node[m]->adjustFromLeftSib(CurSize[m], *Node[n], CurSize[n],
+ CurSize[n] - NewSize[n]);
+ CurSize[m] += d;
+ CurSize[n] -= d;
+ // Keep going if the current node was exhausted.
+ if (CurSize[n] >= NewSize[n])
+ break;
+ }
+ }
+
+#ifndef NDEBUG
+ for (unsigned n = 0; n != Nodes; n++)
+ assert(CurSize[n] == NewSize[n] && "Insufficient element shuffle");
+#endif
+}
+
+/// IntervalMapImpl::distribute - Compute a new distribution of node elements
+/// after an overflow or underflow. Reserve space for a new element at Position,
+/// and compute the node that will hold Position after redistributing node
+/// elements.
+///
+/// It is required that
+///
+/// Elements == sum(CurSize), and
+/// Elements + Grow <= Nodes * Capacity.
+///
+/// NewSize[] will be filled in such that:
+///
+/// sum(NewSize) == Elements, and
+/// NewSize[i] <= Capacity.
+///
+/// The returned index is the node where Position will go, so:
+///
+/// sum(NewSize[0..idx-1]) <= Position
+/// sum(NewSize[0..idx]) >= Position
+///
+/// The last equality, sum(NewSize[0..idx]) == Position, can only happen when
+/// Grow is set and NewSize[idx] == Capacity-1. The index points to the node
+/// before the one holding the Position'th element where there is room for an
+/// insertion.
+///
+/// @param Nodes The number of nodes.
+/// @param Elements Total elements in all nodes.
+/// @param Capacity The capacity of each node.
+/// @param CurSize Array[Nodes] of current node sizes, or NULL.
+/// @param NewSize Array[Nodes] to receive the new node sizes.
+/// @param Position Insert position.
+/// @param Grow Reserve space for a new element at Position.
+/// @return (node, offset) for Position.
+IdxPair distribute(unsigned Nodes, unsigned Elements, unsigned Capacity,
+ const unsigned *CurSize, unsigned NewSize[],
+ unsigned Position, bool Grow);
+
+//===----------------------------------------------------------------------===//
+//--- IntervalMapImpl::NodeSizer ---//
+//===----------------------------------------------------------------------===//
+//
+// Compute node sizes from key and value types.
+//
+// The branching factors are chosen to make nodes fit in three cache lines.
+// This may not be possible if keys or values are very large. Such large objects
+// are handled correctly, but a std::map would probably give better performance.
+//
+//===----------------------------------------------------------------------===//
+
+enum {
+ // Cache line size. Most architectures have 32 or 64 byte cache lines.
+ // We use 64 bytes here because it provides good branching factors.
+ Log2CacheLine = 6,
+ CacheLineBytes = 1 << Log2CacheLine,
+ DesiredNodeBytes = 3 * CacheLineBytes
+};
+
+template <typename KeyT, typename ValT>
+struct NodeSizer {
+ enum {
+ // Compute the leaf node branching factor that makes a node fit in three
+ // cache lines. The branching factor must be at least 3, or some B+-tree
+ // balancing algorithms won't work.
+ // LeafSize can't be larger than CacheLineBytes. This is required by the
+ // PointerIntPair used by NodeRef.
+ DesiredLeafSize = DesiredNodeBytes /
+ static_cast<unsigned>(2*sizeof(KeyT)+sizeof(ValT)),
+ MinLeafSize = 3,
+ LeafSize = DesiredLeafSize > MinLeafSize ? DesiredLeafSize : MinLeafSize
+ };
+
+ using LeafBase = NodeBase<std::pair<KeyT, KeyT>, ValT, LeafSize>;
+
+ enum {
+ // Now that we have the leaf branching factor, compute the actual allocation
+ // unit size by rounding up to a whole number of cache lines.
+ AllocBytes = (sizeof(LeafBase) + CacheLineBytes-1) & ~(CacheLineBytes-1),
+
+ // Determine the branching factor for branch nodes.
+ BranchSize = AllocBytes /
+ static_cast<unsigned>(sizeof(KeyT) + sizeof(void*))
+ };
+
+ /// Allocator - The recycling allocator used for both branch and leaf nodes.
+ /// This typedef is very likely to be identical for all IntervalMaps with
+ /// reasonably sized entries, so the same allocator can be shared among
+ /// different kinds of maps.
+ using Allocator =
+ RecyclingAllocator<BumpPtrAllocator, char, AllocBytes, CacheLineBytes>;
+};
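+
+// Worked example: with 4-byte keys and 4-byte values, DesiredLeafSize is
+// 192 / (2*4 + 4) = 16, so a leaf holds 16 intervals in exactly three
+// 64-byte cache lines (the Leaf<4,4> row of the table above).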
+
+//===----------------------------------------------------------------------===//
+//--- IntervalMapImpl::NodeRef ---//
+//===----------------------------------------------------------------------===//
+//
+// B+-tree nodes can be leaves or branches, so we need a polymorphic node
+// pointer that can point to both kinds.
+//
+// All nodes are cache line aligned and the low 6 bits of a node pointer are
+// always 0. These bits are used to store the number of elements in the
+// referenced node. Besides saving space, placing node sizes in the parents
+// allow tree balancing algorithms to run without faulting cache lines for nodes
+// allows tree balancing algorithms to run without faulting cache lines for
+// nodes that may not need to be modified.
+// A NodeRef doesn't know whether it references a leaf node or a branch node.
+// It is the responsibility of the caller to use the correct types.
+//
+// Nodes are never supposed to be empty, and it is invalid to store a node size
+// of 0 in a NodeRef. The valid range of sizes is 1-64.
+//
+//===----------------------------------------------------------------------===//
+
+class NodeRef {
+ struct CacheAlignedPointerTraits {
+ static inline void *getAsVoidPointer(void *P) { return P; }
+ static inline void *getFromVoidPointer(void *P) { return P; }
+ static constexpr int NumLowBitsAvailable = Log2CacheLine;
+ };
+ PointerIntPair<void*, Log2CacheLine, unsigned, CacheAlignedPointerTraits> pip;
+
+public:
+ /// NodeRef - Create a null ref.
+ NodeRef() = default;
+
+ /// operator bool - Detect a null ref.
+ explicit operator bool() const { return pip.getOpaqueValue(); }
+
+ /// NodeRef - Create a reference to the node p with n elements.
+ template <typename NodeT>
+ NodeRef(NodeT *p, unsigned n) : pip(p, n - 1) {
+ assert(n <= NodeT::Capacity && "Size too big for node");
+ }
+
+ /// size - Return the number of elements in the referenced node.
+ unsigned size() const { return pip.getInt() + 1; }
+
+ /// setSize - Update the node size.
+ void setSize(unsigned n) { pip.setInt(n - 1); }
+
+ /// subtree - Access the i'th subtree reference in a branch node.
+ /// This depends on branch nodes storing the NodeRef array as their first
+ /// member.
+ NodeRef &subtree(unsigned i) const {
+ return reinterpret_cast<NodeRef*>(pip.getPointer())[i];
+ }
+
+ /// get - Dereference as a NodeT reference.
+ template <typename NodeT>
+ NodeT &get() const {
+ return *reinterpret_cast<NodeT*>(pip.getPointer());
+ }
+
+ bool operator==(const NodeRef &RHS) const {
+ if (pip == RHS.pip)
+ return true;
+ assert(pip.getPointer() != RHS.pip.getPointer() && "Inconsistent NodeRefs");
+ return false;
+ }
+
+ bool operator!=(const NodeRef &RHS) const {
+ return !operator==(RHS);
+ }
+};
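+
+// Sketch of the encoding: NodeRef(P, N) stores N - 1 in the low bits that
+// cache-line alignment leaves free, so size() is answered without touching
+// the referenced node's cache lines:
+//
+//   NodeRef R(LeafPtr, 4); // LeafPtr: any cache-line-aligned node pointer
+//   R.size();              // == 4, decoded from the low bits of R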
+
+//===----------------------------------------------------------------------===//
+//--- IntervalMapImpl::LeafNode ---//
+//===----------------------------------------------------------------------===//
+//
+// Leaf nodes store up to N disjoint intervals with corresponding values.
+//
+// The intervals are kept sorted and fully coalesced so there are no adjacent
+// intervals mapping to the same value.
+//
+// These constraints are always satisfied:
+//
+// - Traits::stopLess(start(i), stop(i)) - Non-empty, sane intervals.
+//
+//  - Traits::stopLess(stop(i), start(i + 1)) - Sorted.
+//
+// - value(i) != value(i + 1) || !Traits::adjacent(stop(i), start(i + 1))
+// - Fully coalesced.
+//
+//===----------------------------------------------------------------------===//
+
+template <typename KeyT, typename ValT, unsigned N, typename Traits>
+class LeafNode : public NodeBase<std::pair<KeyT, KeyT>, ValT, N> {
+public:
+ const KeyT &start(unsigned i) const { return this->first[i].first; }
+ const KeyT &stop(unsigned i) const { return this->first[i].second; }
+ const ValT &value(unsigned i) const { return this->second[i]; }
+
+ KeyT &start(unsigned i) { return this->first[i].first; }
+ KeyT &stop(unsigned i) { return this->first[i].second; }
+ ValT &value(unsigned i) { return this->second[i]; }
+
+ /// findFrom - Find the first interval after i that may contain x.
+ /// @param i Starting index for the search.
+ /// @param Size Number of elements in node.
+ /// @param x Key to search for.
+ /// @return First index with !stopLess(key[i].stop, x), or size.
+ /// This is the first interval that can possibly contain x.
+ unsigned findFrom(unsigned i, unsigned Size, KeyT x) const {
+ assert(i <= Size && Size <= N && "Bad indices");
+ assert((i == 0 || Traits::stopLess(stop(i - 1), x)) &&
+ "Index is past the needed point");
+ while (i != Size && Traits::stopLess(stop(i), x)) ++i;
+ return i;
+ }
+
+ /// safeFind - Find an interval that is known to exist. This is the same as
+ /// findFrom except it is assumed that x is at least within range of the last
+ /// interval.
+ /// @param i Starting index for the search.
+ /// @param x Key to search for.
+ /// @return First index with !stopLess(key[i].stop, x), never size.
+ /// This is the first interval that can possibly contain x.
+ unsigned safeFind(unsigned i, KeyT x) const {
+ assert(i < N && "Bad index");
+ assert((i == 0 || Traits::stopLess(stop(i - 1), x)) &&
+ "Index is past the needed point");
+ while (Traits::stopLess(stop(i), x)) ++i;
+ assert(i < N && "Unsafe intervals");
+ return i;
+ }
+
+ /// safeLookup - Lookup mapped value for a safe key.
+ /// It is assumed that x is within range of the last entry.
+ /// @param x Key to search for.
+ /// @param NotFound Value to return if x is not in any interval.
+ /// @return The mapped value at x or NotFound.
+ ValT safeLookup(KeyT x, ValT NotFound) const {
+ unsigned i = safeFind(0, x);
+ return Traits::startLess(x, start(i)) ? NotFound : value(i);
+ }
+
+ unsigned insertFrom(unsigned &Pos, unsigned Size, KeyT a, KeyT b, ValT y);
+};
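+
+// Illustrative example (hypothetical contents): for a leaf holding
+// [5;10] -> 'x' and [20;25] -> 'y', safeLookup(7, 0) returns 'x', while
+// safeLookup(15, 0) lands on [20;25] (the first interval with stop >= 15)
+// and returns NotFound because startLess(15, 20) holds.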
+
+/// insertFrom - Add mapping of [a;b] to y if possible, coalescing as much as
+/// possible. This may cause the node to grow by 1, or it may cause the node
+/// to shrink because of coalescing.
+/// @param Pos Starting index = insertFrom(0, size, a)
+/// @param Size Number of elements in node.
+/// @param a Interval start.
+/// @param b Interval stop.
+/// @param y Value to be mapped.
+/// @return (insert position, new size), or (i, Capacity+1) on overflow.
+template <typename KeyT, typename ValT, unsigned N, typename Traits>
+unsigned LeafNode<KeyT, ValT, N, Traits>::
+insertFrom(unsigned &Pos, unsigned Size, KeyT a, KeyT b, ValT y) {
+ unsigned i = Pos;
+ assert(i <= Size && Size <= N && "Invalid index");
+ assert(!Traits::stopLess(b, a) && "Invalid interval");
+
+ // Verify the findFrom invariant.
+ assert((i == 0 || Traits::stopLess(stop(i - 1), a)));
+ assert((i == Size || !Traits::stopLess(stop(i), a)));
+ assert((i == Size || Traits::stopLess(b, start(i))) && "Overlapping insert");
+
+ // Coalesce with previous interval.
+ if (i && value(i - 1) == y && Traits::adjacent(stop(i - 1), a)) {
+ Pos = i - 1;
+ // Also coalesce with next interval?
+ if (i != Size && value(i) == y && Traits::adjacent(b, start(i))) {
+ stop(i - 1) = stop(i);
+ this->erase(i, Size);
+ return Size - 1;
+ }
+ stop(i - 1) = b;
+ return Size;
+ }
+
+ // Detect overflow.
+ if (i == N)
+ return N + 1;
+
+ // Add new interval at end.
+ if (i == Size) {
+ start(i) = a;
+ stop(i) = b;
+ value(i) = y;
+ return Size + 1;
+ }
+
+ // Try to coalesce with following interval.
+ if (value(i) == y && Traits::adjacent(b, start(i))) {
+ start(i) = a;
+ return Size;
+ }
+
+ // We must insert before i. Detect overflow.
+ if (Size == N)
+ return N + 1;
+
+ // Insert before i.
+ this->shift(i, Size);
+ start(i) = a;
+ stop(i) = b;
+ value(i) = y;
+ return Size + 1;
+}
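+
+// A sketch of the calling convention (illustrative; mirrors the root-leaf
+// insert in IntervalMap::insert below). Pos is primed with findFrom(), and a
+// result above the capacity signals overflow:
+//
+//   unsigned Pos = L.findFrom(0, Size, a);
+//   unsigned NewSize = L.insertFrom(Pos, Size, a, b, y);
+//   if (NewSize > N)
+//     ; // node is full: split it or spill to a sibling, then retry
+//   else
+//     Size = NewSize; // Size - 1, Size, or Size + 1 after coalescing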
+
+//===----------------------------------------------------------------------===//
+//--- IntervalMapImpl::BranchNode ---//
+//===----------------------------------------------------------------------===//
+//
+// A branch node stores references to 1--N subtrees all of the same height.
+//
+// The key array in a branch node holds the rightmost stop key of each subtree.
+// It is redundant to store the last stop key since it can be found in the
+// parent node, but doing so makes tree balancing a lot simpler.
+//
+// It is unusual for a branch node to only have one subtree, but it can happen
+// in the root node if it is smaller than the normal nodes.
+//
+// When all of the leaf nodes from all the subtrees are concatenated, they must
+// satisfy the same constraints as a single leaf node. They must be sorted,
+// sane, and fully coalesced.
+//
+//===----------------------------------------------------------------------===//
+
+template <typename KeyT, typename ValT, unsigned N, typename Traits>
+class BranchNode : public NodeBase<NodeRef, KeyT, N> {
+public:
+ const KeyT &stop(unsigned i) const { return this->second[i]; }
+ const NodeRef &subtree(unsigned i) const { return this->first[i]; }
+
+ KeyT &stop(unsigned i) { return this->second[i]; }
+ NodeRef &subtree(unsigned i) { return this->first[i]; }
+
+ /// findFrom - Find the first subtree after i that may contain x.
+ /// @param i Starting index for the search.
+ /// @param Size Number of elements in node.
+ /// @param x Key to search for.
+ /// @return First index with !stopLess(key[i], x), or size.
+ /// This is the first subtree that can possibly contain x.
+ unsigned findFrom(unsigned i, unsigned Size, KeyT x) const {
+ assert(i <= Size && Size <= N && "Bad indices");
+ assert((i == 0 || Traits::stopLess(stop(i - 1), x)) &&
+ "Index to findFrom is past the needed point");
+ while (i != Size && Traits::stopLess(stop(i), x)) ++i;
+ return i;
+ }
+
+ /// safeFind - Find a subtree that is known to exist. This is the same as
+ /// findFrom except it is assumed that x is in range.
+ /// @param i Starting index for the search.
+ /// @param x Key to search for.
+ /// @return First index with !stopLess(key[i], x), never size.
+ /// This is the first subtree that can possibly contain x.
+ unsigned safeFind(unsigned i, KeyT x) const {
+ assert(i < N && "Bad index");
+ assert((i == 0 || Traits::stopLess(stop(i - 1), x)) &&
+ "Index is past the needed point");
+ while (Traits::stopLess(stop(i), x)) ++i;
+ assert(i < N && "Unsafe intervals");
+ return i;
+ }
+
+ /// safeLookup - Get the subtree containing x, assuming that x is in range.
+ /// @param x Key to search for.
+ /// @return Subtree containing x.
+ NodeRef safeLookup(KeyT x) const {
+ return subtree(safeFind(0, x));
+ }
+
+ /// insert - Insert a new (subtree, stop) pair.
+ /// @param i Insert position, following entries will be shifted.
+ /// @param Size Number of elements in node.
+ /// @param Node Subtree to insert.
+ /// @param Stop Last key in subtree.
+ void insert(unsigned i, unsigned Size, NodeRef Node, KeyT Stop) {
+ assert(Size < N && "branch node overflow");
+ assert(i <= Size && "Bad insert position");
+ this->shift(i, Size);
+ subtree(i) = Node;
+ stop(i) = Stop;
+ }
+};
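+
+// Layout sketch (hypothetical contents): a branch over three leaves covering
+// [0;9], [10;19], [20;29] stores the stop keys {9, 19, 29}. safeLookup(15)
+// returns subtree(1), the first subtree i with !stopLess(stop(i), 15).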
+
+//===----------------------------------------------------------------------===//
+//--- IntervalMapImpl::Path ---//
+//===----------------------------------------------------------------------===//
+//
+// A Path is used by iterators to represent a position in a B+-tree, and the
+// path to get there from the root.
+//
+// The Path class also contains the tree navigation code that doesn't have to
+// be templatized.
+//
+//===----------------------------------------------------------------------===//
+
+class Path {
+ /// Entry - Each step in the path is a node pointer and an offset into that
+ /// node.
+ struct Entry {
+ void *node;
+ unsigned size;
+ unsigned offset;
+
+ Entry(void *Node, unsigned Size, unsigned Offset)
+ : node(Node), size(Size), offset(Offset) {}
+
+ Entry(NodeRef Node, unsigned Offset)
+ : node(&Node.subtree(0)), size(Node.size()), offset(Offset) {}
+
+ NodeRef &subtree(unsigned i) const {
+ return reinterpret_cast<NodeRef*>(node)[i];
+ }
+ };
+
+ /// path - The path entries, path[0] is the root node, path.back() is a leaf.
+ SmallVector<Entry, 4> path;
+
+public:
+ // Node accessors.
+ template <typename NodeT> NodeT &node(unsigned Level) const {
+ return *reinterpret_cast<NodeT*>(path[Level].node);
+ }
+ unsigned size(unsigned Level) const { return path[Level].size; }
+ unsigned offset(unsigned Level) const { return path[Level].offset; }
+ unsigned &offset(unsigned Level) { return path[Level].offset; }
+
+ // Leaf accessors.
+ template <typename NodeT> NodeT &leaf() const {
+ return *reinterpret_cast<NodeT*>(path.back().node);
+ }
+ unsigned leafSize() const { return path.back().size; }
+ unsigned leafOffset() const { return path.back().offset; }
+ unsigned &leafOffset() { return path.back().offset; }
+
+ /// valid - Return true if path is at a valid node, not at end().
+ bool valid() const {
+ return !path.empty() && path.front().offset < path.front().size;
+ }
+
+ /// height - Return the height of the tree corresponding to this path.
+ /// This matches map->height in a full path.
+ unsigned height() const { return path.size() - 1; }
+
+ /// subtree - Get the subtree referenced from Level. When the path is
+ /// consistent, node(Level + 1) == subtree(Level).
+ /// @param Level 0..height-1. The leaves have no subtrees.
+ NodeRef &subtree(unsigned Level) const {
+ return path[Level].subtree(path[Level].offset);
+ }
+
+ /// reset - Reset cached information about node(Level) from subtree(Level - 1).
+ /// @param Level 1..height. The node to update after parent node changed.
+ void reset(unsigned Level) {
+ path[Level] = Entry(subtree(Level - 1), offset(Level));
+ }
+
+ /// push - Add entry to path.
+ /// @param Node Node to add, should be subtree(path.size()-1).
+ /// @param Offset Offset into Node.
+ void push(NodeRef Node, unsigned Offset) {
+ path.push_back(Entry(Node, Offset));
+ }
+
+ /// pop - Remove the last path entry.
+ void pop() {
+ path.pop_back();
+ }
+
+ /// setSize - Set the size of a node both in the path and in the tree.
+ /// @param Level 0..height. Note that setting the root size won't change
+ /// map->rootSize.
+ /// @param Size New node size.
+ void setSize(unsigned Level, unsigned Size) {
+ path[Level].size = Size;
+ if (Level)
+ subtree(Level - 1).setSize(Size);
+ }
+
+ /// setRoot - Clear the path and set a new root node.
+ /// @param Node New root node.
+ /// @param Size New root size.
+ /// @param Offset Offset into root node.
+ void setRoot(void *Node, unsigned Size, unsigned Offset) {
+ path.clear();
+ path.push_back(Entry(Node, Size, Offset));
+ }
+
+ /// replaceRoot - Replace the current root node with two new entries after the
+ /// tree height has increased.
+ /// @param Root The new root node.
+ /// @param Size Number of entries in the new root.
+ /// @param Offsets Offsets into the root and first branch nodes.
+ void replaceRoot(void *Root, unsigned Size, IdxPair Offsets);
+
+ /// getLeftSibling - Get the left sibling node at Level, or a null NodeRef.
+ /// @param Level Get the sibling to node(Level).
+ /// @return Left sibling, or NodeRef().
+ NodeRef getLeftSibling(unsigned Level) const;
+
+ /// moveLeft - Move path to the left sibling at Level. Leave nodes below Level
+ /// unaltered.
+ /// @param Level Move node(Level).
+ void moveLeft(unsigned Level);
+
+ /// fillLeft - Grow path to Height by taking leftmost branches.
+ /// @param Height The target height.
+ void fillLeft(unsigned Height) {
+ while (height() < Height)
+ push(subtree(height()), 0);
+ }
+
+ /// getRightSibling - Get the right sibling node at Level, or a null NodeRef.
+ /// @param Level Get the sibling to node(Level).
+ /// @return Right sibling, or NodeRef().
+ NodeRef getRightSibling(unsigned Level) const;
+
+ /// moveRight - Move path to the right sibling at Level. Leave nodes below
+ /// Level unaltered.
+ /// @param Level Move node(Level).
+ void moveRight(unsigned Level);
+
+ /// atBegin - Return true if path is at begin().
+ bool atBegin() const {
+ for (unsigned i = 0, e = path.size(); i != e; ++i)
+ if (path[i].offset != 0)
+ return false;
+ return true;
+ }
+
+ /// atLastEntry - Return true if the path is at the last entry of the node at
+ /// Level.
+ /// @param Level Node to examine.
+ bool atLastEntry(unsigned Level) const {
+ return path[Level].offset == path[Level].size - 1;
+ }
+
+ /// legalizeForInsert - Prepare the path for an insertion at Level. When the
+ /// path is at end(), node(Level) may not be a legal node. legalizeForInsert
+ /// ensures that node(Level) is real by moving back to the last node at Level,
+ /// and setting offset(Level) to size(Level) if required.
+ /// @param Level The level where an insertion is about to take place.
+ void legalizeForInsert(unsigned Level) {
+ if (valid())
+ return;
+ moveLeft(Level);
+ ++path[Level].offset;
+ }
+};
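+
+// Shape sketch (illustrative): in a height-2 tree, a full path holds
+//
+//   path[0] = (root branch node, rootSize, offset into root)
+//   path[1] = (Branch, size, offset)  // the node referenced by subtree(0)
+//   path[2] = (Leaf, size, offset)    // path.back(), used by leaf accessors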
+
+} // end namespace IntervalMapImpl
+
+//===----------------------------------------------------------------------===//
+//--- IntervalMap ----//
+//===----------------------------------------------------------------------===//
+
+template <typename KeyT, typename ValT,
+ unsigned N = IntervalMapImpl::NodeSizer<KeyT, ValT>::LeafSize,
+ typename Traits = IntervalMapInfo<KeyT>>
+class IntervalMap {
+ using Sizer = IntervalMapImpl::NodeSizer<KeyT, ValT>;
+ using Leaf = IntervalMapImpl::LeafNode<KeyT, ValT, Sizer::LeafSize, Traits>;
+ using Branch =
+ IntervalMapImpl::BranchNode<KeyT, ValT, Sizer::BranchSize, Traits>;
+ using RootLeaf = IntervalMapImpl::LeafNode<KeyT, ValT, N, Traits>;
+ using IdxPair = IntervalMapImpl::IdxPair;
+
+ // The RootLeaf capacity is given as a template parameter. We must compute the
+ // corresponding RootBranch capacity.
+ enum {
+ DesiredRootBranchCap = (sizeof(RootLeaf) - sizeof(KeyT)) /
+ (sizeof(KeyT) + sizeof(IntervalMapImpl::NodeRef)),
+ RootBranchCap = DesiredRootBranchCap ? DesiredRootBranchCap : 1
+ };
+
+ using RootBranch =
+ IntervalMapImpl::BranchNode<KeyT, ValT, RootBranchCap, Traits>;
+
+ // When branched, we store a global start key as well as the branch node.
+ struct RootBranchData {
+ KeyT start;
+ RootBranch node;
+ };
+
+public:
+ using Allocator = typename Sizer::Allocator;
+ using KeyType = KeyT;
+ using ValueType = ValT;
+ using KeyTraits = Traits;
+
+private:
+ // The root data is either a RootLeaf or a RootBranchData instance.
+ AlignedCharArrayUnion<RootLeaf, RootBranchData> data;
+
+ // Tree height.
+ // 0: Leaves in root.
+ // 1: Root points to leaf.
+ // 2: root->branch->leaf ...
+ unsigned height;
+
+ // Number of entries in the root node.
+ unsigned rootSize;
+
+ // Allocator used for creating external nodes.
+ Allocator &allocator;
+
+ /// Represent data as a node type without breaking aliasing rules.
+ template <typename T> T &dataAs() const { return *llvm::bit_cast<T *>(&data); }
+
+ const RootLeaf &rootLeaf() const {
+ assert(!branched() && "Cannot acces leaf data in branched root");
+ return dataAs<RootLeaf>();
+ }
+ RootLeaf &rootLeaf() {
+ assert(!branched() && "Cannot acces leaf data in branched root");
+ return dataAs<RootLeaf>();
+ }
+
+ const RootBranchData &rootBranchData() const {
+ assert(branched() && "Cannot access branch data in non-branched root");
+ return dataAs<RootBranchData>();
+ }
+ RootBranchData &rootBranchData() {
+ assert(branched() && "Cannot access branch data in non-branched root");
+ return dataAs<RootBranchData>();
+ }
+
+ const RootBranch &rootBranch() const { return rootBranchData().node; }
+ RootBranch &rootBranch() { return rootBranchData().node; }
+ KeyT rootBranchStart() const { return rootBranchData().start; }
+ KeyT &rootBranchStart() { return rootBranchData().start; }
+
+ template <typename NodeT> NodeT *newNode() {
+ return new(allocator.template Allocate<NodeT>()) NodeT();
+ }
+
+ template <typename NodeT> void deleteNode(NodeT *P) {
+ P->~NodeT();
+ allocator.Deallocate(P);
+ }
+
+ IdxPair branchRoot(unsigned Position);
+ IdxPair splitRoot(unsigned Position);
+
+ void switchRootToBranch() {
+ rootLeaf().~RootLeaf();
+ height = 1;
+ new (&rootBranchData()) RootBranchData();
+ }
+
+ void switchRootToLeaf() {
+ rootBranchData().~RootBranchData();
+ height = 0;
+ new(&rootLeaf()) RootLeaf();
+ }
+
+ bool branched() const { return height > 0; }
+
+ ValT treeSafeLookup(KeyT x, ValT NotFound) const;
+ void visitNodes(void (IntervalMap::*f)(IntervalMapImpl::NodeRef,
+ unsigned Level));
+ void deleteNode(IntervalMapImpl::NodeRef Node, unsigned Level);
+
+public:
+ explicit IntervalMap(Allocator &a) : height(0), rootSize(0), allocator(a) {
+ assert((uintptr_t(&data) & (alignof(RootLeaf) - 1)) == 0 &&
+ "Insufficient alignment");
+ new(&rootLeaf()) RootLeaf();
+ }
+
+ ~IntervalMap() {
+ clear();
+ rootLeaf().~RootLeaf();
+ }
+
+ /// empty - Return true when no intervals are mapped.
+ bool empty() const {
+ return rootSize == 0;
+ }
+
+ /// start - Return the smallest mapped key in a non-empty map.
+ KeyT start() const {
+ assert(!empty() && "Empty IntervalMap has no start");
+ return !branched() ? rootLeaf().start(0) : rootBranchStart();
+ }
+
+ /// stop - Return the largest mapped key in a non-empty map.
+ KeyT stop() const {
+ assert(!empty() && "Empty IntervalMap has no stop");
+ return !branched() ? rootLeaf().stop(rootSize - 1) :
+ rootBranch().stop(rootSize - 1);
+ }
+
+ /// lookup - Return the mapped value at x or NotFound.
+ ValT lookup(KeyT x, ValT NotFound = ValT()) const {
+ if (empty() || Traits::startLess(x, start()) || Traits::stopLess(stop(), x))
+ return NotFound;
+ return branched() ? treeSafeLookup(x, NotFound) :
+ rootLeaf().safeLookup(x, NotFound);
+ }
+
+ /// insert - Add a mapping of [a;b] to y, coalesce with adjacent intervals.
+ /// It is assumed that no key in the interval is mapped to another value, but
+ /// overlapping intervals already mapped to y will be coalesced.
+ void insert(KeyT a, KeyT b, ValT y) {
+ if (branched() || rootSize == RootLeaf::Capacity)
+ return find(a).insert(a, b, y);
+
+ // Easy insert into root leaf.
+ unsigned p = rootLeaf().findFrom(0, rootSize, a);
+ rootSize = rootLeaf().insertFrom(p, rootSize, a, b, y);
+ }
+
+ /// clear - Remove all entries.
+ void clear();
+
+ class const_iterator;
+ class iterator;
+ friend class const_iterator;
+ friend class iterator;
+
+ const_iterator begin() const {
+ const_iterator I(*this);
+ I.goToBegin();
+ return I;
+ }
+
+ iterator begin() {
+ iterator I(*this);
+ I.goToBegin();
+ return I;
+ }
+
+ const_iterator end() const {
+ const_iterator I(*this);
+ I.goToEnd();
+ return I;
+ }
+
+ iterator end() {
+ iterator I(*this);
+ I.goToEnd();
+ return I;
+ }
+
+ /// find - Return an iterator pointing to the first interval ending at or
+ /// after x, or end().
+ const_iterator find(KeyT x) const {
+ const_iterator I(*this);
+ I.find(x);
+ return I;
+ }
+
+ iterator find(KeyT x) {
+ iterator I(*this);
+ I.find(x);
+ return I;
+ }
+
+ /// overlaps(a, b) - Return true if the intervals in this map overlap with the
+ /// interval [a;b].
+ bool overlaps(KeyT a, KeyT b) const {
+ assert(Traits::nonEmpty(a, b));
+ const_iterator I = find(a);
+ if (!I.valid())
+ return false;
+ // [a;b] and [x;y] overlap iff x<=b and a<=y. The find() call guarantees the
+ // second part (y = find(a).stop()), so it is sufficient to check the first
+ // one.
+ return !Traits::stopLess(b, I.start());
+ }
+};
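+
+// A minimal usage sketch (illustrative; 'Alloc' and 'M' are local names).
+// With the default IntervalMapInfo, adjacent integer intervals mapped to
+// equal values coalesce automatically:
+//
+//   IntervalMap<unsigned, char>::Allocator Alloc;
+//   IntervalMap<unsigned, char> M(Alloc);
+//   M.insert(10, 20, 'a');        // [10;20] -> 'a'
+//   M.insert(21, 30, 'a');        // coalesces into [10;30] -> 'a'
+//   char V = M.lookup(25);        // 'a'
+//   bool O = M.overlaps(25, 40);  // true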
+
+/// treeSafeLookup - Return the mapped value at x or NotFound, assuming a
+/// branched root.
+template <typename KeyT, typename ValT, unsigned N, typename Traits>
+ValT IntervalMap<KeyT, ValT, N, Traits>::
+treeSafeLookup(KeyT x, ValT NotFound) const {
+ assert(branched() && "treeLookup assumes a branched root");
+
+ IntervalMapImpl::NodeRef NR = rootBranch().safeLookup(x);
+ for (unsigned h = height-1; h; --h)
+ NR = NR.get<Branch>().safeLookup(x);
+ return NR.get<Leaf>().safeLookup(x, NotFound);
+}
+
+// branchRoot - Switch from a leaf root to a branched root.
+// Return the new (root offset, node offset) corresponding to Position.
+template <typename KeyT, typename ValT, unsigned N, typename Traits>
+IntervalMapImpl::IdxPair IntervalMap<KeyT, ValT, N, Traits>::
+branchRoot(unsigned Position) {
+ using namespace IntervalMapImpl;
+ // How many external leaf nodes to hold RootLeaf+1?
+ const unsigned Nodes = RootLeaf::Capacity / Leaf::Capacity + 1;
+
+ // Compute element distribution among new nodes.
+ unsigned size[Nodes];
+ IdxPair NewOffset(0, Position);
+
+ // It is very common for the root node to be smaller than external nodes.
+ if (Nodes == 1)
+ size[0] = rootSize;
+ else
+ NewOffset = distribute(Nodes, rootSize, Leaf::Capacity, nullptr, size,
+ Position, true);
+
+ // Allocate new nodes.
+ unsigned pos = 0;
+ NodeRef node[Nodes];
+ for (unsigned n = 0; n != Nodes; ++n) {
+ Leaf *L = newNode<Leaf>();
+ L->copy(rootLeaf(), pos, 0, size[n]);
+ node[n] = NodeRef(L, size[n]);
+ pos += size[n];
+ }
+
+ // Destroy the old leaf node, construct branch node instead.
+ switchRootToBranch();
+ for (unsigned n = 0; n != Nodes; ++n) {
+ rootBranch().stop(n) = node[n].template get<Leaf>().stop(size[n]-1);
+ rootBranch().subtree(n) = node[n];
+ }
+ rootBranchStart() = node[0].template get<Leaf>().start(0);
+ rootSize = Nodes;
+ return NewOffset;
+}
+
+// splitRoot - Split the current BranchRoot into multiple Branch nodes.
+// Return the new (root offset, node offset) corresponding to Position.
+template <typename KeyT, typename ValT, unsigned N, typename Traits>
+IntervalMapImpl::IdxPair IntervalMap<KeyT, ValT, N, Traits>::
+splitRoot(unsigned Position) {
+ using namespace IntervalMapImpl;
+ // How many external branch nodes to hold RootBranch+1?
+ const unsigned Nodes = RootBranch::Capacity / Branch::Capacity + 1;
+
+ // Compute element distribution among new nodes.
+ unsigned Size[Nodes];
+ IdxPair NewOffset(0, Position);
+
+ // It is very common for the root node to be smaller than external nodes.
+ if (Nodes == 1)
+ Size[0] = rootSize;
+ else
+ NewOffset = distribute(Nodes, rootSize, Branch::Capacity, nullptr, Size,
+ Position, true);
+
+ // Allocate new nodes.
+ unsigned Pos = 0;
+ NodeRef Node[Nodes];
+ for (unsigned n = 0; n != Nodes; ++n) {
+ Branch *B = newNode<Branch>();
+ B->copy(rootBranch(), Pos, 0, Size[n]);
+ Node[n] = NodeRef(B, Size[n]);
+ Pos += Size[n];
+ }
+
+ for (unsigned n = 0; n != Nodes; ++n) {
+ rootBranch().stop(n) = Node[n].template get<Branch>().stop(Size[n]-1);
+ rootBranch().subtree(n) = Node[n];
+ }
+ rootSize = Nodes;
+ ++height;
+ return NewOffset;
+}
+
+/// visitNodes - Visit each external node.
+template <typename KeyT, typename ValT, unsigned N, typename Traits>
+void IntervalMap<KeyT, ValT, N, Traits>::
+visitNodes(void (IntervalMap::*f)(IntervalMapImpl::NodeRef, unsigned Height)) {
+ if (!branched())
+ return;
+ SmallVector<IntervalMapImpl::NodeRef, 4> Refs, NextRefs;
+
+ // Collect level 0 nodes from the root.
+ for (unsigned i = 0; i != rootSize; ++i)
+ Refs.push_back(rootBranch().subtree(i));
+
+ // Visit all branch nodes.
+ for (unsigned h = height - 1; h; --h) {
+ for (unsigned i = 0, e = Refs.size(); i != e; ++i) {
+ for (unsigned j = 0, s = Refs[i].size(); j != s; ++j)
+ NextRefs.push_back(Refs[i].subtree(j));
+ (this->*f)(Refs[i], h);
+ }
+ Refs.clear();
+ Refs.swap(NextRefs);
+ }
+
+ // Visit all leaf nodes.
+ for (unsigned i = 0, e = Refs.size(); i != e; ++i)
+ (this->*f)(Refs[i], 0);
+}
+
+template <typename KeyT, typename ValT, unsigned N, typename Traits>
+void IntervalMap<KeyT, ValT, N, Traits>::
+deleteNode(IntervalMapImpl::NodeRef Node, unsigned Level) {
+ if (Level)
+ deleteNode(&Node.get<Branch>());
+ else
+ deleteNode(&Node.get<Leaf>());
+}
+
+template <typename KeyT, typename ValT, unsigned N, typename Traits>
+void IntervalMap<KeyT, ValT, N, Traits>::
+clear() {
+ if (branched()) {
+ visitNodes(&IntervalMap::deleteNode);
+ switchRootToLeaf();
+ }
+ rootSize = 0;
+}
+
+//===----------------------------------------------------------------------===//
+//--- IntervalMap::const_iterator ----//
+//===----------------------------------------------------------------------===//
+
+template <typename KeyT, typename ValT, unsigned N, typename Traits>
+class IntervalMap<KeyT, ValT, N, Traits>::const_iterator {
+ friend class IntervalMap;
+
+public:
+ using iterator_category = std::bidirectional_iterator_tag;
+ using value_type = ValT;
+ using difference_type = std::ptrdiff_t;
+ using pointer = value_type *;
+ using reference = value_type &;
+
+protected:
+ // The map referred to.
+ IntervalMap *map = nullptr;
+
+ // We store a full path from the root to the current position.
+ // The path may be partially filled, but never between iterator calls.
+ IntervalMapImpl::Path path;
+
+ explicit const_iterator(const IntervalMap &map) :
+ map(const_cast<IntervalMap*>(&map)) {}
+
+ bool branched() const {
+ assert(map && "Invalid iterator");
+ return map->branched();
+ }
+
+ void setRoot(unsigned Offset) {
+ if (branched())
+ path.setRoot(&map->rootBranch(), map->rootSize, Offset);
+ else
+ path.setRoot(&map->rootLeaf(), map->rootSize, Offset);
+ }
+
+ void pathFillFind(KeyT x);
+ void treeFind(KeyT x);
+ void treeAdvanceTo(KeyT x);
+
+ /// unsafeStart - Writable access to start() for iterator.
+ KeyT &unsafeStart() const {
+ assert(valid() && "Cannot access invalid iterator");
+ return branched() ? path.leaf<Leaf>().start(path.leafOffset()) :
+ path.leaf<RootLeaf>().start(path.leafOffset());
+ }
+
+ /// unsafeStop - Writable access to stop() for iterator.
+ KeyT &unsafeStop() const {
+ assert(valid() && "Cannot access invalid iterator");
+ return branched() ? path.leaf<Leaf>().stop(path.leafOffset()) :
+ path.leaf<RootLeaf>().stop(path.leafOffset());
+ }
+
+ /// unsafeValue - Writable access to value() for iterator.
+ ValT &unsafeValue() const {
+ assert(valid() && "Cannot access invalid iterator");
+ return branched() ? path.leaf<Leaf>().value(path.leafOffset()) :
+ path.leaf<RootLeaf>().value(path.leafOffset());
+ }
+
+public:
+ /// const_iterator - Create an iterator that isn't pointing anywhere.
+ const_iterator() = default;
+
+ /// setMap - Change the map iterated over. This call must be followed by a
+ /// call to goToBegin(), goToEnd(), or find().
+ void setMap(const IntervalMap &m) { map = const_cast<IntervalMap*>(&m); }
+
+ /// valid - Return true if the current position is valid, false for end().
+ bool valid() const { return path.valid(); }
+
+ /// atBegin - Return true if the current position is the first map entry.
+ bool atBegin() const { return path.atBegin(); }
+
+ /// start - Return the beginning of the current interval.
+ const KeyT &start() const { return unsafeStart(); }
+
+ /// stop - Return the end of the current interval.
+ const KeyT &stop() const { return unsafeStop(); }
+
+ /// value - Return the mapped value at the current interval.
+ const ValT &value() const { return unsafeValue(); }
+
+ const ValT &operator*() const { return value(); }
+
+ bool operator==(const const_iterator &RHS) const {
+ assert(map == RHS.map && "Cannot compare iterators from different maps");
+ if (!valid())
+ return !RHS.valid();
+ if (path.leafOffset() != RHS.path.leafOffset())
+ return false;
+ return &path.template leaf<Leaf>() == &RHS.path.template leaf<Leaf>();
+ }
+
+ bool operator!=(const const_iterator &RHS) const {
+ return !operator==(RHS);
+ }
+
+ /// goToBegin - Move to the first interval in map.
+ void goToBegin() {
+ setRoot(0);
+ if (branched())
+ path.fillLeft(map->height);
+ }
+
+ /// goToEnd - Move beyond the last interval in map.
+ void goToEnd() {
+ setRoot(map->rootSize);
+ }
+
+ /// preincrement - Move to the next interval.
+ const_iterator &operator++() {
+ assert(valid() && "Cannot increment end()");
+ if (++path.leafOffset() == path.leafSize() && branched())
+ path.moveRight(map->height);
+ return *this;
+ }
+
+ /// postincrement - Don't do that!
+ const_iterator operator++(int) {
+ const_iterator tmp = *this;
+ operator++();
+ return tmp;
+ }
+
+ /// predecrement - Move to the previous interval.
+ const_iterator &operator--() {
+ if (path.leafOffset() && (valid() || !branched()))
+ --path.leafOffset();
+ else
+ path.moveLeft(map->height);
+ return *this;
+ }
+
+ /// postdecrement - Don't do that!
+ const_iterator operator--(int) {
+ const_iterator tmp = *this;
+ operator--();
+ return tmp;
+ }
+
+ /// find - Move to the first interval with stop >= x, or end().
+ /// This is a full search from the root, the current position is ignored.
+ void find(KeyT x) {
+ if (branched())
+ treeFind(x);
+ else
+ setRoot(map->rootLeaf().findFrom(0, map->rootSize, x));
+ }
+
+ /// advanceTo - Move to the first interval with stop >= x, or end().
+ /// The search is started from the current position, and no earlier positions
+ /// can be found. This is much faster than find() for small moves.
+ void advanceTo(KeyT x) {
+ if (!valid())
+ return;
+ if (branched())
+ treeAdvanceTo(x);
+ else
+ path.leafOffset() =
+ map->rootLeaf().findFrom(path.leafOffset(), map->rootSize, x);
+ }
+};
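+
+// Iteration sketch (illustrative; 'M' and 'use' are placeholders): each
+// iterator position denotes one interval with a start, a stop, and a value.
+//
+//   for (IntervalMap<unsigned, char>::const_iterator I = M.begin(),
+//        E = M.end(); I != E; ++I)
+//     use(I.start(), I.stop(), *I);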
+
+/// pathFillFind - Complete path by searching for x.
+/// @param x Key to search for.
+template <typename KeyT, typename ValT, unsigned N, typename Traits>
+void IntervalMap<KeyT, ValT, N, Traits>::
+const_iterator::pathFillFind(KeyT x) {
+ IntervalMapImpl::NodeRef NR = path.subtree(path.height());
+ for (unsigned i = map->height - path.height() - 1; i; --i) {
+ unsigned p = NR.get<Branch>().safeFind(0, x);
+ path.push(NR, p);
+ NR = NR.subtree(p);
+ }
+ path.push(NR, NR.get<Leaf>().safeFind(0, x));
+}
+
+/// treeFind - Find in a branched tree.
+/// @param x Key to search for.
+template <typename KeyT, typename ValT, unsigned N, typename Traits>
+void IntervalMap<KeyT, ValT, N, Traits>::
+const_iterator::treeFind(KeyT x) {
+ setRoot(map->rootBranch().findFrom(0, map->rootSize, x));
+ if (valid())
+ pathFillFind(x);
+}
+
+/// treeAdvanceTo - Find position after the current one.
+/// @param x Key to search for.
+template <typename KeyT, typename ValT, unsigned N, typename Traits>
+void IntervalMap<KeyT, ValT, N, Traits>::
+const_iterator::treeAdvanceTo(KeyT x) {
+ // Can we stay on the same leaf node?
+ if (!Traits::stopLess(path.leaf<Leaf>().stop(path.leafSize() - 1), x)) {
+ path.leafOffset() = path.leaf<Leaf>().safeFind(path.leafOffset(), x);
+ return;
+ }
+
+ // Drop the current leaf.
+ path.pop();
+
+ // Search towards the root for a usable subtree.
+ if (path.height()) {
+ for (unsigned l = path.height() - 1; l; --l) {
+ if (!Traits::stopLess(path.node<Branch>(l).stop(path.offset(l)), x)) {
+ // The branch node at l+1 is usable.
+ path.offset(l + 1) =
+ path.node<Branch>(l + 1).safeFind(path.offset(l + 1), x);
+ return pathFillFind(x);
+ }
+ path.pop();
+ }
+ // Is the level-1 Branch usable?
+ if (!Traits::stopLess(map->rootBranch().stop(path.offset(0)), x)) {
+ path.offset(1) = path.node<Branch>(1).safeFind(path.offset(1), x);
+ return pathFillFind(x);
+ }
+ }
+
+ // We reached the root.
+ setRoot(map->rootBranch().findFrom(path.offset(0), map->rootSize, x));
+ if (valid())
+ pathFillFind(x);
+}
+
+//===----------------------------------------------------------------------===//
+//--- IntervalMap::iterator ----//
+//===----------------------------------------------------------------------===//
+
+template <typename KeyT, typename ValT, unsigned N, typename Traits>
+class IntervalMap<KeyT, ValT, N, Traits>::iterator : public const_iterator {
+ friend class IntervalMap;
+
+ using IdxPair = IntervalMapImpl::IdxPair;
+
+ explicit iterator(IntervalMap &map) : const_iterator(map) {}
+
+ void setNodeStop(unsigned Level, KeyT Stop);
+ bool insertNode(unsigned Level, IntervalMapImpl::NodeRef Node, KeyT Stop);
+ template <typename NodeT> bool overflow(unsigned Level);
+ void treeInsert(KeyT a, KeyT b, ValT y);
+ void eraseNode(unsigned Level);
+ void treeErase(bool UpdateRoot = true);
+ bool canCoalesceLeft(KeyT Start, ValT x);
+ bool canCoalesceRight(KeyT Stop, ValT x);
+
+public:
+ /// iterator - Create null iterator.
+ iterator() = default;
+
+ /// setStart - Move the start of the current interval.
+ /// This may cause coalescing with the previous interval.
+ /// @param a New start key, must not overlap the previous interval.
+ void setStart(KeyT a);
+
+ /// setStop - Move the end of the current interval.
+ /// This may cause coalescing with the following interval.
+ /// @param b New stop key, must not overlap the following interval.
+ void setStop(KeyT b);
+
+ /// setValue - Change the mapped value of the current interval.
+ /// This may cause coalescing with the previous and following intervals.
+ /// @param x New value.
+ void setValue(ValT x);
+
+ /// setStartUnchecked - Move the start of the current interval without
+ /// checking for coalescing or overlaps.
+ /// This should only be used when it is known that coalescing is not required.
+ /// @param a New start key.
+ void setStartUnchecked(KeyT a) { this->unsafeStart() = a; }
+
+ /// setStopUnchecked - Move the end of the current interval without checking
+ /// for coalescing or overlaps.
+ /// This should only be used when it is known that coalescing is not required.
+ /// @param b New stop key.
+ void setStopUnchecked(KeyT b) {
+ this->unsafeStop() = b;
+ // Update keys in branch nodes as well.
+ if (this->path.atLastEntry(this->path.height()))
+ setNodeStop(this->path.height(), b);
+ }
+
+ /// setValueUnchecked - Change the mapped value of the current interval
+ /// without checking for coalescing.
+ /// @param x New value.
+ void setValueUnchecked(ValT x) { this->unsafeValue() = x; }
+
+ /// insert - Insert mapping [a;b] -> y before the current position.
+ void insert(KeyT a, KeyT b, ValT y);
+
+ /// erase - Erase the current interval.
+ void erase();
+
+ iterator &operator++() {
+ const_iterator::operator++();
+ return *this;
+ }
+
+ iterator operator++(int) {
+ iterator tmp = *this;
+ operator++();
+ return tmp;
+ }
+
+ iterator &operator--() {
+ const_iterator::operator--();
+ return *this;
+ }
+
+ iterator operator--(int) {
+ iterator tmp = *this;
+ operator--();
+ return tmp;
+ }
+};
+
+/// canCoalesceLeft - Can the current interval coalesce to the left after
+/// changing start or value?
+/// @param Start New start of current interval.
+/// @param Value New value for current interval.
+/// @return True when updating the current interval would enable coalescing.
+template <typename KeyT, typename ValT, unsigned N, typename Traits>
+bool IntervalMap<KeyT, ValT, N, Traits>::
+iterator::canCoalesceLeft(KeyT Start, ValT Value) {
+ using namespace IntervalMapImpl;
+ Path &P = this->path;
+ if (!this->branched()) {
+ unsigned i = P.leafOffset();
+ RootLeaf &Node = P.leaf<RootLeaf>();
+ return i && Node.value(i-1) == Value &&
+ Traits::adjacent(Node.stop(i-1), Start);
+ }
+ // Branched.
+ if (unsigned i = P.leafOffset()) {
+ Leaf &Node = P.leaf<Leaf>();
+ return Node.value(i-1) == Value && Traits::adjacent(Node.stop(i-1), Start);
+ } else if (NodeRef NR = P.getLeftSibling(P.height())) {
+ unsigned i = NR.size() - 1;
+ Leaf &Node = NR.get<Leaf>();
+ return Node.value(i) == Value && Traits::adjacent(Node.stop(i), Start);
+ }
+ return false;
+}
+
+/// canCoalesceRight - Can the current interval coalesce to the right after
+/// changing stop or value?
+/// @param Stop New stop of current interval.
+/// @param Value New value for current interval.
+/// @return True when updating the current interval would enable coalescing.
+template <typename KeyT, typename ValT, unsigned N, typename Traits>
+bool IntervalMap<KeyT, ValT, N, Traits>::
+iterator::canCoalesceRight(KeyT Stop, ValT Value) {
+ using namespace IntervalMapImpl;
+ Path &P = this->path;
+ unsigned i = P.leafOffset() + 1;
+ if (!this->branched()) {
+ if (i >= P.leafSize())
+ return false;
+ RootLeaf &Node = P.leaf<RootLeaf>();
+ return Node.value(i) == Value && Traits::adjacent(Stop, Node.start(i));
+ }
+ // Branched.
+ if (i < P.leafSize()) {
+ Leaf &Node = P.leaf<Leaf>();
+ return Node.value(i) == Value && Traits::adjacent(Stop, Node.start(i));
+ } else if (NodeRef NR = P.getRightSibling(P.height())) {
+ Leaf &Node = NR.get<Leaf>();
+ return Node.value(0) == Value && Traits::adjacent(Stop, Node.start(0));
+ }
+ return false;
+}
+
+/// setNodeStop - Update the stop key of the current node at level and above.
+template <typename KeyT, typename ValT, unsigned N, typename Traits>
+void IntervalMap<KeyT, ValT, N, Traits>::
+iterator::setNodeStop(unsigned Level, KeyT Stop) {
+ // There are no references to the root node, so nothing to update.
+ if (!Level)
+ return;
+ IntervalMapImpl::Path &P = this->path;
+ // Update nodes pointing to the current node.
+ while (--Level) {
+ P.node<Branch>(Level).stop(P.offset(Level)) = Stop;
+ if (!P.atLastEntry(Level))
+ return;
+ }
+ // Update root separately since it has a different layout.
+ P.node<RootBranch>(Level).stop(P.offset(Level)) = Stop;
+}
+
+template <typename KeyT, typename ValT, unsigned N, typename Traits>
+void IntervalMap<KeyT, ValT, N, Traits>::
+iterator::setStart(KeyT a) {
+ assert(Traits::nonEmpty(a, this->stop()) && "Cannot move start beyond stop");
+ KeyT &CurStart = this->unsafeStart();
+ if (!Traits::startLess(a, CurStart) || !canCoalesceLeft(a, this->value())) {
+ CurStart = a;
+ return;
+ }
+ // Coalesce with the interval to the left.
+ --*this;
+ a = this->start();
+ erase();
+ setStartUnchecked(a);
+}
+
+template <typename KeyT, typename ValT, unsigned N, typename Traits>
+void IntervalMap<KeyT, ValT, N, Traits>::
+iterator::setStop(KeyT b) {
+ assert(Traits::nonEmpty(this->start(), b) && "Cannot move stop beyond start");
+ if (Traits::startLess(b, this->stop()) ||
+ !canCoalesceRight(b, this->value())) {
+ setStopUnchecked(b);
+ return;
+ }
+ // Coalesce with interval to the right.
+ KeyT a = this->start();
+ erase();
+ setStartUnchecked(a);
+}
+
+template <typename KeyT, typename ValT, unsigned N, typename Traits>
+void IntervalMap<KeyT, ValT, N, Traits>::
+iterator::setValue(ValT x) {
+ setValueUnchecked(x);
+ if (canCoalesceRight(this->stop(), x)) {
+ KeyT a = this->start();
+ erase();
+ setStartUnchecked(a);
+ }
+ if (canCoalesceLeft(this->start(), x)) {
+ --*this;
+ KeyT a = this->start();
+ erase();
+ setStartUnchecked(a);
+ }
+}
+
+/// insertNode - insert a node before the current path at level.
+/// Leave the current path pointing at the new node.
+/// @param Level path index of the node to be inserted.
+/// @param Node The node to be inserted.
+/// @param Stop The last index in the new node.
+/// @return True if the tree height was increased.
+template <typename KeyT, typename ValT, unsigned N, typename Traits>
+bool IntervalMap<KeyT, ValT, N, Traits>::
+iterator::insertNode(unsigned Level, IntervalMapImpl::NodeRef Node, KeyT Stop) {
+ assert(Level && "Cannot insert next to the root");
+ bool SplitRoot = false;
+ IntervalMap &IM = *this->map;
+ IntervalMapImpl::Path &P = this->path;
+
+ if (Level == 1) {
+ // Insert into the root branch node.
+ if (IM.rootSize < RootBranch::Capacity) {
+ IM.rootBranch().insert(P.offset(0), IM.rootSize, Node, Stop);
+ P.setSize(0, ++IM.rootSize);
+ P.reset(Level);
+ return SplitRoot;
+ }
+
+ // We need to split the root while keeping our position.
+ SplitRoot = true;
+ IdxPair Offset = IM.splitRoot(P.offset(0));
+ P.replaceRoot(&IM.rootBranch(), IM.rootSize, Offset);
+
+ // Fall through to insert at the new higher level.
+ ++Level;
+ }
+
+ // When inserting before end(), make sure we have a valid path.
+ P.legalizeForInsert(--Level);
+
+ // Insert into the branch node at Level-1.
+ if (P.size(Level) == Branch::Capacity) {
+ // Branch node is full, handle the overflow.
+ assert(!SplitRoot && "Cannot overflow after splitting the root");
+ SplitRoot = overflow<Branch>(Level);
+ Level += SplitRoot;
+ }
+ P.node<Branch>(Level).insert(P.offset(Level), P.size(Level), Node, Stop);
+ P.setSize(Level, P.size(Level) + 1);
+ if (P.atLastEntry(Level))
+ setNodeStop(Level, Stop);
+ P.reset(Level + 1);
+ return SplitRoot;
+}
+
+// insert
+template <typename KeyT, typename ValT, unsigned N, typename Traits>
+void IntervalMap<KeyT, ValT, N, Traits>::
+iterator::insert(KeyT a, KeyT b, ValT y) {
+ if (this->branched())
+ return treeInsert(a, b, y);
+ IntervalMap &IM = *this->map;
+ IntervalMapImpl::Path &P = this->path;
+
+ // Try simple root leaf insert.
+ unsigned Size = IM.rootLeaf().insertFrom(P.leafOffset(), IM.rootSize, a, b, y);
+
+ // Was the root node insert successful?
+ if (Size <= RootLeaf::Capacity) {
+ P.setSize(0, IM.rootSize = Size);
+ return;
+ }
+
+ // Root leaf node is full, we must branch.
+ IdxPair Offset = IM.branchRoot(P.leafOffset());
+ P.replaceRoot(&IM.rootBranch(), IM.rootSize, Offset);
+
+ // Now it fits in the new leaf.
+ treeInsert(a, b, y);
+}
+
+template <typename KeyT, typename ValT, unsigned N, typename Traits>
+void IntervalMap<KeyT, ValT, N, Traits>::
+iterator::treeInsert(KeyT a, KeyT b, ValT y) {
+ using namespace IntervalMapImpl;
+ Path &P = this->path;
+
+ if (!P.valid())
+ P.legalizeForInsert(this->map->height);
+
+ // Check if this insertion will extend the node to the left.
+ if (P.leafOffset() == 0 && Traits::startLess(a, P.leaf<Leaf>().start(0))) {
+ // Node is growing to the left, will it affect a left sibling node?
+ if (NodeRef Sib = P.getLeftSibling(P.height())) {
+ Leaf &SibLeaf = Sib.get<Leaf>();
+ unsigned SibOfs = Sib.size() - 1;
+ if (SibLeaf.value(SibOfs) == y &&
+ Traits::adjacent(SibLeaf.stop(SibOfs), a)) {
+ // This insertion will coalesce with the last entry in SibLeaf. We can
+ // handle it in two ways:
+ // 1. Extend SibLeaf.stop to b and be done, or
+ // 2. Extend a to SibLeaf, erase the SibLeaf entry and continue.
+ // We prefer 1., but need 2. when coalescing to the right as well.
+ Leaf &CurLeaf = P.leaf<Leaf>();
+ P.moveLeft(P.height());
+ if (Traits::stopLess(b, CurLeaf.start(0)) &&
+ (y != CurLeaf.value(0) || !Traits::adjacent(b, CurLeaf.start(0)))) {
+ // Easy, just extend SibLeaf and we're done.
+ setNodeStop(P.height(), SibLeaf.stop(SibOfs) = b);
+ return;
+ } else {
+ // We have both left and right coalescing. Erase the old SibLeaf entry
+ // and continue inserting the larger interval.
+ a = SibLeaf.start(SibOfs);
+ treeErase(/* UpdateRoot= */false);
+ }
+ }
+ } else {
+ // No left sibling means we are at begin(). Update cached bound.
+ this->map->rootBranchStart() = a;
+ }
+ }
+
+ // When we are inserting at the end of a leaf node, we must update stops.
+ unsigned Size = P.leafSize();
+ bool Grow = P.leafOffset() == Size;
+ Size = P.leaf<Leaf>().insertFrom(P.leafOffset(), Size, a, b, y);
+
+ // Leaf insertion unsuccessful? Overflow and try again.
+ if (Size > Leaf::Capacity) {
+ overflow<Leaf>(P.height());
+ Grow = P.leafOffset() == P.leafSize();
+ Size = P.leaf<Leaf>().insertFrom(P.leafOffset(), P.leafSize(), a, b, y);
+ assert(Size <= Leaf::Capacity && "overflow() didn't make room");
+ }
+
+ // Inserted, update offset and leaf size.
+ P.setSize(P.height(), Size);
+
+ // Insert was the last node entry, update stops.
+ if (Grow)
+ setNodeStop(P.height(), b);
+}
+
+/// erase - erase the current interval and move to the next position.
+template <typename KeyT, typename ValT, unsigned N, typename Traits>
+void IntervalMap<KeyT, ValT, N, Traits>::
+iterator::erase() {
+ IntervalMap &IM = *this->map;
+ IntervalMapImpl::Path &P = this->path;
+ assert(P.valid() && "Cannot erase end()");
+ if (this->branched())
+ return treeErase();
+ IM.rootLeaf().erase(P.leafOffset(), IM.rootSize);
+ P.setSize(0, --IM.rootSize);
+}
+
+/// treeErase - erase() for a branched tree.
+template <typename KeyT, typename ValT, unsigned N, typename Traits>
+void IntervalMap<KeyT, ValT, N, Traits>::
+iterator::treeErase(bool UpdateRoot) {
+ IntervalMap &IM = *this->map;
+ IntervalMapImpl::Path &P = this->path;
+ Leaf &Node = P.leaf<Leaf>();
+
+ // Nodes are not allowed to become empty.
+ if (P.leafSize() == 1) {
+ IM.deleteNode(&Node);
+ eraseNode(IM.height);
+ // Update rootBranchStart if we erased begin().
+ if (UpdateRoot && IM.branched() && P.valid() && P.atBegin())
+ IM.rootBranchStart() = P.leaf<Leaf>().start(0);
+ return;
+ }
+
+ // Erase current entry.
+ Node.erase(P.leafOffset(), P.leafSize());
+ unsigned NewSize = P.leafSize() - 1;
+ P.setSize(IM.height, NewSize);
+ // When we erase the last entry, update stop and move to a legal position.
+ if (P.leafOffset() == NewSize) {
+ setNodeStop(IM.height, Node.stop(NewSize - 1));
+ P.moveRight(IM.height);
+ } else if (UpdateRoot && P.atBegin())
+ IM.rootBranchStart() = P.leaf<Leaf>().start(0);
+}
+
+/// eraseNode - Erase the current node at Level from its parent and move path to
+/// the first entry of the next sibling node.
+/// The node must be deallocated by the caller.
+/// @param Level 1..height, the root node cannot be erased.
+template <typename KeyT, typename ValT, unsigned N, typename Traits>
+void IntervalMap<KeyT, ValT, N, Traits>::
+iterator::eraseNode(unsigned Level) {
+ assert(Level && "Cannot erase root node");
+ IntervalMap &IM = *this->map;
+ IntervalMapImpl::Path &P = this->path;
+
+ if (--Level == 0) {
+ IM.rootBranch().erase(P.offset(0), IM.rootSize);
+ P.setSize(0, --IM.rootSize);
+ // If this cleared the root, switch to height=0.
+ if (IM.empty()) {
+ IM.switchRootToLeaf();
+ this->setRoot(0);
+ return;
+ }
+ } else {
+ // Remove node ref from branch node at Level.
+ Branch &Parent = P.node<Branch>(Level);
+ if (P.size(Level) == 1) {
+ // Branch node became empty, remove it recursively.
+ IM.deleteNode(&Parent);
+ eraseNode(Level);
+ } else {
+ // Branch node won't become empty.
+ Parent.erase(P.offset(Level), P.size(Level));
+ unsigned NewSize = P.size(Level) - 1;
+ P.setSize(Level, NewSize);
+ // If we removed the last branch, update stop and move to a legal pos.
+ if (P.offset(Level) == NewSize) {
+ setNodeStop(Level, Parent.stop(NewSize - 1));
+ P.moveRight(Level);
+ }
+ }
+ }
+ // Update path cache for the new right sibling position.
+ if (P.valid()) {
+ P.reset(Level + 1);
+ P.offset(Level + 1) = 0;
+ }
+}
+
+/// overflow - Distribute entries of the current node evenly among
+/// its siblings and ensure that the current node is not full.
+/// This may require allocating a new node.
+/// @tparam NodeT The type of node at Level (Leaf or Branch).
+/// @param Level path index of the overflowing node.
+/// @return True when the tree height was changed.
+template <typename KeyT, typename ValT, unsigned N, typename Traits>
+template <typename NodeT>
+bool IntervalMap<KeyT, ValT, N, Traits>::
+iterator::overflow(unsigned Level) {
+ using namespace IntervalMapImpl;
+ Path &P = this->path;
+ unsigned CurSize[4];
+ NodeT *Node[4];
+ unsigned Nodes = 0;
+ unsigned Elements = 0;
+ unsigned Offset = P.offset(Level);
+
+ // Do we have a left sibling?
+ NodeRef LeftSib = P.getLeftSibling(Level);
+ if (LeftSib) {
+ Offset += Elements = CurSize[Nodes] = LeftSib.size();
+ Node[Nodes++] = &LeftSib.get<NodeT>();
+ }
+
+ // Current node.
+ Elements += CurSize[Nodes] = P.size(Level);
+ Node[Nodes++] = &P.node<NodeT>(Level);
+
+ // Do we have a right sibling?
+ NodeRef RightSib = P.getRightSibling(Level);
+ if (RightSib) {
+ Elements += CurSize[Nodes] = RightSib.size();
+ Node[Nodes++] = &RightSib.get<NodeT>();
+ }
+
+ // Do we need to allocate a new node?
+ unsigned NewNode = 0;
+ if (Elements + 1 > Nodes * NodeT::Capacity) {
+ // Insert NewNode at the penultimate position, or after a single node.
+ NewNode = Nodes == 1 ? 1 : Nodes - 1;
+ CurSize[Nodes] = CurSize[NewNode];
+ Node[Nodes] = Node[NewNode];
+ CurSize[NewNode] = 0;
+ Node[NewNode] = this->map->template newNode<NodeT>();
+ ++Nodes;
+ }
+
+ // Compute the new element distribution.
+ unsigned NewSize[4];
+ IdxPair NewOffset = distribute(Nodes, Elements, NodeT::Capacity,
+ CurSize, NewSize, Offset, true);
+ adjustSiblingSizes(Node, Nodes, CurSize, NewSize);
+
+ // Move current location to the leftmost node.
+ if (LeftSib)
+ P.moveLeft(Level);
+
+ // Elements have been rearranged, now update node sizes and stops.
+ bool SplitRoot = false;
+ unsigned Pos = 0;
+ while (true) {
+ KeyT Stop = Node[Pos]->stop(NewSize[Pos]-1);
+ if (NewNode && Pos == NewNode) {
+ SplitRoot = insertNode(Level, NodeRef(Node[Pos], NewSize[Pos]), Stop);
+ Level += SplitRoot;
+ } else {
+ P.setSize(Level, NewSize[Pos]);
+ setNodeStop(Level, Stop);
+ }
+ if (Pos + 1 == Nodes)
+ break;
+ P.moveRight(Level);
+ ++Pos;
+ }
+
+ // Where was I? Find NewOffset.
+ while (Pos != NewOffset.first) {
+ P.moveLeft(Level);
+ --Pos;
+ }
+ P.offset(Level) = NewOffset.second;
+ return SplitRoot;
+}
+
+//===----------------------------------------------------------------------===//
+//--- IntervalMapOverlaps ----//
+//===----------------------------------------------------------------------===//
+
+/// IntervalMapOverlaps - Iterate over the overlaps of mapped intervals in two
+/// IntervalMaps. The maps may be different, but the KeyT and Traits types
+/// should be the same.
+///
+/// Typical uses:
+///
+/// 1. Test for overlap:
+/// bool overlap = IntervalMapOverlaps(a, b).valid();
+///
+/// 2. Enumerate overlaps:
+/// for (IntervalMapOverlaps I(a, b); I.valid() ; ++I) { ... }
+///
+template <typename MapA, typename MapB>
+class IntervalMapOverlaps {
+ using KeyType = typename MapA::KeyType;
+ using Traits = typename MapA::KeyTraits;
+
+ typename MapA::const_iterator posA;
+ typename MapB::const_iterator posB;
+
+ /// advance - Move posA and posB forward until reaching an overlap, or until
+ /// either meets end.
+ /// Don't move the iterators if they are already overlapping.
+ void advance() {
+ if (!valid())
+ return;
+
+ if (Traits::stopLess(posA.stop(), posB.start())) {
+ // A ends before B begins. Catch up.
+ posA.advanceTo(posB.start());
+ if (!posA.valid() || !Traits::stopLess(posB.stop(), posA.start()))
+ return;
+ } else if (Traits::stopLess(posB.stop(), posA.start())) {
+ // B ends before A begins. Catch up.
+ posB.advanceTo(posA.start());
+ if (!posB.valid() || !Traits::stopLess(posA.stop(), posB.start()))
+ return;
+ } else
+ // Already overlapping.
+ return;
+
+ while (true) {
+ // Make a.end > b.start.
+ posA.advanceTo(posB.start());
+ if (!posA.valid() || !Traits::stopLess(posB.stop(), posA.start()))
+ return;
+ // Make b.end > a.start.
+ posB.advanceTo(posA.start());
+ if (!posB.valid() || !Traits::stopLess(posA.stop(), posB.start()))
+ return;
+ }
+ }
+
+public:
+ /// IntervalMapOverlaps - Create an iterator for the overlaps of a and b.
+ IntervalMapOverlaps(const MapA &a, const MapB &b)
+ : posA(b.empty() ? a.end() : a.find(b.start())),
+ posB(posA.valid() ? b.find(posA.start()) : b.end()) { advance(); }
+
+ /// valid - Return true if iterator is at an overlap.
+ bool valid() const {
+ return posA.valid() && posB.valid();
+ }
+
+ /// a - access the left hand side in the overlap.
+ const typename MapA::const_iterator &a() const { return posA; }
+
+ /// b - access the right hand side in the overlap.
+ const typename MapB::const_iterator &b() const { return posB; }
+
+ /// start - Beginning of the overlapping interval.
+ KeyType start() const {
+ KeyType ak = a().start();
+ KeyType bk = b().start();
+ return Traits::startLess(ak, bk) ? bk : ak;
+ }
+
+ /// stop - End of the overlapping interval.
+ KeyType stop() const {
+ KeyType ak = a().stop();
+ KeyType bk = b().stop();
+ return Traits::startLess(ak, bk) ? ak : bk;
+ }
+
+ /// skipA - Move to the next overlap that doesn't involve a().
+ void skipA() {
+ ++posA;
+ advance();
+ }
+
+ /// skipB - Move to the next overlap that doesn't involve b().
+ void skipB() {
+ ++posB;
+ advance();
+ }
+
+ /// Preincrement - Move to the next overlap.
+ IntervalMapOverlaps &operator++() {
+ // Bump the iterator that ends first. The other one may have more overlaps.
+ if (Traits::startLess(posB.stop(), posA.stop()))
+ skipB();
+ else
+ skipA();
+ return *this;
+ }
+
+ /// advanceTo - Move to the first overlapping interval with
+ /// stopLess(x, stop()).
+ void advanceTo(KeyType x) {
+ if (!valid())
+ return;
+ // Make sure advanceTo sees monotonic keys.
+ if (Traits::stopLess(posA.stop(), x))
+ posA.advanceTo(x);
+ if (Traits::stopLess(posB.stop(), x))
+ posB.advanceTo(x);
+ advance();
+ }
+};
+
+} // end namespace llvm
+
+#endif // LLVM_ADT_INTERVALMAP_H
+
+#ifdef __GNUC__
+#pragma GCC diagnostic pop
+#endif
diff --git a/contrib/libs/llvm14/include/llvm/ADT/IntrusiveRefCntPtr.h b/contrib/libs/llvm14/include/llvm/ADT/IntrusiveRefCntPtr.h
new file mode 100644
index 0000000000..ba7d8498b1
--- /dev/null
+++ b/contrib/libs/llvm14/include/llvm/ADT/IntrusiveRefCntPtr.h
@@ -0,0 +1,321 @@
+#pragma once
+
+#ifdef __GNUC__
+#pragma GCC diagnostic push
+#pragma GCC diagnostic ignored "-Wunused-parameter"
+#endif
+
+//==- llvm/ADT/IntrusiveRefCntPtr.h - Smart Refcounting Pointer --*- C++ -*-==//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+///
+/// \file
+/// This file defines the RefCountedBase, ThreadSafeRefCountedBase, and
+/// IntrusiveRefCntPtr classes.
+///
+/// IntrusiveRefCntPtr is a smart pointer to an object which maintains a
+/// reference count. (ThreadSafe)RefCountedBase is a mixin class that adds a
+/// refcount member variable and methods for updating the refcount. An object
+/// that inherits from (ThreadSafe)RefCountedBase deletes itself when its
+/// refcount hits zero.
+///
+/// For example:
+///
+/// ```
+/// class MyClass : public RefCountedBase<MyClass> {};
+///
+/// void foo() {
+/// // Constructing an IntrusiveRefCntPtr increases the pointee's refcount
+/// // by 1 (from 0 in this case).
+/// IntrusiveRefCntPtr<MyClass> Ptr1(new MyClass());
+///
+/// // Copying an IntrusiveRefCntPtr increases the pointee's refcount by 1.
+/// IntrusiveRefCntPtr<MyClass> Ptr2(Ptr1);
+///
+/// // Moving an IntrusiveRefCntPtr has no effect on the object's
+/// // refcount. After a move, the moved-from pointer is null.
+/// IntrusiveRefCntPtr<MyClass> Ptr3(std::move(Ptr1));
+/// assert(Ptr1 == nullptr);
+///
+/// // Clearing an IntrusiveRefCntPtr decreases the pointee's refcount by 1.
+/// Ptr2.reset();
+///
+/// // The object deletes itself when we return from the function, because
+/// // Ptr3's destructor decrements its refcount to 0.
+/// }
+/// ```
+///
+/// You can use IntrusiveRefCntPtr with isa<T>(), dyn_cast<T>(), etc.:
+///
+/// ```
+/// IntrusiveRefCntPtr<MyClass> Ptr(new MyClass());
+/// OtherClass *Other = dyn_cast<OtherClass>(Ptr); // Ptr.get() not required
+/// ```
+///
+/// IntrusiveRefCntPtr works with any class that
+///
+/// - inherits from (ThreadSafe)RefCountedBase,
+/// - has Retain() and Release() methods, or
+/// - specializes IntrusiveRefCntPtrInfo.
+///
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_ADT_INTRUSIVEREFCNTPTR_H
+#define LLVM_ADT_INTRUSIVEREFCNTPTR_H
+
+#include <atomic>
+#include <cassert>
+#include <cstddef>
+#include <memory>
+
+namespace llvm {
+
+/// A CRTP mixin class that adds reference counting to a type.
+///
+/// The lifetime of an object which inherits from RefCountedBase is managed by
+/// calls to Release() and Retain(), which increment and decrement the object's
+/// refcount, respectively. When a Release() call decrements the refcount to 0,
+/// the object deletes itself.
+template <class Derived> class RefCountedBase {
+ mutable unsigned RefCount = 0;
+
+protected:
+ RefCountedBase() = default;
+ RefCountedBase(const RefCountedBase &) {}
+ RefCountedBase &operator=(const RefCountedBase &) = delete;
+
+#ifndef NDEBUG
+ ~RefCountedBase() {
+ assert(RefCount == 0 &&
+ "Destruction occured when there are still references to this.");
+ }
+#else
+ // Default the destructor in release builds; a trivial destructor may enable
+ // better codegen.
+ ~RefCountedBase() = default;
+#endif
+
+public:
+ void Retain() const { ++RefCount; }
+
+ void Release() const {
+ assert(RefCount > 0 && "Reference count is already zero.");
+ if (--RefCount == 0)
+ delete static_cast<const Derived *>(this);
+ }
+};
+
+/// A thread-safe version of \c RefCountedBase.
+template <class Derived> class ThreadSafeRefCountedBase {
+ mutable std::atomic<int> RefCount{0};
+
+protected:
+ ThreadSafeRefCountedBase() = default;
+ ThreadSafeRefCountedBase(const ThreadSafeRefCountedBase &) {}
+ ThreadSafeRefCountedBase &
+ operator=(const ThreadSafeRefCountedBase &) = delete;
+
+#ifndef NDEBUG
+ ~ThreadSafeRefCountedBase() {
+ assert(RefCount == 0 &&
+ "Destruction occured when there are still references to this.");
+ }
+#else
+ // Default the destructor in release builds; a trivial destructor may enable
+ // better codegen.
+ ~ThreadSafeRefCountedBase() = default;
+#endif
+
+public:
+ void Retain() const { RefCount.fetch_add(1, std::memory_order_relaxed); }
+
+ void Release() const {
+ int NewRefCount = RefCount.fetch_sub(1, std::memory_order_acq_rel) - 1;
+ assert(NewRefCount >= 0 && "Reference count was already zero.");
+ if (NewRefCount == 0)
+ delete static_cast<const Derived *>(this);
+ }
+};
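+
+// A note on the orderings above: the relaxed increment suffices because
+// taking a reference publishes nothing by itself, while the acq_rel decrement
+// ensures the thread that drops the last reference observes all writes made
+// through other references before deleting the object.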
+
+/// Class you can specialize to provide custom retain/release functionality for
+/// a type.
+///
+/// Usually specializing this class is not necessary, as IntrusiveRefCntPtr
+/// works with any type which defines Retain() and Release() functions -- you
+/// can define those functions yourself if RefCountedBase doesn't work for you.
+///
+/// One case when you might want to specialize this type is if you have
+/// - Foo.h defines type Foo and includes Bar.h, and
+/// - Bar.h uses IntrusiveRefCntPtr<Foo> in inline functions.
+///
+/// Because Foo.h includes Bar.h, Bar.h can't include Foo.h in order to pull in
+/// the declaration of Foo. Without the declaration of Foo, normally Bar.h
+/// wouldn't be able to use IntrusiveRefCntPtr<Foo>, which wants to call
+/// T::Retain and T::Release.
+///
+/// To resolve this, Bar.h could include a third header, FooFwd.h, which
+/// forward-declares Foo and specializes IntrusiveRefCntPtrInfo<Foo>. Then
+/// Bar.h could use IntrusiveRefCntPtr<Foo>, although it still couldn't call any
+/// functions on Foo itself, because Foo would be an incomplete type.
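+///
+/// A hypothetical FooFwd.h following that scheme (illustrative only):
+///
+/// ```
+/// class Foo;
+/// template <> struct IntrusiveRefCntPtrInfo<Foo> {
+///   static void retain(Foo *F);  // Defined where Foo is complete.
+///   static void release(Foo *F); // Defined where Foo is complete.
+/// };
+/// ```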
+template <typename T> struct IntrusiveRefCntPtrInfo {
+ static void retain(T *obj) { obj->Retain(); }
+ static void release(T *obj) { obj->Release(); }
+};
+
+/// A smart pointer to a reference-counted object that inherits from
+/// RefCountedBase or ThreadSafeRefCountedBase.
+///
+/// This class increments its pointee's reference count when it is created, and
+/// decrements its refcount when it's destroyed (or is changed to point to a
+/// different object).
+template <typename T> class IntrusiveRefCntPtr {
+ T *Obj = nullptr;
+
+public:
+ using element_type = T;
+
+ explicit IntrusiveRefCntPtr() = default;
+ IntrusiveRefCntPtr(T *obj) : Obj(obj) { retain(); }
+ IntrusiveRefCntPtr(const IntrusiveRefCntPtr &S) : Obj(S.Obj) { retain(); }
+ IntrusiveRefCntPtr(IntrusiveRefCntPtr &&S) : Obj(S.Obj) { S.Obj = nullptr; }
+
+ template <class X,
+ std::enable_if_t<std::is_convertible<X *, T *>::value, bool> = true>
+ IntrusiveRefCntPtr(IntrusiveRefCntPtr<X> S) : Obj(S.get()) {
+ S.Obj = nullptr;
+ }
+
+ template <class X,
+ std::enable_if_t<std::is_convertible<X *, T *>::value, bool> = true>
+ IntrusiveRefCntPtr(std::unique_ptr<X> S) : Obj(S.release()) {
+ retain();
+ }
+
+ ~IntrusiveRefCntPtr() { release(); }
+
+ IntrusiveRefCntPtr &operator=(IntrusiveRefCntPtr S) {
+ swap(S);
+ return *this;
+ }
+
+ T &operator*() const { return *Obj; }
+ T *operator->() const { return Obj; }
+ T *get() const { return Obj; }
+ explicit operator bool() const { return Obj; }
+
+ void swap(IntrusiveRefCntPtr &other) {
+ T *tmp = other.Obj;
+ other.Obj = Obj;
+ Obj = tmp;
+ }
+
+ void reset() {
+ release();
+ Obj = nullptr;
+ }
+
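+  // Drops the pointer without decrementing the refcount; the caller takes over
+  // responsibility for the reference this pointer held.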
+ void resetWithoutRelease() { Obj = nullptr; }
+
+private:
+ void retain() {
+ if (Obj)
+ IntrusiveRefCntPtrInfo<T>::retain(Obj);
+ }
+
+ void release() {
+ if (Obj)
+ IntrusiveRefCntPtrInfo<T>::release(Obj);
+ }
+
+ template <typename X> friend class IntrusiveRefCntPtr;
+};
+
+template <class T, class U>
+inline bool operator==(const IntrusiveRefCntPtr<T> &A,
+ const IntrusiveRefCntPtr<U> &B) {
+ return A.get() == B.get();
+}
+
+template <class T, class U>
+inline bool operator!=(const IntrusiveRefCntPtr<T> &A,
+ const IntrusiveRefCntPtr<U> &B) {
+ return A.get() != B.get();
+}
+
+template <class T, class U>
+inline bool operator==(const IntrusiveRefCntPtr<T> &A, U *B) {
+ return A.get() == B;
+}
+
+template <class T, class U>
+inline bool operator!=(const IntrusiveRefCntPtr<T> &A, U *B) {
+ return A.get() != B;
+}
+
+template <class T, class U>
+inline bool operator==(T *A, const IntrusiveRefCntPtr<U> &B) {
+ return A == B.get();
+}
+
+template <class T, class U>
+inline bool operator!=(T *A, const IntrusiveRefCntPtr<U> &B) {
+ return A != B.get();
+}
+
+template <class T>
+bool operator==(std::nullptr_t, const IntrusiveRefCntPtr<T> &B) {
+ return !B;
+}
+
+template <class T>
+bool operator==(const IntrusiveRefCntPtr<T> &A, std::nullptr_t B) {
+ return B == A;
+}
+
+template <class T>
+bool operator!=(std::nullptr_t A, const IntrusiveRefCntPtr<T> &B) {
+ return !(A == B);
+}
+
+template <class T>
+bool operator!=(const IntrusiveRefCntPtr<T> &A, std::nullptr_t B) {
+ return !(A == B);
+}
+
+// Make IntrusiveRefCntPtr work with dyn_cast, isa, and the other idioms from
+// Casting.h.
+template <typename From> struct simplify_type;
+
+template <class T> struct simplify_type<IntrusiveRefCntPtr<T>> {
+ using SimpleType = T *;
+
+ static SimpleType getSimplifiedValue(IntrusiveRefCntPtr<T> &Val) {
+ return Val.get();
+ }
+};
+
+template <class T> struct simplify_type<const IntrusiveRefCntPtr<T>> {
+ using SimpleType = /*const*/ T *;
+
+ static SimpleType getSimplifiedValue(const IntrusiveRefCntPtr<T> &Val) {
+ return Val.get();
+ }
+};
+
+/// Factory function for creating intrusive ref counted pointers.
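+///
+/// For example (MyClass is an illustrative name):
+///
+/// ```
+/// auto Ptr = makeIntrusiveRefCnt<MyClass>(/* constructor args */);
+/// ```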
+template <typename T, typename... Args>
+IntrusiveRefCntPtr<T> makeIntrusiveRefCnt(Args &&...A) {
+ return IntrusiveRefCntPtr<T>(new T(std::forward<Args>(A)...));
+}
+
+} // end namespace llvm
+
+#endif // LLVM_ADT_INTRUSIVEREFCNTPTR_H
+
+#ifdef __GNUC__
+#pragma GCC diagnostic pop
+#endif
diff --git a/contrib/libs/llvm14/include/llvm/ADT/MapVector.h b/contrib/libs/llvm14/include/llvm/ADT/MapVector.h
new file mode 100644
index 0000000000..491371d9a9
--- /dev/null
+++ b/contrib/libs/llvm14/include/llvm/ADT/MapVector.h
@@ -0,0 +1,251 @@
+#pragma once
+
+#ifdef __GNUC__
+#pragma GCC diagnostic push
+#pragma GCC diagnostic ignored "-Wunused-parameter"
+#endif
+
+//===- llvm/ADT/MapVector.h - Map w/ deterministic value order --*- C++ -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+///
+/// \file
+/// This file implements a map that provides insertion order iteration. The
+/// interface is purposefully minimal. The key is assumed to be cheap to copy
+/// and 2 copies are kept, one for indexing in a DenseMap, one for iteration in
+/// a std::vector.
+///
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_ADT_MAPVECTOR_H
+#define LLVM_ADT_MAPVECTOR_H
+
+#include "llvm/ADT/DenseMap.h"
+#include "llvm/ADT/SmallVector.h"
+#include <cassert>
+#include <cstddef>
+#include <iterator>
+#include <type_traits>
+#include <utility>
+#include <vector>
+
+namespace llvm {
+
+/// This class implements a map that also provides access to all stored values
+/// in a deterministic order. The values are kept in a std::vector and the
+/// mapping is done with DenseMap from Keys to indexes in that vector.
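+///
+/// A short usage sketch (the element types are illustrative):
+/// \code
+///   MapVector<int, int> MV;
+///   MV.insert(std::make_pair(3, 30));
+///   MV.insert(std::make_pair(1, 10));
+///   // Iteration now visits (3, 30) then (1, 10): insertion order, not key
+///   // order, while MV[3] still looks up through the DenseMap.
+/// \endcode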
+template<typename KeyT, typename ValueT,
+ typename MapType = DenseMap<KeyT, unsigned>,
+ typename VectorType = std::vector<std::pair<KeyT, ValueT>>>
+class MapVector {
+ MapType Map;
+ VectorType Vector;
+
+ static_assert(
+ std::is_integral<typename MapType::mapped_type>::value,
+ "The mapped_type of the specified Map must be an integral type");
+
+public:
+ using key_type = KeyT;
+ using value_type = typename VectorType::value_type;
+ using size_type = typename VectorType::size_type;
+
+ using iterator = typename VectorType::iterator;
+ using const_iterator = typename VectorType::const_iterator;
+ using reverse_iterator = typename VectorType::reverse_iterator;
+ using const_reverse_iterator = typename VectorType::const_reverse_iterator;
+
+ /// Clear the MapVector and return the underlying vector.
+ VectorType takeVector() {
+ Map.clear();
+ return std::move(Vector);
+ }
+
+ size_type size() const { return Vector.size(); }
+
+ /// Grow the MapVector so that it can contain at least \p NumEntries items
+ /// before resizing again.
+ void reserve(size_type NumEntries) {
+ Map.reserve(NumEntries);
+ Vector.reserve(NumEntries);
+ }
+
+ iterator begin() { return Vector.begin(); }
+ const_iterator begin() const { return Vector.begin(); }
+ iterator end() { return Vector.end(); }
+ const_iterator end() const { return Vector.end(); }
+
+ reverse_iterator rbegin() { return Vector.rbegin(); }
+ const_reverse_iterator rbegin() const { return Vector.rbegin(); }
+ reverse_iterator rend() { return Vector.rend(); }
+ const_reverse_iterator rend() const { return Vector.rend(); }
+
+ bool empty() const {
+ return Vector.empty();
+ }
+
+ std::pair<KeyT, ValueT> &front() { return Vector.front(); }
+ const std::pair<KeyT, ValueT> &front() const { return Vector.front(); }
+ std::pair<KeyT, ValueT> &back() { return Vector.back(); }
+ const std::pair<KeyT, ValueT> &back() const { return Vector.back(); }
+
+ void clear() {
+ Map.clear();
+ Vector.clear();
+ }
+
+ void swap(MapVector &RHS) {
+ std::swap(Map, RHS.Map);
+ std::swap(Vector, RHS.Vector);
+ }
+
+ ValueT &operator[](const KeyT &Key) {
+ std::pair<KeyT, typename MapType::mapped_type> Pair = std::make_pair(Key, 0);
+ std::pair<typename MapType::iterator, bool> Result = Map.insert(Pair);
+ auto &I = Result.first->second;
+ if (Result.second) {
+ Vector.push_back(std::make_pair(Key, ValueT()));
+ I = Vector.size() - 1;
+ }
+ return Vector[I].second;
+ }
+
+ // Returns a copy of the value. Only allowed if ValueT is copyable.
+ ValueT lookup(const KeyT &Key) const {
+ static_assert(std::is_copy_constructible<ValueT>::value,
+ "Cannot call lookup() if ValueT is not copyable.");
+ typename MapType::const_iterator Pos = Map.find(Key);
+ return Pos == Map.end()? ValueT() : Vector[Pos->second].second;
+ }
+
+ std::pair<iterator, bool> insert(const std::pair<KeyT, ValueT> &KV) {
+ std::pair<KeyT, typename MapType::mapped_type> Pair = std::make_pair(KV.first, 0);
+ std::pair<typename MapType::iterator, bool> Result = Map.insert(Pair);
+ auto &I = Result.first->second;
+ if (Result.second) {
+ Vector.push_back(std::make_pair(KV.first, KV.second));
+ I = Vector.size() - 1;
+ return std::make_pair(std::prev(end()), true);
+ }
+ return std::make_pair(begin() + I, false);
+ }
+
+ std::pair<iterator, bool> insert(std::pair<KeyT, ValueT> &&KV) {
+ // Copy KV.first into the map, then move it into the vector.
+ std::pair<KeyT, typename MapType::mapped_type> Pair = std::make_pair(KV.first, 0);
+ std::pair<typename MapType::iterator, bool> Result = Map.insert(Pair);
+ auto &I = Result.first->second;
+ if (Result.second) {
+ Vector.push_back(std::move(KV));
+ I = Vector.size() - 1;
+ return std::make_pair(std::prev(end()), true);
+ }
+ return std::make_pair(begin() + I, false);
+ }
+
+ size_type count(const KeyT &Key) const {
+ typename MapType::const_iterator Pos = Map.find(Key);
+ return Pos == Map.end()? 0 : 1;
+ }
+
+ iterator find(const KeyT &Key) {
+ typename MapType::const_iterator Pos = Map.find(Key);
+ return Pos == Map.end()? Vector.end() :
+ (Vector.begin() + Pos->second);
+ }
+
+ const_iterator find(const KeyT &Key) const {
+ typename MapType::const_iterator Pos = Map.find(Key);
+ return Pos == Map.end()? Vector.end() :
+ (Vector.begin() + Pos->second);
+ }
+
+ /// Remove the last element from the vector.
+ void pop_back() {
+ typename MapType::iterator Pos = Map.find(Vector.back().first);
+ Map.erase(Pos);
+ Vector.pop_back();
+ }
+
+ /// Remove the element given by Iterator.
+ ///
+ /// Returns an iterator to the element following the one which was removed,
+ /// which may be end().
+ ///
+  /// \note This is a deceptively expensive operation (linear time). It's
+ /// usually better to use \a remove_if() if possible.
+ typename VectorType::iterator erase(typename VectorType::iterator Iterator) {
+ Map.erase(Iterator->first);
+ auto Next = Vector.erase(Iterator);
+ if (Next == Vector.end())
+ return Next;
+
+ // Update indices in the map.
+ size_t Index = Next - Vector.begin();
+ for (auto &I : Map) {
+ assert(I.second != Index && "Index was already erased!");
+ if (I.second > Index)
+ --I.second;
+ }
+ return Next;
+ }
+
+ /// Remove all elements with the key value Key.
+ ///
+ /// Returns the number of elements removed.
+ size_type erase(const KeyT &Key) {
+ auto Iterator = find(Key);
+ if (Iterator == end())
+ return 0;
+ erase(Iterator);
+ return 1;
+ }
+
+ /// Remove the elements that match the predicate.
+ ///
+ /// Erase all elements that match \c Pred in a single pass. Takes linear
+ /// time.
+ template <class Predicate> void remove_if(Predicate Pred);
+};
+
+template <typename KeyT, typename ValueT, typename MapType, typename VectorType>
+template <class Function>
+void MapVector<KeyT, ValueT, MapType, VectorType>::remove_if(Function Pred) {
+ auto O = Vector.begin();
+ for (auto I = O, E = Vector.end(); I != E; ++I) {
+ if (Pred(*I)) {
+ // Erase from the map.
+ Map.erase(I->first);
+ continue;
+ }
+
+ if (I != O) {
+ // Move the value and update the index in the map.
+ *O = std::move(*I);
+ Map[O->first] = O - Vector.begin();
+ }
+ ++O;
+ }
+ // Erase trailing entries in the vector.
+ Vector.erase(O, Vector.end());
+}
+
+/// A MapVector that performs no allocations if smaller than a certain
+/// size.
+template <typename KeyT, typename ValueT, unsigned N>
+struct SmallMapVector
+ : MapVector<KeyT, ValueT, SmallDenseMap<KeyT, unsigned, N>,
+ SmallVector<std::pair<KeyT, ValueT>, N>> {
+};
+
+} // end namespace llvm
+
+#endif // LLVM_ADT_MAPVECTOR_H
+
+#ifdef __GNUC__
+#pragma GCC diagnostic pop
+#endif
diff --git a/contrib/libs/llvm14/include/llvm/ADT/None.h b/contrib/libs/llvm14/include/llvm/ADT/None.h
new file mode 100644
index 0000000000..3e4fcfde0b
--- /dev/null
+++ b/contrib/libs/llvm14/include/llvm/ADT/None.h
@@ -0,0 +1,38 @@
+#pragma once
+
+#ifdef __GNUC__
+#pragma GCC diagnostic push
+#pragma GCC diagnostic ignored "-Wunused-parameter"
+#endif
+
+//===-- None.h - Simple null value for implicit construction ------*- C++ -*-=//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+///
+/// \file
+/// This file provides None, an enumerator for use in implicit constructors
+/// of various (usually templated) types to make such construction more
+/// terse.
+///
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_ADT_NONE_H
+#define LLVM_ADT_NONE_H
+
+namespace llvm {
+/// A simple null object to allow implicit construction of Optional<T>
+/// and similar types without having to spell out the specialization's name.
+// (constant value 1 in an attempt to work around an MSVC build issue... )
+enum class NoneType { None = 1 };
+const NoneType None = NoneType::None;
+}
+
+#endif
+
+#ifdef __GNUC__
+#pragma GCC diagnostic pop
+#endif
diff --git a/contrib/libs/llvm14/include/llvm/ADT/Optional.h b/contrib/libs/llvm14/include/llvm/ADT/Optional.h
new file mode 100644
index 0000000000..fa4a7fc94f
--- /dev/null
+++ b/contrib/libs/llvm14/include/llvm/ADT/Optional.h
@@ -0,0 +1,508 @@
+#pragma once
+
+#ifdef __GNUC__
+#pragma GCC diagnostic push
+#pragma GCC diagnostic ignored "-Wunused-parameter"
+#endif
+
+//===- Optional.h - Simple variant for passing optional values --*- C++ -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+///
+/// \file
+/// This file provides Optional, a template class modeled in the spirit of
+/// OCaml's 'opt' variant. The idea is to strongly type whether or not
+/// a value can be optional.
+///
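+/// A short usage sketch (consume() is a hypothetical function):
+/// \code
+///   Optional<int> V;   // Empty; compares equal to None.
+///   V = 42;            // Now holds 42.
+///   if (V)             // Contextual bool: true when a value is present.
+///     consume(*V);     // operator* accesses the contained value.
+///   V.reset();         // Back to the empty state.
+/// \endcode
+///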
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_ADT_OPTIONAL_H
+#define LLVM_ADT_OPTIONAL_H
+
+#include "llvm/ADT/Hashing.h"
+#include "llvm/ADT/None.h"
+#include "llvm/ADT/STLForwardCompat.h"
+#include "llvm/Support/Compiler.h"
+#include "llvm/Support/type_traits.h"
+#include <cassert>
+#include <new>
+#include <utility>
+
+namespace llvm {
+
+class raw_ostream;
+
+namespace optional_detail {
+
+/// Storage for any type.
+//
+// The specialization condition intentionally uses
+// llvm::is_trivially_{copy/move}_constructible instead of
+// std::is_trivially_{copy/move}_constructible. GCC versions prior to 7.4 may
+// instantiate the copy/move constructor of `T` when
+// std::is_trivially_{copy/move}_constructible is instantiated. This causes
+// compilation to fail if we query the trivially copy/move constructible
+// property of a class which is not copy/move constructible.
+//
+// The current implementation of OptionalStorage insists that in order to use
+// the trivial specialization, the value_type must be trivially copy
+// constructible and trivially copy assignable due to =default implementations
+// of the copy/move constructor/assignment. It does not follow that this is
+// necessarily the case when std::is_trivially_copyable is true (hence the
+// expanded specialization condition).
+//
+// The move constructible / assignable conditions emulate the remaining behavior
+// of std::is_trivially_copyable.
+template <typename T,
+ bool = (llvm::is_trivially_copy_constructible<T>::value &&
+ std::is_trivially_copy_assignable<T>::value &&
+ (llvm::is_trivially_move_constructible<T>::value ||
+ !std::is_move_constructible<T>::value) &&
+ (std::is_trivially_move_assignable<T>::value ||
+ !std::is_move_assignable<T>::value))>
+class OptionalStorage {
+ union {
+ char empty;
+ T value;
+ };
+ bool hasVal;
+
+public:
+ ~OptionalStorage() { reset(); }
+
+ constexpr OptionalStorage() noexcept : empty(), hasVal(false) {}
+
+ constexpr OptionalStorage(OptionalStorage const &other) : OptionalStorage() {
+ if (other.hasValue()) {
+ emplace(other.value);
+ }
+ }
+ constexpr OptionalStorage(OptionalStorage &&other) : OptionalStorage() {
+ if (other.hasValue()) {
+ emplace(std::move(other.value));
+ }
+ }
+
+ template <class... Args>
+ constexpr explicit OptionalStorage(in_place_t, Args &&... args)
+ : value(std::forward<Args>(args)...), hasVal(true) {}
+
+ void reset() noexcept {
+ if (hasVal) {
+ value.~T();
+ hasVal = false;
+ }
+ }
+
+ constexpr bool hasValue() const noexcept { return hasVal; }
+
+ T &getValue() LLVM_LVALUE_FUNCTION noexcept {
+ assert(hasVal);
+ return value;
+ }
+ constexpr T const &getValue() const LLVM_LVALUE_FUNCTION noexcept {
+ assert(hasVal);
+ return value;
+ }
+#if LLVM_HAS_RVALUE_REFERENCE_THIS
+ T &&getValue() && noexcept {
+ assert(hasVal);
+ return std::move(value);
+ }
+#endif
+
+ template <class... Args> void emplace(Args &&... args) {
+ reset();
+ ::new ((void *)std::addressof(value)) T(std::forward<Args>(args)...);
+ hasVal = true;
+ }
+
+ OptionalStorage &operator=(T const &y) {
+ if (hasValue()) {
+ value = y;
+ } else {
+ ::new ((void *)std::addressof(value)) T(y);
+ hasVal = true;
+ }
+ return *this;
+ }
+ OptionalStorage &operator=(T &&y) {
+ if (hasValue()) {
+ value = std::move(y);
+ } else {
+ ::new ((void *)std::addressof(value)) T(std::move(y));
+ hasVal = true;
+ }
+ return *this;
+ }
+
+ OptionalStorage &operator=(OptionalStorage const &other) {
+ if (other.hasValue()) {
+ if (hasValue()) {
+ value = other.value;
+ } else {
+ ::new ((void *)std::addressof(value)) T(other.value);
+ hasVal = true;
+ }
+ } else {
+ reset();
+ }
+ return *this;
+ }
+
+ OptionalStorage &operator=(OptionalStorage &&other) {
+ if (other.hasValue()) {
+ if (hasValue()) {
+ value = std::move(other.value);
+ } else {
+ ::new ((void *)std::addressof(value)) T(std::move(other.value));
+ hasVal = true;
+ }
+ } else {
+ reset();
+ }
+ return *this;
+ }
+};
+
+template <typename T> class OptionalStorage<T, true> {
+ union {
+ char empty;
+ T value;
+ };
+ bool hasVal = false;
+
+public:
+ ~OptionalStorage() = default;
+
+ constexpr OptionalStorage() noexcept : empty{} {}
+
+ constexpr OptionalStorage(OptionalStorage const &other) = default;
+ constexpr OptionalStorage(OptionalStorage &&other) = default;
+
+ OptionalStorage &operator=(OptionalStorage const &other) = default;
+ OptionalStorage &operator=(OptionalStorage &&other) = default;
+
+ template <class... Args>
+ constexpr explicit OptionalStorage(in_place_t, Args &&... args)
+ : value(std::forward<Args>(args)...), hasVal(true) {}
+
+ void reset() noexcept {
+ if (hasVal) {
+ value.~T();
+ hasVal = false;
+ }
+ }
+
+ constexpr bool hasValue() const noexcept { return hasVal; }
+
+ T &getValue() LLVM_LVALUE_FUNCTION noexcept {
+ assert(hasVal);
+ return value;
+ }
+ constexpr T const &getValue() const LLVM_LVALUE_FUNCTION noexcept {
+ assert(hasVal);
+ return value;
+ }
+#if LLVM_HAS_RVALUE_REFERENCE_THIS
+ T &&getValue() && noexcept {
+ assert(hasVal);
+ return std::move(value);
+ }
+#endif
+
+ template <class... Args> void emplace(Args &&... args) {
+ reset();
+ ::new ((void *)std::addressof(value)) T(std::forward<Args>(args)...);
+ hasVal = true;
+ }
+
+ OptionalStorage &operator=(T const &y) {
+ if (hasValue()) {
+ value = y;
+ } else {
+ ::new ((void *)std::addressof(value)) T(y);
+ hasVal = true;
+ }
+ return *this;
+ }
+ OptionalStorage &operator=(T &&y) {
+ if (hasValue()) {
+ value = std::move(y);
+ } else {
+ ::new ((void *)std::addressof(value)) T(std::move(y));
+ hasVal = true;
+ }
+ return *this;
+ }
+};
+
+} // namespace optional_detail
+
+template <typename T> class Optional {
+ optional_detail::OptionalStorage<T> Storage;
+
+public:
+ using value_type = T;
+
+ constexpr Optional() = default;
+ constexpr Optional(NoneType) {}
+
+ constexpr Optional(const T &y) : Storage(in_place, y) {}
+ constexpr Optional(const Optional &O) = default;
+
+ constexpr Optional(T &&y) : Storage(in_place, std::move(y)) {}
+ constexpr Optional(Optional &&O) = default;
+
+ template <typename... ArgTypes>
+ constexpr Optional(in_place_t, ArgTypes &&...Args)
+ : Storage(in_place, std::forward<ArgTypes>(Args)...) {}
+
+ Optional &operator=(T &&y) {
+ Storage = std::move(y);
+ return *this;
+ }
+ Optional &operator=(Optional &&O) = default;
+
+ /// Create a new object by constructing it in place with the given arguments.
+ template <typename... ArgTypes> void emplace(ArgTypes &&... Args) {
+ Storage.emplace(std::forward<ArgTypes>(Args)...);
+ }
+
+ static constexpr Optional create(const T *y) {
+ return y ? Optional(*y) : Optional();
+ }
+
+ Optional &operator=(const T &y) {
+ Storage = y;
+ return *this;
+ }
+ Optional &operator=(const Optional &O) = default;
+
+ void reset() { Storage.reset(); }
+
+ constexpr const T *getPointer() const { return &Storage.getValue(); }
+ T *getPointer() { return &Storage.getValue(); }
+ constexpr const T &getValue() const LLVM_LVALUE_FUNCTION {
+ return Storage.getValue();
+ }
+ T &getValue() LLVM_LVALUE_FUNCTION { return Storage.getValue(); }
+
+ constexpr explicit operator bool() const { return hasValue(); }
+ constexpr bool hasValue() const { return Storage.hasValue(); }
+ constexpr const T *operator->() const { return getPointer(); }
+ T *operator->() { return getPointer(); }
+ constexpr const T &operator*() const LLVM_LVALUE_FUNCTION {
+ return getValue();
+ }
+ T &operator*() LLVM_LVALUE_FUNCTION { return getValue(); }
+
+ template <typename U>
+ constexpr T getValueOr(U &&value) const LLVM_LVALUE_FUNCTION {
+ return hasValue() ? getValue() : std::forward<U>(value);
+ }
+
+ /// Apply a function to the value if present; otherwise return None.
+ template <class Function>
+ auto map(const Function &F) const LLVM_LVALUE_FUNCTION
+ -> Optional<decltype(F(getValue()))> {
+ if (*this) return F(getValue());
+ return None;
+ }
+
+#if LLVM_HAS_RVALUE_REFERENCE_THIS
+ T &&getValue() && { return std::move(Storage.getValue()); }
+ T &&operator*() && { return std::move(Storage.getValue()); }
+
+ template <typename U>
+ T getValueOr(U &&value) && {
+ return hasValue() ? std::move(getValue()) : std::forward<U>(value);
+ }
+
+ /// Apply a function to the value if present; otherwise return None.
+ template <class Function>
+ auto map(const Function &F) &&
+ -> Optional<decltype(F(std::move(*this).getValue()))> {
+ if (*this) return F(std::move(*this).getValue());
+ return None;
+ }
+#endif
+};
+
+template <class T> llvm::hash_code hash_value(const Optional<T> &O) {
+ return O ? hash_combine(true, *O) : hash_value(false);
+}
+
+template <typename T, typename U>
+constexpr bool operator==(const Optional<T> &X, const Optional<U> &Y) {
+ if (X && Y)
+ return *X == *Y;
+ return X.hasValue() == Y.hasValue();
+}
+
+template <typename T, typename U>
+constexpr bool operator!=(const Optional<T> &X, const Optional<U> &Y) {
+ return !(X == Y);
+}
+
+template <typename T, typename U>
+constexpr bool operator<(const Optional<T> &X, const Optional<U> &Y) {
+ if (X && Y)
+ return *X < *Y;
+ return X.hasValue() < Y.hasValue();
+}
+
+template <typename T, typename U>
+constexpr bool operator<=(const Optional<T> &X, const Optional<U> &Y) {
+ return !(Y < X);
+}
+
+template <typename T, typename U>
+constexpr bool operator>(const Optional<T> &X, const Optional<U> &Y) {
+ return Y < X;
+}
+
+template <typename T, typename U>
+constexpr bool operator>=(const Optional<T> &X, const Optional<U> &Y) {
+ return !(X < Y);
+}
+
+template <typename T>
+constexpr bool operator==(const Optional<T> &X, NoneType) {
+ return !X;
+}
+
+template <typename T>
+constexpr bool operator==(NoneType, const Optional<T> &X) {
+ return X == None;
+}
+
+template <typename T>
+constexpr bool operator!=(const Optional<T> &X, NoneType) {
+ return !(X == None);
+}
+
+template <typename T>
+constexpr bool operator!=(NoneType, const Optional<T> &X) {
+ return X != None;
+}
+
+template <typename T> constexpr bool operator<(const Optional<T> &, NoneType) {
+ return false;
+}
+
+template <typename T> constexpr bool operator<(NoneType, const Optional<T> &X) {
+ return X.hasValue();
+}
+
+template <typename T>
+constexpr bool operator<=(const Optional<T> &X, NoneType) {
+ return !(None < X);
+}
+
+template <typename T>
+constexpr bool operator<=(NoneType, const Optional<T> &X) {
+ return !(X < None);
+}
+
+template <typename T> constexpr bool operator>(const Optional<T> &X, NoneType) {
+ return None < X;
+}
+
+template <typename T> constexpr bool operator>(NoneType, const Optional<T> &X) {
+ return X < None;
+}
+
+template <typename T>
+constexpr bool operator>=(const Optional<T> &X, NoneType) {
+ return None <= X;
+}
+
+template <typename T>
+constexpr bool operator>=(NoneType, const Optional<T> &X) {
+ return X <= None;
+}
+
+template <typename T>
+constexpr bool operator==(const Optional<T> &X, const T &Y) {
+ return X && *X == Y;
+}
+
+template <typename T>
+constexpr bool operator==(const T &X, const Optional<T> &Y) {
+ return Y && X == *Y;
+}
+
+template <typename T>
+constexpr bool operator!=(const Optional<T> &X, const T &Y) {
+ return !(X == Y);
+}
+
+template <typename T>
+constexpr bool operator!=(const T &X, const Optional<T> &Y) {
+ return !(X == Y);
+}
+
+template <typename T>
+constexpr bool operator<(const Optional<T> &X, const T &Y) {
+ return !X || *X < Y;
+}
+
+template <typename T>
+constexpr bool operator<(const T &X, const Optional<T> &Y) {
+ return Y && X < *Y;
+}
+
+template <typename T>
+constexpr bool operator<=(const Optional<T> &X, const T &Y) {
+ return !(Y < X);
+}
+
+template <typename T>
+constexpr bool operator<=(const T &X, const Optional<T> &Y) {
+ return !(Y < X);
+}
+
+template <typename T>
+constexpr bool operator>(const Optional<T> &X, const T &Y) {
+ return Y < X;
+}
+
+template <typename T>
+constexpr bool operator>(const T &X, const Optional<T> &Y) {
+ return Y < X;
+}
+
+template <typename T>
+constexpr bool operator>=(const Optional<T> &X, const T &Y) {
+ return !(X < Y);
+}
+
+template <typename T>
+constexpr bool operator>=(const T &X, const Optional<T> &Y) {
+ return !(X < Y);
+}
+
+raw_ostream &operator<<(raw_ostream &OS, NoneType);
+
+template <typename T, typename = decltype(std::declval<raw_ostream &>()
+ << std::declval<const T &>())>
+raw_ostream &operator<<(raw_ostream &OS, const Optional<T> &O) {
+ if (O)
+ OS << *O;
+ else
+ OS << None;
+ return OS;
+}
+
+} // end namespace llvm
+
+#endif // LLVM_ADT_OPTIONAL_H
+
+#ifdef __GNUC__
+#pragma GCC diagnostic pop
+#endif
diff --git a/contrib/libs/llvm14/include/llvm/ADT/PackedVector.h b/contrib/libs/llvm14/include/llvm/ADT/PackedVector.h
new file mode 100644
index 0000000000..b216dbd828
--- /dev/null
+++ b/contrib/libs/llvm14/include/llvm/ADT/PackedVector.h
@@ -0,0 +1,162 @@
+#pragma once
+
+#ifdef __GNUC__
+#pragma GCC diagnostic push
+#pragma GCC diagnostic ignored "-Wunused-parameter"
+#endif
+
+//===- llvm/ADT/PackedVector.h - Packed values vector -----------*- C++ -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+///
+/// \file
+/// This file implements the PackedVector class.
+///
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_ADT_PACKEDVECTOR_H
+#define LLVM_ADT_PACKEDVECTOR_H
+
+#include "llvm/ADT/BitVector.h"
+#include <cassert>
+#include <limits>
+
+namespace llvm {
+
+template <typename T, unsigned BitNum, typename BitVectorTy, bool isSigned>
+class PackedVectorBase;
+
+// This won't be necessary if we can specialize members without specializing
+// the parent template.
+template <typename T, unsigned BitNum, typename BitVectorTy>
+class PackedVectorBase<T, BitNum, BitVectorTy, false> {
+protected:
+ static T getValue(const BitVectorTy &Bits, unsigned Idx) {
+ T val = T();
+ for (unsigned i = 0; i != BitNum; ++i)
+ val = T(val | ((Bits[(Idx << (BitNum-1)) + i] ? 1UL : 0UL) << i));
+ return val;
+ }
+
+ static void setValue(BitVectorTy &Bits, unsigned Idx, T val) {
+ assert((val >> BitNum) == 0 && "value is too big");
+ for (unsigned i = 0; i != BitNum; ++i)
+ Bits[(Idx << (BitNum-1)) + i] = val & (T(1) << i);
+ }
+};
+
+template <typename T, unsigned BitNum, typename BitVectorTy>
+class PackedVectorBase<T, BitNum, BitVectorTy, true> {
+protected:
+ static T getValue(const BitVectorTy &Bits, unsigned Idx) {
+ T val = T();
+ for (unsigned i = 0; i != BitNum-1; ++i)
+ val = T(val | ((Bits[(Idx << (BitNum-1)) + i] ? 1UL : 0UL) << i));
+ if (Bits[(Idx << (BitNum-1)) + BitNum-1])
+ val = ~val;
+ return val;
+ }
+
+ static void setValue(BitVectorTy &Bits, unsigned Idx, T val) {
+ if (val < 0) {
+ val = ~val;
+ Bits.set((Idx << (BitNum-1)) + BitNum-1);
+ }
+ assert((val >> (BitNum-1)) == 0 && "value is too big");
+ for (unsigned i = 0; i != BitNum-1; ++i)
+ Bits[(Idx << (BitNum-1)) + i] = val & (T(1) << i);
+ }
+};
+
+/// Store a vector of values using a specific number of bits for each
+/// value. Both signed and unsigned types can be used, e.g.
+/// @code
+/// PackedVector<signed, 2> vec;
+/// @endcode
+/// will create a vector accepting values -2, -1, 0, 1. Any other value will hit
+/// an assertion.
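+///
+/// An unsigned example (illustrative):
+/// @code
+/// PackedVector<unsigned, 3> vec; // 3 bits per value: accepts 0..7.
+/// vec.push_back(5);
+/// unsigned v = vec[0];           // v == 5
+/// @endcode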
+template <typename T, unsigned BitNum, typename BitVectorTy = BitVector>
+class PackedVector : public PackedVectorBase<T, BitNum, BitVectorTy,
+ std::numeric_limits<T>::is_signed> {
+ BitVectorTy Bits;
+ using base = PackedVectorBase<T, BitNum, BitVectorTy,
+ std::numeric_limits<T>::is_signed>;
+
+public:
+ class reference {
+ PackedVector &Vec;
+ const unsigned Idx;
+
+ public:
+ reference() = delete;
+ reference(PackedVector &vec, unsigned idx) : Vec(vec), Idx(idx) {}
+
+ reference &operator=(T val) {
+ Vec.setValue(Vec.Bits, Idx, val);
+ return *this;
+ }
+
+ operator T() const {
+ return Vec.getValue(Vec.Bits, Idx);
+ }
+ };
+
+ PackedVector() = default;
+ explicit PackedVector(unsigned size) : Bits(size << (BitNum-1)) {}
+
+ bool empty() const { return Bits.empty(); }
+
+ unsigned size() const { return Bits.size() >> (BitNum - 1); }
+
+ void clear() { Bits.clear(); }
+
+ void resize(unsigned N) { Bits.resize(N << (BitNum - 1)); }
+
+ void reserve(unsigned N) { Bits.reserve(N << (BitNum-1)); }
+
+ PackedVector &reset() {
+ Bits.reset();
+ return *this;
+ }
+
+ void push_back(T val) {
+ resize(size()+1);
+ (*this)[size()-1] = val;
+ }
+
+ reference operator[](unsigned Idx) {
+ return reference(*this, Idx);
+ }
+
+ T operator[](unsigned Idx) const {
+ return base::getValue(Bits, Idx);
+ }
+
+ bool operator==(const PackedVector &RHS) const {
+ return Bits == RHS.Bits;
+ }
+
+ bool operator!=(const PackedVector &RHS) const {
+ return Bits != RHS.Bits;
+ }
+
+ PackedVector &operator|=(const PackedVector &RHS) {
+ Bits |= RHS.Bits;
+ return *this;
+ }
+};
+
+// Leave BitNum=0 undefined.
+template <typename T> class PackedVector<T, 0>;
+
+} // end namespace llvm
+
+#endif // LLVM_ADT_PACKEDVECTOR_H
+
+#ifdef __GNUC__
+#pragma GCC diagnostic pop
+#endif
diff --git a/contrib/libs/llvm14/include/llvm/ADT/PointerEmbeddedInt.h b/contrib/libs/llvm14/include/llvm/ADT/PointerEmbeddedInt.h
new file mode 100644
index 0000000000..98b87fb47c
--- /dev/null
+++ b/contrib/libs/llvm14/include/llvm/ADT/PointerEmbeddedInt.h
@@ -0,0 +1,130 @@
+#pragma once
+
+#ifdef __GNUC__
+#pragma GCC diagnostic push
+#pragma GCC diagnostic ignored "-Wunused-parameter"
+#endif
+
+//===- llvm/ADT/PointerEmbeddedInt.h ----------------------------*- C++ -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_ADT_POINTEREMBEDDEDINT_H
+#define LLVM_ADT_POINTEREMBEDDEDINT_H
+
+#include "llvm/ADT/DenseMapInfo.h"
+#include "llvm/Support/MathExtras.h"
+#include "llvm/Support/PointerLikeTypeTraits.h"
+#include <cassert>
+#include <climits>
+#include <cstdint>
+#include <type_traits>
+
+namespace llvm {
+
+/// Utility to embed an integer into a pointer-like type. This is specifically
+/// intended to allow embedding integers where fewer bits are required than
+/// exist in a pointer, and the integer can participate in abstractions along
+/// side other pointer-like types. For example it can be placed into a \c
+/// PointerSumType or \c PointerUnion.
+///
+/// Note that much like pointers, an integer value of zero has special utility
+/// due to boolean conversions. For example, a non-null value can be tested for
+/// in the above abstractions without testing the particular active member.
+/// Also, the default constructed value zero initializes the integer.
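+///
+/// A brief sketch (illustrative):
+/// \code
+///   PointerEmbeddedInt<unsigned, 3> EI = 5; // Held shifted into the high bits.
+///   unsigned V = EI;                        // Implicit conversion; V == 5.
+/// \endcode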
+template <typename IntT, int Bits = sizeof(IntT) * CHAR_BIT>
+class PointerEmbeddedInt {
+ uintptr_t Value = 0;
+
+ // Note: This '<' is correct; using '<=' would result in some shifts
+ // overflowing their storage types.
+ static_assert(Bits < sizeof(uintptr_t) * CHAR_BIT,
+ "Cannot embed more bits than we have in a pointer!");
+
+ enum : uintptr_t {
+ // We shift as many zeros into the value as we can while preserving the
+ // number of bits desired for the integer.
+ Shift = sizeof(uintptr_t) * CHAR_BIT - Bits,
+
+ // We also want to be able to mask out the preserved bits for asserts.
+ Mask = static_cast<uintptr_t>(-1) << Bits
+ };
+
+ struct RawValueTag {
+ explicit RawValueTag() = default;
+ };
+
+ friend struct PointerLikeTypeTraits<PointerEmbeddedInt>;
+
+ explicit PointerEmbeddedInt(uintptr_t Value, RawValueTag) : Value(Value) {}
+
+public:
+ PointerEmbeddedInt() = default;
+
+ PointerEmbeddedInt(IntT I) { *this = I; }
+
+ PointerEmbeddedInt &operator=(IntT I) {
+ assert((std::is_signed<IntT>::value ? isInt<Bits>(I) : isUInt<Bits>(I)) &&
+ "Integer has bits outside those preserved!");
+ Value = static_cast<uintptr_t>(I) << Shift;
+ return *this;
+ }
+
+ // Note that this implicit conversion additionally allows all of the basic
+ // comparison operators to work transparently, etc.
+ operator IntT() const {
+ if (std::is_signed<IntT>::value)
+ return static_cast<IntT>(static_cast<intptr_t>(Value) >> Shift);
+ return static_cast<IntT>(Value >> Shift);
+ }
+};
+
+// Provide pointer like traits to support use with pointer unions and sum
+// types.
+template <typename IntT, int Bits>
+struct PointerLikeTypeTraits<PointerEmbeddedInt<IntT, Bits>> {
+ using T = PointerEmbeddedInt<IntT, Bits>;
+
+ static inline void *getAsVoidPointer(const T &P) {
+ return reinterpret_cast<void *>(P.Value);
+ }
+
+ static inline T getFromVoidPointer(void *P) {
+ return T(reinterpret_cast<uintptr_t>(P), typename T::RawValueTag());
+ }
+
+ static inline T getFromVoidPointer(const void *P) {
+ return T(reinterpret_cast<uintptr_t>(P), typename T::RawValueTag());
+ }
+
+ static constexpr int NumLowBitsAvailable = T::Shift;
+};
+
+// Teach DenseMap how to use PointerEmbeddedInt objects as keys if the Int type
+// itself can be a key.
+template <typename IntT, int Bits>
+struct DenseMapInfo<PointerEmbeddedInt<IntT, Bits>> {
+ using T = PointerEmbeddedInt<IntT, Bits>;
+ using IntInfo = DenseMapInfo<IntT>;
+
+ static inline T getEmptyKey() { return IntInfo::getEmptyKey(); }
+ static inline T getTombstoneKey() { return IntInfo::getTombstoneKey(); }
+
+ static unsigned getHashValue(const T &Arg) {
+ return IntInfo::getHashValue(Arg);
+ }
+
+ static bool isEqual(const T &LHS, const T &RHS) { return LHS == RHS; }
+};
+
+} // end namespace llvm
+
+#endif // LLVM_ADT_POINTEREMBEDDEDINT_H
+
+#ifdef __GNUC__
+#pragma GCC diagnostic pop
+#endif
diff --git a/contrib/libs/llvm14/include/llvm/ADT/PointerIntPair.h b/contrib/libs/llvm14/include/llvm/ADT/PointerIntPair.h
new file mode 100644
index 0000000000..6575de275a
--- /dev/null
+++ b/contrib/libs/llvm14/include/llvm/ADT/PointerIntPair.h
@@ -0,0 +1,256 @@
+#pragma once
+
+#ifdef __GNUC__
+#pragma GCC diagnostic push
+#pragma GCC diagnostic ignored "-Wunused-parameter"
+#endif
+
+//===- llvm/ADT/PointerIntPair.h - Pair for pointer and int -----*- C++ -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+///
+/// \file
+/// This file defines the PointerIntPair class.
+///
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_ADT_POINTERINTPAIR_H
+#define LLVM_ADT_POINTERINTPAIR_H
+
+#include "llvm/Support/Compiler.h"
+#include "llvm/Support/PointerLikeTypeTraits.h"
+#include "llvm/Support/type_traits.h"
+#include <cassert>
+#include <cstdint>
+#include <limits>
+
+namespace llvm {
+
+template <typename T, typename Enable> struct DenseMapInfo;
+template <typename PointerT, unsigned IntBits, typename PtrTraits>
+struct PointerIntPairInfo;
+
+/// PointerIntPair - This class implements a pair of a pointer and small
+/// integer. It is designed to represent this in the space required by one
+/// pointer by bitmangling the integer into the low part of the pointer. This
+/// can only be done for small integers: typically up to 3 bits, but it depends
+/// on the number of bits available according to PointerLikeTypeTraits for the
+/// type.
+///
+/// Note that PointerIntPair always puts the IntVal part in the highest bits
+/// possible. For example, PointerIntPair<void*, 1, bool> will put the bit for
+/// the bool into bit #2, not bit #0, which allows the low two bits to be used
+/// for something else. For example, this allows:
+/// PointerIntPair<PointerIntPair<void*, 1, bool>, 1, bool>
+/// ... and the two bools will land in different bits.
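+///
+/// A small usage sketch (illustrative):
+/// \code
+///   int X = 0;
+///   PointerIntPair<int *, 1, bool> P(&X, true);
+///   assert(P.getPointer() == &X && P.getInt());
+/// \endcode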
+template <typename PointerTy, unsigned IntBits, typename IntType = unsigned,
+ typename PtrTraits = PointerLikeTypeTraits<PointerTy>,
+ typename Info = PointerIntPairInfo<PointerTy, IntBits, PtrTraits>>
+class PointerIntPair {
+  // Used by the MSVC visualizer and generally helpful for debugging/visualizing.
+ using InfoTy = Info;
+ intptr_t Value = 0;
+
+public:
+ constexpr PointerIntPair() = default;
+
+ PointerIntPair(PointerTy PtrVal, IntType IntVal) {
+ setPointerAndInt(PtrVal, IntVal);
+ }
+
+ explicit PointerIntPair(PointerTy PtrVal) { initWithPointer(PtrVal); }
+
+ PointerTy getPointer() const { return Info::getPointer(Value); }
+
+ IntType getInt() const { return (IntType)Info::getInt(Value); }
+
+ void setPointer(PointerTy PtrVal) LLVM_LVALUE_FUNCTION {
+ Value = Info::updatePointer(Value, PtrVal);
+ }
+
+ void setInt(IntType IntVal) LLVM_LVALUE_FUNCTION {
+ Value = Info::updateInt(Value, static_cast<intptr_t>(IntVal));
+ }
+
+ void initWithPointer(PointerTy PtrVal) LLVM_LVALUE_FUNCTION {
+ Value = Info::updatePointer(0, PtrVal);
+ }
+
+ void setPointerAndInt(PointerTy PtrVal, IntType IntVal) LLVM_LVALUE_FUNCTION {
+ Value = Info::updateInt(Info::updatePointer(0, PtrVal),
+ static_cast<intptr_t>(IntVal));
+ }
+
+ PointerTy const *getAddrOfPointer() const {
+ return const_cast<PointerIntPair *>(this)->getAddrOfPointer();
+ }
+
+ PointerTy *getAddrOfPointer() {
+ assert(Value == reinterpret_cast<intptr_t>(getPointer()) &&
+ "Can only return the address if IntBits is cleared and "
+ "PtrTraits doesn't change the pointer");
+ return reinterpret_cast<PointerTy *>(&Value);
+ }
+
+ void *getOpaqueValue() const { return reinterpret_cast<void *>(Value); }
+
+ void setFromOpaqueValue(void *Val) LLVM_LVALUE_FUNCTION {
+ Value = reinterpret_cast<intptr_t>(Val);
+ }
+
+ static PointerIntPair getFromOpaqueValue(void *V) {
+ PointerIntPair P;
+ P.setFromOpaqueValue(V);
+ return P;
+ }
+
+ // Allow PointerIntPairs to be created from const void * if and only if the
+ // pointer type could be created from a const void *.
+ static PointerIntPair getFromOpaqueValue(const void *V) {
+ (void)PtrTraits::getFromVoidPointer(V);
+ return getFromOpaqueValue(const_cast<void *>(V));
+ }
+
+ bool operator==(const PointerIntPair &RHS) const {
+ return Value == RHS.Value;
+ }
+
+ bool operator!=(const PointerIntPair &RHS) const {
+ return Value != RHS.Value;
+ }
+
+ bool operator<(const PointerIntPair &RHS) const { return Value < RHS.Value; }
+ bool operator>(const PointerIntPair &RHS) const { return Value > RHS.Value; }
+
+ bool operator<=(const PointerIntPair &RHS) const {
+ return Value <= RHS.Value;
+ }
+
+ bool operator>=(const PointerIntPair &RHS) const {
+ return Value >= RHS.Value;
+ }
+};
+
+// Specialize is_trivially_copyable to avoid limitation of llvm::is_trivially_copyable
+// when compiled with gcc 4.9.
+template <typename PointerTy, unsigned IntBits, typename IntType,
+ typename PtrTraits,
+ typename Info>
+struct is_trivially_copyable<PointerIntPair<PointerTy, IntBits, IntType, PtrTraits, Info>> : std::true_type {
+#ifdef HAVE_STD_IS_TRIVIALLY_COPYABLE
+ static_assert(std::is_trivially_copyable<PointerIntPair<PointerTy, IntBits, IntType, PtrTraits, Info>>::value,
+ "inconsistent behavior between llvm:: and std:: implementation of is_trivially_copyable");
+#endif
+};
+
+
+template <typename PointerT, unsigned IntBits, typename PtrTraits>
+struct PointerIntPairInfo {
+ static_assert(PtrTraits::NumLowBitsAvailable <
+ std::numeric_limits<uintptr_t>::digits,
+ "cannot use a pointer type that has all bits free");
+ static_assert(IntBits <= PtrTraits::NumLowBitsAvailable,
+ "PointerIntPair with integer size too large for pointer");
+ enum MaskAndShiftConstants : uintptr_t {
+ /// PointerBitMask - The bits that come from the pointer.
+ PointerBitMask =
+ ~(uintptr_t)(((intptr_t)1 << PtrTraits::NumLowBitsAvailable) - 1),
+
+ /// IntShift - The number of low bits that we reserve for other uses, and
+ /// keep zero.
+ IntShift = (uintptr_t)PtrTraits::NumLowBitsAvailable - IntBits,
+
+ /// IntMask - This is the unshifted mask for valid bits of the int type.
+ IntMask = (uintptr_t)(((intptr_t)1 << IntBits) - 1),
+
+ // ShiftedIntMask - This is the bits for the integer shifted in place.
+ ShiftedIntMask = (uintptr_t)(IntMask << IntShift)
+ };
+
+ static PointerT getPointer(intptr_t Value) {
+ return PtrTraits::getFromVoidPointer(
+ reinterpret_cast<void *>(Value & PointerBitMask));
+ }
+
+ static intptr_t getInt(intptr_t Value) {
+ return (Value >> IntShift) & IntMask;
+ }
+
+ static intptr_t updatePointer(intptr_t OrigValue, PointerT Ptr) {
+ intptr_t PtrWord =
+ reinterpret_cast<intptr_t>(PtrTraits::getAsVoidPointer(Ptr));
+ assert((PtrWord & ~PointerBitMask) == 0 &&
+ "Pointer is not sufficiently aligned");
+ // Preserve all low bits, just update the pointer.
+ return PtrWord | (OrigValue & ~PointerBitMask);
+ }
+
+ static intptr_t updateInt(intptr_t OrigValue, intptr_t Int) {
+ intptr_t IntWord = static_cast<intptr_t>(Int);
+ assert((IntWord & ~IntMask) == 0 && "Integer too large for field");
+
+ // Preserve all bits other than the ones we are updating.
+ return (OrigValue & ~ShiftedIntMask) | IntWord << IntShift;
+ }
+};
+
+// Provide specialization of DenseMapInfo for PointerIntPair.
+template <typename PointerTy, unsigned IntBits, typename IntType>
+struct DenseMapInfo<PointerIntPair<PointerTy, IntBits, IntType>, void> {
+ using Ty = PointerIntPair<PointerTy, IntBits, IntType>;
+
+ static Ty getEmptyKey() {
+ uintptr_t Val = static_cast<uintptr_t>(-1);
+ Val <<= PointerLikeTypeTraits<Ty>::NumLowBitsAvailable;
+ return Ty::getFromOpaqueValue(reinterpret_cast<void *>(Val));
+ }
+
+ static Ty getTombstoneKey() {
+ uintptr_t Val = static_cast<uintptr_t>(-2);
+ Val <<= PointerLikeTypeTraits<PointerTy>::NumLowBitsAvailable;
+ return Ty::getFromOpaqueValue(reinterpret_cast<void *>(Val));
+ }
+
+ static unsigned getHashValue(Ty V) {
+ uintptr_t IV = reinterpret_cast<uintptr_t>(V.getOpaqueValue());
+ return unsigned(IV) ^ unsigned(IV >> 9);
+ }
+
+ static bool isEqual(const Ty &LHS, const Ty &RHS) { return LHS == RHS; }
+};
+
+// Teach SmallPtrSet that PointerIntPair is "basically a pointer".
+template <typename PointerTy, unsigned IntBits, typename IntType,
+ typename PtrTraits>
+struct PointerLikeTypeTraits<
+ PointerIntPair<PointerTy, IntBits, IntType, PtrTraits>> {
+ static inline void *
+ getAsVoidPointer(const PointerIntPair<PointerTy, IntBits, IntType> &P) {
+ return P.getOpaqueValue();
+ }
+
+ static inline PointerIntPair<PointerTy, IntBits, IntType>
+ getFromVoidPointer(void *P) {
+ return PointerIntPair<PointerTy, IntBits, IntType>::getFromOpaqueValue(P);
+ }
+
+ static inline PointerIntPair<PointerTy, IntBits, IntType>
+ getFromVoidPointer(const void *P) {
+ return PointerIntPair<PointerTy, IntBits, IntType>::getFromOpaqueValue(P);
+ }
+
+ static constexpr int NumLowBitsAvailable =
+ PtrTraits::NumLowBitsAvailable - IntBits;
+};
+
+} // end namespace llvm
+
+#endif // LLVM_ADT_POINTERINTPAIR_H
+
+#ifdef __GNUC__
+#pragma GCC diagnostic pop
+#endif
diff --git a/contrib/libs/llvm14/include/llvm/ADT/PointerSumType.h b/contrib/libs/llvm14/include/llvm/ADT/PointerSumType.h
new file mode 100644
index 0000000000..9399bd30b0
--- /dev/null
+++ b/contrib/libs/llvm14/include/llvm/ADT/PointerSumType.h
@@ -0,0 +1,305 @@
+#pragma once
+
+#ifdef __GNUC__
+#pragma GCC diagnostic push
+#pragma GCC diagnostic ignored "-Wunused-parameter"
+#endif
+
+//===- llvm/ADT/PointerSumType.h --------------------------------*- C++ -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_ADT_POINTERSUMTYPE_H
+#define LLVM_ADT_POINTERSUMTYPE_H
+
+#include "llvm/ADT/bit.h"
+#include "llvm/ADT/DenseMapInfo.h"
+#include "llvm/Support/PointerLikeTypeTraits.h"
+#include <cassert>
+#include <cstdint>
+#include <type_traits>
+
+namespace llvm {
+
+/// A compile time pair of an integer tag and the pointer-like type which it
+/// indexes within a sum type. Also allows the user to specify a particular
+/// traits class for pointer types with custom behavior such as over-aligned
+/// allocation.
+template <uintptr_t N, typename PointerArgT,
+ typename TraitsArgT = PointerLikeTypeTraits<PointerArgT>>
+struct PointerSumTypeMember {
+ enum { Tag = N };
+ using PointerT = PointerArgT;
+ using TraitsT = TraitsArgT;
+};
+
+namespace detail {
+
+template <typename TagT, typename... MemberTs> struct PointerSumTypeHelper;
+
+} // end namespace detail
+
+/// A sum type over pointer-like types.
+///
+/// This is a normal tagged union across pointer-like types that uses the low
+/// bits of the pointers to store the tag.
+///
+/// Each member of the sum type is specified by passing a \c
+/// PointerSumTypeMember specialization in the variadic member argument list.
+/// This allows the user to control the particular tag value associated with
+/// a particular type, use the same type for multiple different tags, and
+/// customize the pointer-like traits used for a particular member. Note that
+/// these *must* be specializations of \c PointerSumTypeMember, no other type
+/// will suffice, even if it provides a compatible interface.
+///
+/// This type implements all of the comparison operators and even hash table
+/// support by comparing the underlying storage of the pointer values. It
+/// doesn't support delegating to particular members for comparisons.
+///
+/// It also default constructs to a zero tag with a null pointer, whatever that
+/// would be. This means that the zero value for the tag type is significant,
+/// so it is worth assigning it to whichever state is the most useful one to
+/// default construct.
+///
+/// Having a supported zero-valued tag also enables getting the address of a
+/// pointer stored with that tag provided it is stored in its natural bit
+/// representation. This works because in the case of a zero-valued tag, the
+/// pointer's value is directly stored into this object and we can expose the
+/// address of that internal storage. This is especially useful when building an
+/// `ArrayRef` of a single pointer stored in a sum type.
+///
+/// There is no support for constructing or accessing with a dynamic tag as
+/// that would fundamentally violate the type safety provided by the sum type.
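+///
+/// A minimal sketch (the tag enum and pointee types are illustrative):
+/// \code
+///   enum Kinds { IntKind, FloatKind };
+///   using Sum = PointerSumType<Kinds,
+///                              PointerSumTypeMember<IntKind, int *>,
+///                              PointerSumTypeMember<FloatKind, float *>>;
+///   int I = 42;
+///   Sum S = Sum::create<IntKind>(&I);
+///   assert(S.is<IntKind>() && S.get<IntKind>() == &I);
+///   assert(S.get<FloatKind>() == nullptr); // Inactive members read as null.
+/// \endcode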
+template <typename TagT, typename... MemberTs> class PointerSumType {
+ using HelperT = detail::PointerSumTypeHelper<TagT, MemberTs...>;
+
+ // We keep both the raw value and the min tag value's pointer in a union. When
+ // the minimum tag value is zero, this allows code below to cleanly expose the
+ // address of the zero-tag pointer instead of just the zero-tag pointer
+ // itself. This is especially useful when building `ArrayRef`s out of a single
+ // pointer. However, we have to carefully access the union due to the active
+ // member potentially changing. When we *store* a new value, we directly
+ // access the union to allow us to store using the obvious types. However,
+ // when we *read* a value, we copy the underlying storage out to avoid relying
+ // on one member or the other being active.
+ union StorageT {
+ // Ensure we get a null default constructed value. We don't use a member
+ // initializer because some compilers seem to not implement those correctly
+ // for a union.
+ StorageT() : Value(0) {}
+
+ uintptr_t Value;
+
+ typename HelperT::template Lookup<HelperT::MinTag>::PointerT MinTagPointer;
+ };
+
+ StorageT Storage;
+
+public:
+ constexpr PointerSumType() = default;
+
+ /// A typed setter to a given tagged member of the sum type.
+ template <TagT N>
+ void set(typename HelperT::template Lookup<N>::PointerT Pointer) {
+ void *V = HelperT::template Lookup<N>::TraitsT::getAsVoidPointer(Pointer);
+ assert((reinterpret_cast<uintptr_t>(V) & HelperT::TagMask) == 0 &&
+ "Pointer is insufficiently aligned to store the discriminant!");
+ Storage.Value = reinterpret_cast<uintptr_t>(V) | N;
+ }
+
+ /// A typed constructor for a specific tagged member of the sum type.
+ template <TagT N>
+ static PointerSumType
+ create(typename HelperT::template Lookup<N>::PointerT Pointer) {
+ PointerSumType Result;
+ Result.set<N>(Pointer);
+ return Result;
+ }
+
+ /// Clear the value to null with the min tag type.
+ void clear() { set<HelperT::MinTag>(nullptr); }
+
+ TagT getTag() const {
+ return static_cast<TagT>(getOpaqueValue() & HelperT::TagMask);
+ }
+
+ template <TagT N> bool is() const { return N == getTag(); }
+
+ template <TagT N> typename HelperT::template Lookup<N>::PointerT get() const {
+ void *P = is<N>() ? getVoidPtr() : nullptr;
+ return HelperT::template Lookup<N>::TraitsT::getFromVoidPointer(P);
+ }
+
+ template <TagT N>
+ typename HelperT::template Lookup<N>::PointerT cast() const {
+ assert(is<N>() && "This instance has a different active member.");
+ return HelperT::template Lookup<N>::TraitsT::getFromVoidPointer(
+ getVoidPtr());
+ }
+
+ /// If the tag is zero and the pointer's value isn't changed when being
+ /// stored, get the address of the stored value type-punned to the zero-tag's
+ /// pointer type.
+ typename HelperT::template Lookup<HelperT::MinTag>::PointerT const *
+ getAddrOfZeroTagPointer() const {
+ return const_cast<PointerSumType *>(this)->getAddrOfZeroTagPointer();
+ }
+
+ /// If the tag is zero and the pointer's value isn't changed when being
+ /// stored, get the address of the stored value type-punned to the zero-tag's
+ /// pointer type.
+ typename HelperT::template Lookup<HelperT::MinTag>::PointerT *
+ getAddrOfZeroTagPointer() {
+ static_assert(HelperT::MinTag == 0, "Non-zero minimum tag value!");
+ assert(is<HelperT::MinTag>() && "The active tag is not zero!");
+ // Store the initial value of the pointer when read out of our storage.
+ auto InitialPtr = get<HelperT::MinTag>();
+ // Now update the active member of the union to be the actual pointer-typed
+ // member so that accessing it indirectly through the returned address is
+ // valid.
+ Storage.MinTagPointer = InitialPtr;
+ // Finally, validate that this was a no-op as expected by reading it back
+ // out using the same underlying-storage read as above.
+ assert(InitialPtr == get<HelperT::MinTag>() &&
+ "Switching to typed storage changed the pointer returned!");
+ // Now we can correctly return an address to typed storage.
+ return &Storage.MinTagPointer;
+ }
+
+ explicit operator bool() const {
+ return getOpaqueValue() & HelperT::PointerMask;
+ }
+ bool operator==(const PointerSumType &R) const {
+ return getOpaqueValue() == R.getOpaqueValue();
+ }
+ bool operator!=(const PointerSumType &R) const {
+ return getOpaqueValue() != R.getOpaqueValue();
+ }
+ bool operator<(const PointerSumType &R) const {
+ return getOpaqueValue() < R.getOpaqueValue();
+ }
+ bool operator>(const PointerSumType &R) const {
+ return getOpaqueValue() > R.getOpaqueValue();
+ }
+ bool operator<=(const PointerSumType &R) const {
+ return getOpaqueValue() <= R.getOpaqueValue();
+ }
+ bool operator>=(const PointerSumType &R) const {
+ return getOpaqueValue() >= R.getOpaqueValue();
+ }
+
+ uintptr_t getOpaqueValue() const {
+ // Read the underlying storage of the union, regardless of the active
+ // member.
+ return bit_cast<uintptr_t>(Storage);
+ }
+
+protected:
+ void *getVoidPtr() const {
+ return reinterpret_cast<void *>(getOpaqueValue() & HelperT::PointerMask);
+ }
+};
+
+namespace detail {
+
+/// A helper template for implementing \c PointerSumType. It provides fast
+/// compile-time lookup of the member from a particular tag value, along with
+/// useful constants and compile-time checking infrastructure.
+template <typename TagT, typename... MemberTs>
+struct PointerSumTypeHelper : MemberTs... {
+ // First we use a trick to allow quickly looking up information about
+ // a particular member of the sum type. This works because we arranged to
+ // have this type derive from all of the member type templates. We can select
+ // the matching member for a tag using type deduction during overload
+ // resolution.
+ template <TagT N, typename PointerT, typename TraitsT>
+ static PointerSumTypeMember<N, PointerT, TraitsT>
+ LookupOverload(PointerSumTypeMember<N, PointerT, TraitsT> *);
+ template <TagT N> static void LookupOverload(...);
+ template <TagT N> struct Lookup {
+ // Compute a particular member type by resolving the lookup helper overload.
+ using MemberT = decltype(
+ LookupOverload<N>(static_cast<PointerSumTypeHelper *>(nullptr)));
+
+ /// The Nth member's pointer type.
+ using PointerT = typename MemberT::PointerT;
+
+ /// The Nth member's traits type.
+ using TraitsT = typename MemberT::TraitsT;
+ };
+
+ // Next we need to compute the number of bits available for the discriminant
+ // by taking the min of the bits available for each member. Much of this
+  // would be far easier with good constexpr support.
+ template <uintptr_t V, uintptr_t... Vs>
+ struct Min : std::integral_constant<
+ uintptr_t, (V < Min<Vs...>::value ? V : Min<Vs...>::value)> {
+ };
+ template <uintptr_t V>
+ struct Min<V> : std::integral_constant<uintptr_t, V> {};
+ enum { NumTagBits = Min<MemberTs::TraitsT::NumLowBitsAvailable...>::value };
+
+ // Also compute the smallest discriminant and various masks for convenience.
+ constexpr static TagT MinTag =
+ static_cast<TagT>(Min<MemberTs::Tag...>::value);
+ enum : uint64_t {
+ PointerMask = static_cast<uint64_t>(-1) << NumTagBits,
+ TagMask = ~PointerMask
+ };
+
+ // Finally we need a recursive template to do static checks of each
+ // member.
+ template <typename MemberT, typename... InnerMemberTs>
+ struct Checker : Checker<InnerMemberTs...> {
+ static_assert(MemberT::Tag < (1 << NumTagBits),
+ "This discriminant value requires too many bits!");
+ };
+ template <typename MemberT> struct Checker<MemberT> : std::true_type {
+ static_assert(MemberT::Tag < (1 << NumTagBits),
+ "This discriminant value requires too many bits!");
+ };
+ static_assert(Checker<MemberTs...>::value,
+ "Each member must pass the checker.");
+};
+
+} // end namespace detail
+
+// Teach DenseMap how to use PointerSumTypes as keys.
+template <typename TagT, typename... MemberTs>
+struct DenseMapInfo<PointerSumType<TagT, MemberTs...>> {
+ using SumType = PointerSumType<TagT, MemberTs...>;
+ using HelperT = detail::PointerSumTypeHelper<TagT, MemberTs...>;
+ enum { SomeTag = HelperT::MinTag };
+ using SomePointerT =
+ typename HelperT::template Lookup<HelperT::MinTag>::PointerT;
+ using SomePointerInfo = DenseMapInfo<SomePointerT>;
+
+ static inline SumType getEmptyKey() {
+ return SumType::create<SomeTag>(SomePointerInfo::getEmptyKey());
+ }
+
+ static inline SumType getTombstoneKey() {
+ return SumType::create<SomeTag>(SomePointerInfo::getTombstoneKey());
+ }
+
+ static unsigned getHashValue(const SumType &Arg) {
+ uintptr_t OpaqueValue = Arg.getOpaqueValue();
+ return DenseMapInfo<uintptr_t>::getHashValue(OpaqueValue);
+ }
+
+ static bool isEqual(const SumType &LHS, const SumType &RHS) {
+ return LHS == RHS;
+ }
+};
+
+} // end namespace llvm
+
+#endif // LLVM_ADT_POINTERSUMTYPE_H
+
+#ifdef __GNUC__
+#pragma GCC diagnostic pop
+#endif
diff --git a/contrib/libs/llvm14/include/llvm/ADT/PointerUnion.h b/contrib/libs/llvm14/include/llvm/ADT/PointerUnion.h
new file mode 100644
index 0000000000..0d775d5c54
--- /dev/null
+++ b/contrib/libs/llvm14/include/llvm/ADT/PointerUnion.h
@@ -0,0 +1,261 @@
+#pragma once
+
+#ifdef __GNUC__
+#pragma GCC diagnostic push
+#pragma GCC diagnostic ignored "-Wunused-parameter"
+#endif
+
+//===- llvm/ADT/PointerUnion.h - Discriminated Union of 2 Ptrs --*- C++ -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+///
+/// \file
+/// This file defines the PointerUnion class, which is a discriminated union of
+/// pointer types.
+///
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_ADT_POINTERUNION_H
+#define LLVM_ADT_POINTERUNION_H
+
+#include "llvm/ADT/DenseMapInfo.h"
+#include "llvm/ADT/PointerIntPair.h"
+#include "llvm/ADT/STLExtras.h"
+#include "llvm/Support/PointerLikeTypeTraits.h"
+#include <algorithm>
+#include <cassert>
+#include <cstddef>
+#include <cstdint>
+
+namespace llvm {
+
+namespace pointer_union_detail {
+ /// Determine the number of bits required to store integers with values < n.
+ /// This is ceil(log2(n)).
+ constexpr int bitsRequired(unsigned n) {
+ return n > 1 ? 1 + bitsRequired((n + 1) / 2) : 0;
+ }
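+  // For example (illustrative): bitsRequired(2) == 1, bitsRequired(3) == 2,
+  // and bitsRequired(4) == 2, matching ceil(log2(n)).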
+
+ template <typename... Ts> constexpr int lowBitsAvailable() {
+ return std::min<int>({PointerLikeTypeTraits<Ts>::NumLowBitsAvailable...});
+ }
+
+ /// Find the first type in a list of types.
+ template <typename T, typename...> struct GetFirstType {
+ using type = T;
+ };
+
+ /// Provide PointerLikeTypeTraits for void* that is used by PointerUnion
+ /// for the template arguments.
+ template <typename ...PTs> class PointerUnionUIntTraits {
+ public:
+ static inline void *getAsVoidPointer(void *P) { return P; }
+ static inline void *getFromVoidPointer(void *P) { return P; }
+ static constexpr int NumLowBitsAvailable = lowBitsAvailable<PTs...>();
+ };
+
+ template <typename Derived, typename ValTy, int I, typename ...Types>
+ class PointerUnionMembers;
+
+ template <typename Derived, typename ValTy, int I>
+ class PointerUnionMembers<Derived, ValTy, I> {
+ protected:
+ ValTy Val;
+ PointerUnionMembers() = default;
+ PointerUnionMembers(ValTy Val) : Val(Val) {}
+
+ friend struct PointerLikeTypeTraits<Derived>;
+ };
+
+ template <typename Derived, typename ValTy, int I, typename Type,
+ typename ...Types>
+ class PointerUnionMembers<Derived, ValTy, I, Type, Types...>
+ : public PointerUnionMembers<Derived, ValTy, I + 1, Types...> {
+ using Base = PointerUnionMembers<Derived, ValTy, I + 1, Types...>;
+ public:
+ using Base::Base;
+ PointerUnionMembers() = default;
+ PointerUnionMembers(Type V)
+ : Base(ValTy(const_cast<void *>(
+ PointerLikeTypeTraits<Type>::getAsVoidPointer(V)),
+ I)) {}
+
+ using Base::operator=;
+ Derived &operator=(Type V) {
+ this->Val = ValTy(
+ const_cast<void *>(PointerLikeTypeTraits<Type>::getAsVoidPointer(V)),
+ I);
+ return static_cast<Derived &>(*this);
+ };
+ };
+}
+
+/// A discriminated union of two or more pointer types, with the discriminator
+/// in the low bit of the pointer.
+///
+/// This implementation is extremely efficient in space due to leveraging the
+/// low bits of the pointer, while exposing a natural and type-safe API.
+///
+/// Common use patterns would be something like this:
+/// PointerUnion<int*, float*> P;
+/// P = (int*)0;
+/// printf("%d %d", P.is<int*>(), P.is<float*>()); // prints "1 0"
+/// X = P.get<int*>(); // ok.
+/// Y = P.get<float*>(); // runtime assertion failure.
+/// Z = P.get<double*>(); // compile time failure.
+/// P = (float*)0;
+/// Y = P.get<float*>(); // ok.
+/// X = P.get<int*>(); // runtime assertion failure.
+/// PointerUnion<int*, int*> Q; // compile time failure.
+template <typename... PTs>
+class PointerUnion
+ : public pointer_union_detail::PointerUnionMembers<
+ PointerUnion<PTs...>,
+ PointerIntPair<
+ void *, pointer_union_detail::bitsRequired(sizeof...(PTs)), int,
+ pointer_union_detail::PointerUnionUIntTraits<PTs...>>,
+ 0, PTs...> {
+ static_assert(TypesAreDistinct<PTs...>::value,
+ "PointerUnion alternative types cannot be repeated");
+ // The first type is special because we want to directly cast a pointer to a
+ // default-initialized union to a pointer to the first type. But we don't
+ // want PointerUnion to be a 'template <typename First, typename ...Rest>'
+ // because it's much more convenient to have a name for the whole pack. So
+ // split off the first type here.
+ using First = TypeAtIndex<0, PTs...>;
+ using Base = typename PointerUnion::PointerUnionMembers;
+
+public:
+ PointerUnion() = default;
+
+ PointerUnion(std::nullptr_t) : PointerUnion() {}
+ using Base::Base;
+
+ /// Test if the pointer held in the union is null, regardless of
+ /// which type it is.
+ bool isNull() const { return !this->Val.getPointer(); }
+
+ explicit operator bool() const { return !isNull(); }
+
+ /// Test if the Union currently holds the type matching T.
+ template <typename T> bool is() const {
+ return this->Val.getInt() == FirstIndexOfType<T, PTs...>::value;
+ }
+
+ /// Returns the value of the specified pointer type.
+ ///
+ /// If the specified pointer type is incorrect, assert.
+ template <typename T> T get() const {
+ assert(is<T>() && "Invalid accessor called");
+ return PointerLikeTypeTraits<T>::getFromVoidPointer(this->Val.getPointer());
+ }
+
+ /// Returns the current pointer if it is of the specified pointer type,
+ /// otherwise returns null.
+ template <typename T> T dyn_cast() const {
+ if (is<T>())
+ return get<T>();
+ return T();
+ }
+
+ /// If the union is set to the first pointer type get an address pointing to
+ /// it.
+ First const *getAddrOfPtr1() const {
+ return const_cast<PointerUnion *>(this)->getAddrOfPtr1();
+ }
+
+ /// If the union is set to the first pointer type get an address pointing to
+ /// it.
+ First *getAddrOfPtr1() {
+ assert(is<First>() && "Val is not the first pointer");
+ assert(
+ PointerLikeTypeTraits<First>::getAsVoidPointer(get<First>()) ==
+ this->Val.getPointer() &&
+ "Can't get the address because PointerLikeTypeTraits changes the ptr");
+ return const_cast<First *>(
+ reinterpret_cast<const First *>(this->Val.getAddrOfPointer()));
+ }
+
+ /// Assignment from nullptr which just clears the union.
+ const PointerUnion &operator=(std::nullptr_t) {
+ this->Val.initWithPointer(nullptr);
+ return *this;
+ }
+
+ /// Assignment from elements of the union.
+ using Base::operator=;
+
+ void *getOpaqueValue() const { return this->Val.getOpaqueValue(); }
+ static inline PointerUnion getFromOpaqueValue(void *VP) {
+ PointerUnion V;
+ V.Val = decltype(V.Val)::getFromOpaqueValue(VP);
+ return V;
+ }
+};
+
+template <typename ...PTs>
+bool operator==(PointerUnion<PTs...> lhs, PointerUnion<PTs...> rhs) {
+ return lhs.getOpaqueValue() == rhs.getOpaqueValue();
+}
+
+template <typename ...PTs>
+bool operator!=(PointerUnion<PTs...> lhs, PointerUnion<PTs...> rhs) {
+ return lhs.getOpaqueValue() != rhs.getOpaqueValue();
+}
+
+template <typename ...PTs>
+bool operator<(PointerUnion<PTs...> lhs, PointerUnion<PTs...> rhs) {
+ return lhs.getOpaqueValue() < rhs.getOpaqueValue();
+}
+
+// Teach SmallPtrSet that PointerUnion is "basically a pointer": the number of
+// low bits available is the minimum over all pointer types, minus the bits
+// used for the discriminator.
+template <typename ...PTs>
+struct PointerLikeTypeTraits<PointerUnion<PTs...>> {
+ static inline void *getAsVoidPointer(const PointerUnion<PTs...> &P) {
+ return P.getOpaqueValue();
+ }
+
+ static inline PointerUnion<PTs...> getFromVoidPointer(void *P) {
+ return PointerUnion<PTs...>::getFromOpaqueValue(P);
+ }
+
+ // The number of bits available are the min of the pointer types minus the
+ // bits needed for the discriminator.
+ static constexpr int NumLowBitsAvailable = PointerLikeTypeTraits<decltype(
+ PointerUnion<PTs...>::Val)>::NumLowBitsAvailable;
+};
+
+// Teach DenseMap how to use PointerUnions as keys.
+template <typename ...PTs> struct DenseMapInfo<PointerUnion<PTs...>> {
+ using Union = PointerUnion<PTs...>;
+ using FirstInfo =
+ DenseMapInfo<typename pointer_union_detail::GetFirstType<PTs...>::type>;
+
+ static inline Union getEmptyKey() { return Union(FirstInfo::getEmptyKey()); }
+
+ static inline Union getTombstoneKey() {
+ return Union(FirstInfo::getTombstoneKey());
+ }
+
+ static unsigned getHashValue(const Union &UnionVal) {
+ intptr_t key = (intptr_t)UnionVal.getOpaqueValue();
+ return DenseMapInfo<intptr_t>::getHashValue(key);
+ }
+
+ static bool isEqual(const Union &LHS, const Union &RHS) {
+ return LHS == RHS;
+ }
+};
+
+} // end namespace llvm
+
+#endif // LLVM_ADT_POINTERUNION_H
+
+#ifdef __GNUC__
+#pragma GCC diagnostic pop
+#endif
diff --git a/contrib/libs/llvm14/include/llvm/ADT/PostOrderIterator.h b/contrib/libs/llvm14/include/llvm/ADT/PostOrderIterator.h
new file mode 100644
index 0000000000..2fe5197ed2
--- /dev/null
+++ b/contrib/libs/llvm14/include/llvm/ADT/PostOrderIterator.h
@@ -0,0 +1,326 @@
+#pragma once
+
+#ifdef __GNUC__
+#pragma GCC diagnostic push
+#pragma GCC diagnostic ignored "-Wunused-parameter"
+#endif
+
+//===- llvm/ADT/PostOrderIterator.h - PostOrder iterator --------*- C++ -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+///
+/// \file
+/// This file builds on the ADT/GraphTraits.h file to build a generic graph
+/// post order iterator. This should work over any graph type that has a
+/// GraphTraits specialization.
+///
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_ADT_POSTORDERITERATOR_H
+#define LLVM_ADT_POSTORDERITERATOR_H
+
+#include "llvm/ADT/GraphTraits.h"
+#include "llvm/ADT/Optional.h"
+#include "llvm/ADT/SmallPtrSet.h"
+#include "llvm/ADT/SmallVector.h"
+#include "llvm/ADT/iterator_range.h"
+#include <iterator>
+#include <set>
+#include <utility>
+#include <vector>
+
+namespace llvm {
+
+// The po_iterator_storage template provides access to the set of already
+// visited nodes during the po_iterator's depth-first traversal.
+//
+// The default implementation simply contains a set of visited nodes, while
+// the External=true version uses a reference to an external set.
+//
+// It is possible to prune the depth-first traversal in several ways:
+//
+// - When providing an external set that already contains some graph nodes,
+// those nodes won't be visited again. This is useful for restarting a
+// post-order traversal on a graph with nodes that aren't dominated by a
+// single node.
+//
+// - By providing a custom SetType class, unwanted graph nodes can be excluded
+// by having the insert() function return false. This could for example
+// confine a CFG traversal to blocks in a specific loop.
+//
+// - Finally, by specializing the po_iterator_storage template itself, graph
+// edges can be pruned by returning false in the insertEdge() function. This
+// could be used to remove loop back-edges from the CFG seen by po_iterator.
+//
+// A specialized po_iterator_storage class can observe both the pre-order and
+// the post-order. The insertEdge() function is called in a pre-order, while
+// the finishPostorder() function is called just before the po_iterator moves
+// on to the next node.
+
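+// An illustrative sketch (not part of the original header; `Node`, `G`, and
+// `AlreadyDone` are hypothetical): restarting a traversal with an external
+// visited set. Nodes already in the set are never visited again.
+//
+//   SmallPtrSet<Node *, 8> Visited;
+//   Visited.insert(AlreadyDone);
+//   for (Node *N : post_order_ext(G, Visited))
+//     visit(N); // Never yields AlreadyDone.
+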
+/// Default po_iterator_storage implementation with an internal set object.
+template<class SetType, bool External>
+class po_iterator_storage {
+ SetType Visited;
+
+public:
+ // Return true if edge destination should be visited.
+ template <typename NodeRef>
+ bool insertEdge(Optional<NodeRef> From, NodeRef To) {
+ return Visited.insert(To).second;
+ }
+
+ // Called after all children of BB have been visited.
+ template <typename NodeRef> void finishPostorder(NodeRef BB) {}
+};
+
+/// Specialization of po_iterator_storage that references an external set.
+template<class SetType>
+class po_iterator_storage<SetType, true> {
+ SetType &Visited;
+
+public:
+ po_iterator_storage(SetType &VSet) : Visited(VSet) {}
+ po_iterator_storage(const po_iterator_storage &S) : Visited(S.Visited) {}
+
+  // Return true if the edge destination should be visited; called with
+  // From = None for the root node.
+ // Graph edges can be pruned by specializing this function.
+ template <class NodeRef> bool insertEdge(Optional<NodeRef> From, NodeRef To) {
+ return Visited.insert(To).second;
+ }
+
+ // Called after all children of BB have been visited.
+ template <class NodeRef> void finishPostorder(NodeRef BB) {}
+};
+
+template <class GraphT,
+ class SetType = SmallPtrSet<typename GraphTraits<GraphT>::NodeRef, 8>,
+ bool ExtStorage = false, class GT = GraphTraits<GraphT>>
+class po_iterator : public po_iterator_storage<SetType, ExtStorage> {
+public:
+ using iterator_category = std::forward_iterator_tag;
+ using value_type = typename GT::NodeRef;
+ using difference_type = std::ptrdiff_t;
+ using pointer = value_type *;
+ using reference = value_type &;
+
+private:
+ using NodeRef = typename GT::NodeRef;
+ using ChildItTy = typename GT::ChildIteratorType;
+
+ // VisitStack - Used to maintain the ordering. Top = current block
+ // First element is basic block pointer, second is the 'next child' to visit
+ SmallVector<std::pair<NodeRef, ChildItTy>, 8> VisitStack;
+
+ po_iterator(NodeRef BB) {
+ this->insertEdge(Optional<NodeRef>(), BB);
+ VisitStack.push_back(std::make_pair(BB, GT::child_begin(BB)));
+ traverseChild();
+ }
+
+ po_iterator() = default; // End is when stack is empty.
+
+ po_iterator(NodeRef BB, SetType &S)
+ : po_iterator_storage<SetType, ExtStorage>(S) {
+ if (this->insertEdge(Optional<NodeRef>(), BB)) {
+ VisitStack.push_back(std::make_pair(BB, GT::child_begin(BB)));
+ traverseChild();
+ }
+ }
+
+ po_iterator(SetType &S)
+ : po_iterator_storage<SetType, ExtStorage>(S) {
+ } // End is when stack is empty.
+
+ void traverseChild() {
+ while (VisitStack.back().second != GT::child_end(VisitStack.back().first)) {
+ NodeRef BB = *VisitStack.back().second++;
+ if (this->insertEdge(Optional<NodeRef>(VisitStack.back().first), BB)) {
+ // If the block is not visited...
+ VisitStack.push_back(std::make_pair(BB, GT::child_begin(BB)));
+ }
+ }
+ }
+
+public:
+ // Provide static "constructors"...
+ static po_iterator begin(const GraphT &G) {
+ return po_iterator(GT::getEntryNode(G));
+ }
+ static po_iterator end(const GraphT &G) { return po_iterator(); }
+
+ static po_iterator begin(const GraphT &G, SetType &S) {
+ return po_iterator(GT::getEntryNode(G), S);
+ }
+ static po_iterator end(const GraphT &G, SetType &S) { return po_iterator(S); }
+
+ bool operator==(const po_iterator &x) const {
+ return VisitStack == x.VisitStack;
+ }
+ bool operator!=(const po_iterator &x) const { return !(*this == x); }
+
+ const NodeRef &operator*() const { return VisitStack.back().first; }
+
+  // This is a nonstandard operator-> that dereferences the pointer an extra
+  // time so that you can actually call methods on the BasicBlock, because the
+  // contained type is a pointer. This allows, e.g., BBIt->getTerminator().
+ //
+ NodeRef operator->() const { return **this; }
+
+ po_iterator &operator++() { // Preincrement
+ this->finishPostorder(VisitStack.back().first);
+ VisitStack.pop_back();
+ if (!VisitStack.empty())
+ traverseChild();
+ return *this;
+ }
+
+ po_iterator operator++(int) { // Postincrement
+ po_iterator tmp = *this;
+ ++*this;
+ return tmp;
+ }
+};
+
+// Provide global constructors that automatically figure out correct types...
+//
+template <class T>
+po_iterator<T> po_begin(const T &G) { return po_iterator<T>::begin(G); }
+template <class T>
+po_iterator<T> po_end (const T &G) { return po_iterator<T>::end(G); }
+
+template <class T> iterator_range<po_iterator<T>> post_order(const T &G) {
+ return make_range(po_begin(G), po_end(G));
+}
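+
+// For illustration (not part of the original header; assumes `F` is an
+// llvm::Function, whose CFG has the usual GraphTraits specialization):
+//
+//   for (BasicBlock *BB : post_order(&F))
+//     ...; // DFS post-order: children are visited before their parents.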
+
+// Provide global definitions of external postorder iterators...
+template <class T, class SetType = std::set<typename GraphTraits<T>::NodeRef>>
+struct po_ext_iterator : public po_iterator<T, SetType, true> {
+ po_ext_iterator(const po_iterator<T, SetType, true> &V) :
+ po_iterator<T, SetType, true>(V) {}
+};
+
+template<class T, class SetType>
+po_ext_iterator<T, SetType> po_ext_begin(T G, SetType &S) {
+ return po_ext_iterator<T, SetType>::begin(G, S);
+}
+
+template<class T, class SetType>
+po_ext_iterator<T, SetType> po_ext_end(T G, SetType &S) {
+ return po_ext_iterator<T, SetType>::end(G, S);
+}
+
+template <class T, class SetType>
+iterator_range<po_ext_iterator<T, SetType>> post_order_ext(const T &G, SetType &S) {
+ return make_range(po_ext_begin(G, S), po_ext_end(G, S));
+}
+
+// Provide global definitions of inverse post order iterators...
+template <class T, class SetType = std::set<typename GraphTraits<T>::NodeRef>,
+ bool External = false>
+struct ipo_iterator : public po_iterator<Inverse<T>, SetType, External> {
+ ipo_iterator(const po_iterator<Inverse<T>, SetType, External> &V) :
+ po_iterator<Inverse<T>, SetType, External> (V) {}
+};
+
+template <class T>
+ipo_iterator<T> ipo_begin(const T &G) {
+ return ipo_iterator<T>::begin(G);
+}
+
+template <class T>
+ipo_iterator<T> ipo_end(const T &G) {
+ return ipo_iterator<T>::end(G);
+}
+
+template <class T>
+iterator_range<ipo_iterator<T>> inverse_post_order(const T &G) {
+ return make_range(ipo_begin(G), ipo_end(G));
+}
+
+// Provide global definitions of external inverse postorder iterators...
+template <class T, class SetType = std::set<typename GraphTraits<T>::NodeRef>>
+struct ipo_ext_iterator : public ipo_iterator<T, SetType, true> {
+ ipo_ext_iterator(const ipo_iterator<T, SetType, true> &V) :
+ ipo_iterator<T, SetType, true>(V) {}
+ ipo_ext_iterator(const po_iterator<Inverse<T>, SetType, true> &V) :
+ ipo_iterator<T, SetType, true>(V) {}
+};
+
+template <class T, class SetType>
+ipo_ext_iterator<T, SetType> ipo_ext_begin(const T &G, SetType &S) {
+ return ipo_ext_iterator<T, SetType>::begin(G, S);
+}
+
+template <class T, class SetType>
+ipo_ext_iterator<T, SetType> ipo_ext_end(const T &G, SetType &S) {
+ return ipo_ext_iterator<T, SetType>::end(G, S);
+}
+
+template <class T, class SetType>
+iterator_range<ipo_ext_iterator<T, SetType>>
+inverse_post_order_ext(const T &G, SetType &S) {
+ return make_range(ipo_ext_begin(G, S), ipo_ext_end(G, S));
+}
+
+//===--------------------------------------------------------------------===//
+// Reverse Post Order CFG iterator code
+//===--------------------------------------------------------------------===//
+//
+// This is used to visit basic blocks in a method in reverse post order. This
+// class is awkward to use because I don't know a good incremental algorithm to
+// compute RPO from a graph. Because of this, the construction of the
+// ReversePostOrderTraversal object is expensive (it must walk the entire graph
+// with a postorder iterator to build the data structures). The moral of this
+// story is: Don't create more ReversePostOrderTraversal objects than necessary.
+//
+// Because it does the traversal in its constructor, it won't invalidate when
+// BasicBlocks are removed, *but* it may contain erased blocks. Some places
+// rely on this behavior (e.g. GVN).
+//
+// This class should be used like this:
+// {
+// ReversePostOrderTraversal<Function*> RPOT(FuncPtr); // Expensive to create
+// for (rpo_iterator I = RPOT.begin(); I != RPOT.end(); ++I) {
+// ...
+// }
+// for (rpo_iterator I = RPOT.begin(); I != RPOT.end(); ++I) {
+// ...
+// }
+// }
+//
+
+template<class GraphT, class GT = GraphTraits<GraphT>>
+class ReversePostOrderTraversal {
+ using NodeRef = typename GT::NodeRef;
+
+ std::vector<NodeRef> Blocks; // Block list in normal PO order
+
+ void Initialize(const GraphT &G) {
+ std::copy(po_begin(G), po_end(G), std::back_inserter(Blocks));
+ }
+
+public:
+ using rpo_iterator = typename std::vector<NodeRef>::reverse_iterator;
+ using const_rpo_iterator = typename std::vector<NodeRef>::const_reverse_iterator;
+
+ ReversePostOrderTraversal(const GraphT &G) { Initialize(G); }
+
+ // Because we want a reverse post order, use reverse iterators from the vector
+ rpo_iterator begin() { return Blocks.rbegin(); }
+ const_rpo_iterator begin() const { return Blocks.crbegin(); }
+ rpo_iterator end() { return Blocks.rend(); }
+ const_rpo_iterator end() const { return Blocks.crend(); }
+};
+
+} // end namespace llvm
+
+#endif // LLVM_ADT_POSTORDERITERATOR_H
+
+#ifdef __GNUC__
+#pragma GCC diagnostic pop
+#endif
diff --git a/contrib/libs/llvm14/include/llvm/ADT/PriorityQueue.h b/contrib/libs/llvm14/include/llvm/ADT/PriorityQueue.h
new file mode 100644
index 0000000000..d2c31a82c1
--- /dev/null
+++ b/contrib/libs/llvm14/include/llvm/ADT/PriorityQueue.h
@@ -0,0 +1,94 @@
+#pragma once
+
+#ifdef __GNUC__
+#pragma GCC diagnostic push
+#pragma GCC diagnostic ignored "-Wunused-parameter"
+#endif
+
+//===- llvm/ADT/PriorityQueue.h - Priority queues ---------------*- C++ -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+///
+/// \file
+/// This file defines the PriorityQueue class.
+///
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_ADT_PRIORITYQUEUE_H
+#define LLVM_ADT_PRIORITYQUEUE_H
+
+#include "llvm/ADT/STLExtras.h"
+#include <algorithm>
+#include <queue>
+
+namespace llvm {
+
+/// PriorityQueue - This class behaves like std::priority_queue and
+/// provides a few additional convenience functions.
+///
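+/// A usage sketch (illustrative, not part of the original header):
+/// \code
+///   PriorityQueue<int> Q;
+///   Q.push(3); Q.push(1); Q.push(4);
+///   Q.erase_one(4);     // Linear find, then logarithmic-time removal.
+///   int Top = Q.top();  // Top == 3
+/// \endcode
+///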
+template<class T,
+ class Sequence = std::vector<T>,
+ class Compare = std::less<typename Sequence::value_type> >
+class PriorityQueue : public std::priority_queue<T, Sequence, Compare> {
+public:
+ explicit PriorityQueue(const Compare &compare = Compare(),
+ const Sequence &sequence = Sequence())
+ : std::priority_queue<T, Sequence, Compare>(compare, sequence)
+ {}
+
+ template<class Iterator>
+ PriorityQueue(Iterator begin, Iterator end,
+ const Compare &compare = Compare(),
+ const Sequence &sequence = Sequence())
+ : std::priority_queue<T, Sequence, Compare>(begin, end, compare, sequence)
+ {}
+
+  /// erase_one - Erase one element from the queue, regardless of its
+  /// position. This operation performs a linear search to find an element
+  /// equal to t, but then uses only logarithmic-time operations to do
+  /// the erase itself.
+ ///
+ void erase_one(const T &t) {
+ // Linear-search to find the element.
+ typename Sequence::size_type i = find(this->c, t) - this->c.begin();
+
+ // Logarithmic-time heap bubble-up.
+ while (i != 0) {
+ typename Sequence::size_type parent = (i - 1) / 2;
+ this->c[i] = this->c[parent];
+ i = parent;
+ }
+
+ // The element we want to remove is now at the root, so we can use
+ // priority_queue's plain pop to remove it.
+ this->pop();
+ }
+
+ /// reheapify - If an element in the queue has changed in a way that
+ /// affects its standing in the comparison function, the queue's
+ /// internal state becomes invalid. Calling reheapify() resets the
+ /// queue's state, making it valid again. This operation has time
+ /// complexity proportional to the number of elements in the queue,
+ /// so don't plan to use it a lot.
+ ///
+ void reheapify() {
+ std::make_heap(this->c.begin(), this->c.end(), this->comp);
+ }
+
+ /// clear - Erase all elements from the queue.
+ ///
+ void clear() {
+ this->c.clear();
+ }
+};
+
+} // End llvm namespace
+
+#endif
+
+#ifdef __GNUC__
+#pragma GCC diagnostic pop
+#endif
diff --git a/contrib/libs/llvm14/include/llvm/ADT/PriorityWorklist.h b/contrib/libs/llvm14/include/llvm/ADT/PriorityWorklist.h
new file mode 100644
index 0000000000..f11bc08566
--- /dev/null
+++ b/contrib/libs/llvm14/include/llvm/ADT/PriorityWorklist.h
@@ -0,0 +1,275 @@
+#pragma once
+
+#ifdef __GNUC__
+#pragma GCC diagnostic push
+#pragma GCC diagnostic ignored "-Wunused-parameter"
+#endif
+
+//===- PriorityWorklist.h - Worklist with insertion priority ----*- C++ -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+///
+/// \file
+///
+/// This file provides a priority worklist. See the class comments for details.
+///
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_ADT_PRIORITYWORKLIST_H
+#define LLVM_ADT_PRIORITYWORKLIST_H
+
+#include "llvm/ADT/DenseMap.h"
+#include "llvm/ADT/STLExtras.h"
+#include "llvm/ADT/SmallVector.h"
+#include "llvm/Support/Compiler.h"
+#include <cassert>
+#include <cstddef>
+#include <iterator>
+#include <type_traits>
+#include <vector>
+
+namespace llvm {
+
+/// A FILO worklist that prioritizes on re-insertion without duplication.
+///
+/// This is very similar to a \c SetVector with the primary difference that
+/// while re-insertion does not create a duplicate, it does adjust the
+/// visitation order to respect the last insertion point. This can be useful
+/// when the visit order needs to be prioritized based on insertion point
+/// without actually having duplicate visits.
+///
+/// Note that this doesn't prevent re-insertion of elements which have been
+/// visited -- if you need to break cycles, a set will still be necessary.
+///
+/// The type \c T must be default constructable to a null value that will be
+/// ignored. It is an error to insert such a value, and popping elements will
+/// never produce such a value. It is expected to be used with common nullable
+/// types like pointers or optionals.
+///
+/// Internally this uses a vector to store the worklist and a map to identify
+/// existing elements in the worklist. Both of these may be customized, but the
+/// map must support the basic DenseMap API for mapping from a T to an integer
+/// index into the vector.
+///
+/// A partial specialization is provided to automatically select a SmallVector
+/// and a SmallDenseMap if custom data structures are not provided.
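+///
+/// A brief sketch of the re-insertion semantics (illustrative, not part of the
+/// original header; note 0 acts as the null value for T = int):
+/// \code
+///   SmallPriorityWorklist<int, 4> W;
+///   W.insert(1);
+///   W.insert(2);
+///   W.insert(1);              // No duplicate, but 1 will now pop first.
+///   int X = W.pop_back_val(); // X == 1
+///   int Y = W.pop_back_val(); // Y == 2
+/// \endcode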
+template <typename T, typename VectorT = std::vector<T>,
+ typename MapT = DenseMap<T, ptrdiff_t>>
+class PriorityWorklist {
+public:
+ using value_type = T;
+ using key_type = T;
+ using reference = T&;
+ using const_reference = const T&;
+ using size_type = typename MapT::size_type;
+
+ /// Construct an empty PriorityWorklist
+ PriorityWorklist() = default;
+
+ /// Determine if the PriorityWorklist is empty or not.
+ bool empty() const {
+ return V.empty();
+ }
+
+ /// Returns the number of elements in the worklist.
+ size_type size() const {
+ return M.size();
+ }
+
+ /// Count the number of elements of a given key in the PriorityWorklist.
+ /// \returns 0 if the element is not in the PriorityWorklist, 1 if it is.
+ size_type count(const key_type &key) const {
+ return M.count(key);
+ }
+
+ /// Return the last element of the PriorityWorklist.
+ const T &back() const {
+ assert(!empty() && "Cannot call back() on empty PriorityWorklist!");
+ return V.back();
+ }
+
+ /// Insert a new element into the PriorityWorklist.
+ /// \returns true if the element was inserted into the PriorityWorklist.
+ bool insert(const T &X) {
+ assert(X != T() && "Cannot insert a null (default constructed) value!");
+ auto InsertResult = M.insert({X, V.size()});
+ if (InsertResult.second) {
+ // Fresh value, just append it to the vector.
+ V.push_back(X);
+ return true;
+ }
+
+ auto &Index = InsertResult.first->second;
+ assert(V[Index] == X && "Value not actually at index in map!");
+ if (Index != (ptrdiff_t)(V.size() - 1)) {
+ // If the element isn't at the back, null it out and append a fresh one.
+ V[Index] = T();
+ Index = (ptrdiff_t)V.size();
+ V.push_back(X);
+ }
+ return false;
+ }
+
+ /// Insert a sequence of new elements into the PriorityWorklist.
+ template <typename SequenceT>
+ std::enable_if_t<!std::is_convertible<SequenceT, T>::value>
+ insert(SequenceT &&Input) {
+ if (std::begin(Input) == std::end(Input))
+ // Nothing to do for an empty input sequence.
+ return;
+
+ // First pull the input sequence into the vector as a bulk append
+ // operation.
+ ptrdiff_t StartIndex = V.size();
+ V.insert(V.end(), std::begin(Input), std::end(Input));
+ // Now walk backwards fixing up the index map and deleting any duplicates.
+ for (ptrdiff_t i = V.size() - 1; i >= StartIndex; --i) {
+ auto InsertResult = M.insert({V[i], i});
+ if (InsertResult.second)
+ continue;
+
+ // If the existing index is before this insert's start, nuke that one and
+ // move it up.
+ ptrdiff_t &Index = InsertResult.first->second;
+ if (Index < StartIndex) {
+ V[Index] = T();
+ Index = i;
+ continue;
+ }
+
+ // Otherwise the existing one comes first so just clear out the value in
+ // this slot.
+ V[i] = T();
+ }
+ }
+
+ /// Remove the last element of the PriorityWorklist.
+ void pop_back() {
+ assert(!empty() && "Cannot remove an element when empty!");
+ assert(back() != T() && "Cannot have a null element at the back!");
+ M.erase(back());
+ do {
+ V.pop_back();
+ } while (!V.empty() && V.back() == T());
+ }
+
+ LLVM_NODISCARD T pop_back_val() {
+ T Ret = back();
+ pop_back();
+ return Ret;
+ }
+
+ /// Erase an item from the worklist.
+ ///
+  /// Note that this is constant time due to the nature of the worklist
+  /// implementation.
+ bool erase(const T& X) {
+ auto I = M.find(X);
+ if (I == M.end())
+ return false;
+
+ assert(V[I->second] == X && "Value not actually at index in map!");
+ if (I->second == (ptrdiff_t)(V.size() - 1)) {
+ do {
+ V.pop_back();
+ } while (!V.empty() && V.back() == T());
+ } else {
+ V[I->second] = T();
+ }
+ M.erase(I);
+ return true;
+ }
+
+ /// Erase items from the set vector based on a predicate function.
+ ///
+ /// This is intended to be equivalent to the following code, if we could
+ /// write it:
+ ///
+ /// \code
+ /// V.erase(remove_if(V, P), V.end());
+ /// \endcode
+ ///
+ /// However, PriorityWorklist doesn't expose non-const iterators, making any
+ /// algorithm like remove_if impossible to use.
+ ///
+ /// \returns true if any element is removed.
+ template <typename UnaryPredicate>
+ bool erase_if(UnaryPredicate P) {
+ typename VectorT::iterator E =
+ remove_if(V, TestAndEraseFromMap<UnaryPredicate>(P, M));
+ if (E == V.end())
+ return false;
+ for (auto I = V.begin(); I != E; ++I)
+ if (*I != T())
+ M[*I] = I - V.begin();
+ V.erase(E, V.end());
+ return true;
+ }
+
+ /// Completely clear the PriorityWorklist
+ void clear() {
+ M.clear();
+ V.clear();
+ }
+
+private:
+ /// A wrapper predicate designed for use with std::remove_if.
+ ///
+ /// This predicate wraps a predicate suitable for use with std::remove_if to
+ /// call M.erase(x) on each element which is slated for removal. This just
+ /// allows the predicate to be move only which we can't do with lambdas
+ /// today.
+ template <typename UnaryPredicateT>
+ class TestAndEraseFromMap {
+ UnaryPredicateT P;
+ MapT &M;
+
+ public:
+ TestAndEraseFromMap(UnaryPredicateT P, MapT &M)
+ : P(std::move(P)), M(M) {}
+
+ bool operator()(const T &Arg) {
+ if (Arg == T())
+ // Skip null values in the PriorityWorklist.
+ return false;
+
+ if (P(Arg)) {
+ M.erase(Arg);
+ return true;
+ }
+ return false;
+ }
+ };
+
+ /// The map from value to index in the vector.
+ MapT M;
+
+ /// The vector of elements in insertion order.
+ VectorT V;
+};
+
+/// A version of \c PriorityWorklist that selects small size optimized data
+/// structures for the vector and map.
+template <typename T, unsigned N>
+class SmallPriorityWorklist
+ : public PriorityWorklist<T, SmallVector<T, N>,
+ SmallDenseMap<T, ptrdiff_t>> {
+public:
+ SmallPriorityWorklist() = default;
+};
+
+} // end namespace llvm
+
+#endif // LLVM_ADT_PRIORITYWORKLIST_H
+
+#ifdef __GNUC__
+#pragma GCC diagnostic pop
+#endif
diff --git a/contrib/libs/llvm14/include/llvm/ADT/SCCIterator.h b/contrib/libs/llvm14/include/llvm/ADT/SCCIterator.h
new file mode 100644
index 0000000000..06810834a6
--- /dev/null
+++ b/contrib/libs/llvm14/include/llvm/ADT/SCCIterator.h
@@ -0,0 +1,383 @@
+#pragma once
+
+#ifdef __GNUC__
+#pragma GCC diagnostic push
+#pragma GCC diagnostic ignored "-Wunused-parameter"
+#endif
+
+//===- ADT/SCCIterator.h - Strongly Connected Comp. Iter. -------*- C++ -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+/// \file
+///
+/// This builds on the llvm/ADT/GraphTraits.h file to find the strongly
+/// connected components (SCCs) of a graph in O(N+E) time using Tarjan's DFS
+/// algorithm.
+///
+/// The SCC iterator has the important property that if a node in SCC S1 has an
+/// edge to a node in SCC S2, then it visits S1 *after* S2.
+///
+/// To visit S1 *before* S2, use the scc_iterator on the Inverse graph. (NOTE:
+/// This requires some simple wrappers and is not supported yet.)
+///
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_ADT_SCCITERATOR_H
+#define LLVM_ADT_SCCITERATOR_H
+
+#include "llvm/ADT/DenseMap.h"
+#include "llvm/ADT/GraphTraits.h"
+#include "llvm/ADT/iterator.h"
+#include <cassert>
+#include <cstddef>
+#include <iterator>
+#include <queue>
+#include <set>
+#include <unordered_map>
+#include <unordered_set>
+#include <vector>
+
+namespace llvm {
+
+/// Enumerate the SCCs of a directed graph in reverse topological order
+/// of the SCC DAG.
+///
+/// This is implemented using Tarjan's DFS algorithm using an internal stack to
+/// build up a vector of nodes in a particular SCC. Note that it is a forward
+/// iterator and thus you cannot backtrack or re-visit nodes.
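+///
+/// A usage sketch (illustrative, not part of the original header; assumes `F`
+/// is an llvm::Function with the usual CFG GraphTraits):
+/// \code
+///   for (scc_iterator<Function *> I = scc_begin(&F); !I.isAtEnd(); ++I) {
+///     const std::vector<BasicBlock *> &SCC = *I;
+///     // Process one SCC; all SCCs it has edges into were visited earlier.
+///   }
+/// \endcode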
+template <class GraphT, class GT = GraphTraits<GraphT>>
+class scc_iterator : public iterator_facade_base<
+ scc_iterator<GraphT, GT>, std::forward_iterator_tag,
+ const std::vector<typename GT::NodeRef>, ptrdiff_t> {
+ using NodeRef = typename GT::NodeRef;
+ using ChildItTy = typename GT::ChildIteratorType;
+ using SccTy = std::vector<NodeRef>;
+ using reference = typename scc_iterator::reference;
+
+ /// Element of VisitStack during DFS.
+ struct StackElement {
+ NodeRef Node; ///< The current node pointer.
+    ChildItTy NextChild; ///< The next child, modified in place during DFS.
+ unsigned MinVisited; ///< Minimum uplink value of all children of Node.
+
+ StackElement(NodeRef Node, const ChildItTy &Child, unsigned Min)
+ : Node(Node), NextChild(Child), MinVisited(Min) {}
+
+ bool operator==(const StackElement &Other) const {
+ return Node == Other.Node &&
+ NextChild == Other.NextChild &&
+ MinVisited == Other.MinVisited;
+ }
+ };
+
+ /// The visit counters used to detect when a complete SCC is on the stack.
+ /// visitNum is the global counter.
+ ///
+ /// nodeVisitNumbers are per-node visit numbers, also used as DFS flags.
+ unsigned visitNum;
+ DenseMap<NodeRef, unsigned> nodeVisitNumbers;
+
+ /// Stack holding nodes of the SCC.
+ std::vector<NodeRef> SCCNodeStack;
+
+ /// The current SCC, retrieved using operator*().
+ SccTy CurrentSCC;
+
+  /// DFS stack, used to maintain the ordering. The top contains the current
+  /// node, the next child to visit, and the minimum uplink value of all
+  /// children of the node.
+ std::vector<StackElement> VisitStack;
+
+ /// A single "visit" within the non-recursive DFS traversal.
+ void DFSVisitOne(NodeRef N);
+
+ /// The stack-based DFS traversal; defined below.
+ void DFSVisitChildren();
+
+ /// Compute the next SCC using the DFS traversal.
+ void GetNextSCC();
+
+ scc_iterator(NodeRef entryN) : visitNum(0) {
+ DFSVisitOne(entryN);
+ GetNextSCC();
+ }
+
+ /// End is when the DFS stack is empty.
+ scc_iterator() = default;
+
+public:
+ static scc_iterator begin(const GraphT &G) {
+ return scc_iterator(GT::getEntryNode(G));
+ }
+ static scc_iterator end(const GraphT &) { return scc_iterator(); }
+
+ /// Direct loop termination test which is more efficient than
+ /// comparison with \c end().
+ bool isAtEnd() const {
+ assert(!CurrentSCC.empty() || VisitStack.empty());
+ return CurrentSCC.empty();
+ }
+
+ bool operator==(const scc_iterator &x) const {
+ return VisitStack == x.VisitStack && CurrentSCC == x.CurrentSCC;
+ }
+
+ scc_iterator &operator++() {
+ GetNextSCC();
+ return *this;
+ }
+
+ reference operator*() const {
+ assert(!CurrentSCC.empty() && "Dereferencing END SCC iterator!");
+ return CurrentSCC;
+ }
+
+ /// Test if the current SCC has a cycle.
+ ///
+ /// If the SCC has more than one node, this is trivially true. If not, it may
+ /// still contain a cycle if the node has an edge back to itself.
+ bool hasCycle() const;
+
+ /// This informs the \c scc_iterator that the specified \c Old node
+ /// has been deleted, and \c New is to be used in its place.
+ void ReplaceNode(NodeRef Old, NodeRef New) {
+ assert(nodeVisitNumbers.count(Old) && "Old not in scc_iterator?");
+ // Do the assignment in two steps, in case 'New' is not yet in the map, and
+ // inserting it causes the map to grow.
+ auto tempVal = nodeVisitNumbers[Old];
+ nodeVisitNumbers[New] = tempVal;
+ nodeVisitNumbers.erase(Old);
+ }
+};
+
+template <class GraphT, class GT>
+void scc_iterator<GraphT, GT>::DFSVisitOne(NodeRef N) {
+ ++visitNum;
+ nodeVisitNumbers[N] = visitNum;
+ SCCNodeStack.push_back(N);
+ VisitStack.push_back(StackElement(N, GT::child_begin(N), visitNum));
+#if 0 // Enable if needed when debugging.
+ dbgs() << "TarjanSCC: Node " << N <<
+ " : visitNum = " << visitNum << "\n";
+#endif
+}
+
+template <class GraphT, class GT>
+void scc_iterator<GraphT, GT>::DFSVisitChildren() {
+ assert(!VisitStack.empty());
+ while (VisitStack.back().NextChild != GT::child_end(VisitStack.back().Node)) {
+ // TOS has at least one more child so continue DFS
+ NodeRef childN = *VisitStack.back().NextChild++;
+ typename DenseMap<NodeRef, unsigned>::iterator Visited =
+ nodeVisitNumbers.find(childN);
+ if (Visited == nodeVisitNumbers.end()) {
+ // this node has never been seen.
+ DFSVisitOne(childN);
+ continue;
+ }
+
+ unsigned childNum = Visited->second;
+ if (VisitStack.back().MinVisited > childNum)
+ VisitStack.back().MinVisited = childNum;
+ }
+}
+
+template <class GraphT, class GT> void scc_iterator<GraphT, GT>::GetNextSCC() {
+ CurrentSCC.clear(); // Prepare to compute the next SCC
+ while (!VisitStack.empty()) {
+ DFSVisitChildren();
+
+ // Pop the leaf on top of the VisitStack.
+ NodeRef visitingN = VisitStack.back().Node;
+ unsigned minVisitNum = VisitStack.back().MinVisited;
+ assert(VisitStack.back().NextChild == GT::child_end(visitingN));
+ VisitStack.pop_back();
+
+ // Propagate MinVisitNum to parent so we can detect the SCC starting node.
+ if (!VisitStack.empty() && VisitStack.back().MinVisited > minVisitNum)
+ VisitStack.back().MinVisited = minVisitNum;
+
+#if 0 // Enable if needed when debugging.
+ dbgs() << "TarjanSCC: Popped node " << visitingN <<
+ " : minVisitNum = " << minVisitNum << "; Node visit num = " <<
+ nodeVisitNumbers[visitingN] << "\n";
+#endif
+
+ if (minVisitNum != nodeVisitNumbers[visitingN])
+ continue;
+
+ // A full SCC is on the SCCNodeStack! It includes all nodes below
+ // visitingN on the stack. Copy those nodes to CurrentSCC,
+ // reset their minVisit values, and return (this suspends
+ // the DFS traversal till the next ++).
+ do {
+ CurrentSCC.push_back(SCCNodeStack.back());
+ SCCNodeStack.pop_back();
+ nodeVisitNumbers[CurrentSCC.back()] = ~0U;
+ } while (CurrentSCC.back() != visitingN);
+ return;
+ }
+}
+
+template <class GraphT, class GT>
+bool scc_iterator<GraphT, GT>::hasCycle() const {
+ assert(!CurrentSCC.empty() && "Dereferencing END SCC iterator!");
+ if (CurrentSCC.size() > 1)
+ return true;
+ NodeRef N = CurrentSCC.front();
+ for (ChildItTy CI = GT::child_begin(N), CE = GT::child_end(N); CI != CE;
+ ++CI)
+ if (*CI == N)
+ return true;
+ return false;
+}
+
+/// Construct the begin iterator for a deduced graph type T.
+template <class T> scc_iterator<T> scc_begin(const T &G) {
+ return scc_iterator<T>::begin(G);
+}
+
+/// Construct the end iterator for a deduced graph type T.
+template <class T> scc_iterator<T> scc_end(const T &G) {
+ return scc_iterator<T>::end(G);
+}
+
+/// Sort the nodes of a directed SCC in the decreasing order of the edge
+/// weights. The instantiating GraphT type should have weighted edge type
+/// declared in its graph traits in order to use this iterator.
+///
+/// This is implemented using Kruskal's spanning tree algorithm, adapted here
+/// to build a maximum (rather than minimum) spanning tree, followed by a BFS
+/// walk. First a maximum spanning tree (forest) is built from all edges within
+/// the SCC collection. Then a BFS walk is initiated from the tree nodes that
+/// have no predecessor. Finally, the computed BFS order is the traversal order
+/// of the nodes of the SCC. This order ensures that high-weighted edges are
+/// visited first during the traversal.
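+///
+/// A minimal sketch of the node/edge shape this iterator expects (names here
+/// are illustrative, not part of the original header):
+/// \code
+///   struct Node;
+///   struct Edge { Node *Source; Node *Target; uint64_t Weight; };
+///   struct Node { std::vector<Edge> Edges; };
+/// \endcode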
+template <class GraphT, class GT = GraphTraits<GraphT>>
+class scc_member_iterator {
+ using NodeType = typename GT::NodeType;
+ using EdgeType = typename GT::EdgeType;
+ using NodesType = std::vector<NodeType *>;
+
+  // Auxiliary node information used during the MST calculation.
+ struct NodeInfo {
+ NodeInfo *Group = this;
+ uint32_t Rank = 0;
+ bool Visited = true;
+ };
+
+ // Find the root group of the node and compress the path from node to the
+ // root.
+ NodeInfo *find(NodeInfo *Node) {
+ if (Node->Group != Node)
+ Node->Group = find(Node->Group);
+ return Node->Group;
+ }
+
+ // Union the source and target node into the same group and return true.
+ // Returns false if they are already in the same group.
+ bool unionGroups(const EdgeType *Edge) {
+ NodeInfo *G1 = find(&NodeInfoMap[Edge->Source]);
+ NodeInfo *G2 = find(&NodeInfoMap[Edge->Target]);
+
+ // If the edge forms a cycle, do not add it to MST
+ if (G1 == G2)
+ return false;
+
+    // Make the root of the smaller-rank tree a child of the root of the
+    // higher-rank tree.
+    if (G1->Rank < G2->Rank)
+      G1->Group = G2;
+    else {
+      G2->Group = G1;
+      // If the ranks are the same, increment the rank of the new root.
+      if (G1->Rank == G2->Rank)
+        G1->Rank++;
+    }
+ return true;
+ }
+
+ std::unordered_map<NodeType *, NodeInfo> NodeInfoMap;
+ NodesType Nodes;
+
+public:
+ scc_member_iterator(const NodesType &InputNodes);
+
+ NodesType &operator*() { return Nodes; }
+};
+
+template <class GraphT, class GT>
+scc_member_iterator<GraphT, GT>::scc_member_iterator(
+ const NodesType &InputNodes) {
+ if (InputNodes.size() <= 1) {
+ Nodes = InputNodes;
+ return;
+ }
+
+  // Initialize auxiliary node information.
+ NodeInfoMap.clear();
+ for (auto *Node : InputNodes) {
+    // This is specifically used to construct a `NodeInfo` object in place. An
+    // insert operation would involve a copy construction, which would
+    // invalidate the initial value of the `Group` field, which must be `this`.
+ (void)NodeInfoMap[Node].Group;
+ }
+
+ // Sort edges by weights.
+ struct EdgeComparer {
+ bool operator()(const EdgeType *L, const EdgeType *R) const {
+ return L->Weight > R->Weight;
+ }
+ };
+
+ std::multiset<const EdgeType *, EdgeComparer> SortedEdges;
+ for (auto *Node : InputNodes) {
+ for (auto &Edge : Node->Edges) {
+ if (NodeInfoMap.count(Edge.Target))
+ SortedEdges.insert(&Edge);
+ }
+ }
+
+ // Traverse all the edges and compute the Maximum Weight Spanning Tree
+ // using Kruskal's algorithm.
+ std::unordered_set<const EdgeType *> MSTEdges;
+ for (auto *Edge : SortedEdges) {
+ if (unionGroups(Edge))
+ MSTEdges.insert(Edge);
+ }
+
+  // Do BFS on the MST, starting from nodes that have no incoming edge. These
+  // nodes are the "roots" of the MST forest. This ensures that nodes are
+  // visited before their descendants, and thus that hot edges are processed
+  // before cold edges, given how the MST is computed.
+ for (const auto *Edge : MSTEdges)
+ NodeInfoMap[Edge->Target].Visited = false;
+
+ std::queue<NodeType *> Queue;
+ for (auto &Node : NodeInfoMap)
+ if (Node.second.Visited)
+ Queue.push(Node.first);
+
+ while (!Queue.empty()) {
+ auto *Node = Queue.front();
+ Queue.pop();
+ Nodes.push_back(Node);
+ for (auto &Edge : Node->Edges) {
+ if (MSTEdges.count(&Edge) && !NodeInfoMap[Edge.Target].Visited) {
+ NodeInfoMap[Edge.Target].Visited = true;
+ Queue.push(Edge.Target);
+ }
+ }
+ }
+
+ assert(InputNodes.size() == Nodes.size() && "missing nodes in MST");
+ std::reverse(Nodes.begin(), Nodes.end());
+}
+} // end namespace llvm
+
+#endif // LLVM_ADT_SCCITERATOR_H
+
+#ifdef __GNUC__
+#pragma GCC diagnostic pop
+#endif
diff --git a/contrib/libs/llvm14/include/llvm/ADT/STLArrayExtras.h b/contrib/libs/llvm14/include/llvm/ADT/STLArrayExtras.h
new file mode 100644
index 0000000000..13e8790c7b
--- /dev/null
+++ b/contrib/libs/llvm14/include/llvm/ADT/STLArrayExtras.h
@@ -0,0 +1,46 @@
+#pragma once
+
+#ifdef __GNUC__
+#pragma GCC diagnostic push
+#pragma GCC diagnostic ignored "-Wunused-parameter"
+#endif
+
+//===- llvm/ADT/STLArrayExtras.h - additions to <array> ---------*- C++ -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// This file contains some templates that are useful if you are working with the
+// STL at all.
+//
+// No library is required when using these functions.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_ADT_STLARRAYEXTRAS_H
+#define LLVM_ADT_STLARRAYEXTRAS_H
+
+#include <cstddef>
+
+namespace llvm {
+
+//===----------------------------------------------------------------------===//
+// Extra additions for arrays
+//===----------------------------------------------------------------------===//
+
+/// Find the length of an array.
+template <class T, std::size_t N>
+constexpr inline size_t array_lengthof(T (&)[N]) {
+ return N;
+}
+
+} // end namespace llvm
+
+#endif // LLVM_ADT_STLARRAYEXTRAS_H
+
+#ifdef __GNUC__
+#pragma GCC diagnostic pop
+#endif
diff --git a/contrib/libs/llvm14/include/llvm/ADT/STLExtras.h b/contrib/libs/llvm14/include/llvm/ADT/STLExtras.h
new file mode 100644
index 0000000000..7edabc3d6a
--- /dev/null
+++ b/contrib/libs/llvm14/include/llvm/ADT/STLExtras.h
@@ -0,0 +1,2176 @@
+#pragma once
+
+#ifdef __GNUC__
+#pragma GCC diagnostic push
+#pragma GCC diagnostic ignored "-Wunused-parameter"
+#endif
+
+//===- llvm/ADT/STLExtras.h - Useful STL related functions ------*- C++ -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+///
+/// \file
+/// This file contains some templates that are useful if you are working with
+/// the STL at all.
+///
+/// No library is required when using these functions.
+///
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_ADT_STLEXTRAS_H
+#define LLVM_ADT_STLEXTRAS_H
+
+#include "llvm/ADT/Optional.h"
+#include "llvm/ADT/STLArrayExtras.h"
+#include "llvm/ADT/STLForwardCompat.h"
+#include "llvm/ADT/STLFunctionalExtras.h"
+#include "llvm/ADT/identity.h"
+#include "llvm/ADT/iterator.h"
+#include "llvm/ADT/iterator_range.h"
+#include "llvm/Config/abi-breaking.h"
+#include "llvm/Support/ErrorHandling.h"
+#include <algorithm>
+#include <cassert>
+#include <cstddef>
+#include <cstdint>
+#include <cstdlib>
+#include <functional>
+#include <initializer_list>
+#include <iterator>
+#include <limits>
+#include <memory>
+#include <tuple>
+#include <type_traits>
+#include <utility>
+
+#ifdef EXPENSIVE_CHECKS
+#include <random> // for std::mt19937
+#endif
+
+namespace llvm {
+
+// Only used by compiler if both template types are the same. Useful when
+// using SFINAE to test for the existence of member functions.
+template <typename T, T> struct SameType;
+
+namespace detail {
+
+template <typename RangeT>
+using IterOfRange = decltype(std::begin(std::declval<RangeT &>()));
+
+template <typename RangeT>
+using ValueOfRange = typename std::remove_reference<decltype(
+ *std::begin(std::declval<RangeT &>()))>::type;
+
+} // end namespace detail
+
+//===----------------------------------------------------------------------===//
+// Extra additions to <type_traits>
+//===----------------------------------------------------------------------===//
+
+template <typename T> struct make_const_ptr {
+ using type =
+ typename std::add_pointer<typename std::add_const<T>::type>::type;
+};
+
+template <typename T> struct make_const_ref {
+ using type = typename std::add_lvalue_reference<
+ typename std::add_const<T>::type>::type;
+};
+
+namespace detail {
+template <typename...> using void_t = void;
+template <class, template <class...> class Op, class... Args> struct detector {
+ using value_t = std::false_type;
+};
+template <template <class...> class Op, class... Args>
+struct detector<void_t<Op<Args...>>, Op, Args...> {
+ using value_t = std::true_type;
+};
+} // end namespace detail
+
+/// Detects if a given trait holds for some set of arguments 'Args'.
+/// For example, the given trait could be used to detect if a given type
+/// has a copy assignment operator:
+/// template<class T>
+/// using has_copy_assign_t = decltype(std::declval<T&>()
+/// = std::declval<const T&>());
+/// bool fooHasCopyAssign = is_detected<has_copy_assign_t, FooClass>::value;
+template <template <class...> class Op, class... Args>
+using is_detected = typename detail::detector<void, Op, Args...>::value_t;
+
+namespace detail {
+template <typename Callable, typename... Args>
+using is_invocable =
+ decltype(std::declval<Callable &>()(std::declval<Args>()...));
+} // namespace detail
+
+/// Check if a Callable type can be invoked with the given set of arg types.
+template <typename Callable, typename... Args>
+using is_invocable = is_detected<detail::is_invocable, Callable, Args...>;
+
+/// This class provides various trait information about a callable object.
+/// * To access the number of arguments: Traits::num_args
+/// * To access the type of an argument: Traits::arg_t<Index>
+/// * To access the type of the result: Traits::result_t
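+///
+/// For example (illustrative): given `int f(float);`,
+/// `function_traits<decltype(&f)>` yields `num_args == 1`, `result_t` = `int`,
+/// and `arg_t<0>` = `float`.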
+template <typename T, bool isClass = std::is_class<T>::value>
+struct function_traits : public function_traits<decltype(&T::operator())> {};
+
+/// Overload for class function types.
+template <typename ClassType, typename ReturnType, typename... Args>
+struct function_traits<ReturnType (ClassType::*)(Args...) const, false> {
+ /// The number of arguments to this function.
+ enum { num_args = sizeof...(Args) };
+
+ /// The result type of this function.
+ using result_t = ReturnType;
+
+ /// The type of an argument to this function.
+ template <size_t Index>
+ using arg_t = typename std::tuple_element<Index, std::tuple<Args...>>::type;
+};
+/// Overload for class function types.
+template <typename ClassType, typename ReturnType, typename... Args>
+struct function_traits<ReturnType (ClassType::*)(Args...), false>
+ : function_traits<ReturnType (ClassType::*)(Args...) const> {};
+/// Overload for non-class function types.
+template <typename ReturnType, typename... Args>
+struct function_traits<ReturnType (*)(Args...), false> {
+ /// The number of arguments to this function.
+ enum { num_args = sizeof...(Args) };
+
+ /// The result type of this function.
+ using result_t = ReturnType;
+
+ /// The type of an argument to this function.
+ template <size_t i>
+ using arg_t = typename std::tuple_element<i, std::tuple<Args...>>::type;
+};
+/// Overload for non-class function type references.
+template <typename ReturnType, typename... Args>
+struct function_traits<ReturnType (&)(Args...), false>
+ : public function_traits<ReturnType (*)(Args...)> {};
+
+/// traits class for checking whether type T is one of any of the given
+/// types in the variadic list.
+template <typename T, typename... Ts>
+using is_one_of = disjunction<std::is_same<T, Ts>...>;
+
+/// traits class for checking whether type T is a base class for all
+/// the given types in the variadic list.
+template <typename T, typename... Ts>
+using are_base_of = conjunction<std::is_base_of<T, Ts>...>;
+
+namespace detail {
+template <typename T, typename... Us> struct TypesAreDistinct;
+template <typename T, typename... Us>
+struct TypesAreDistinct
+ : std::integral_constant<bool, !is_one_of<T, Us...>::value &&
+ TypesAreDistinct<Us...>::value> {};
+template <typename T> struct TypesAreDistinct<T> : std::true_type {};
+} // namespace detail
+
+/// Determine if all types in Ts are distinct.
+///
+/// Useful to statically assert when Ts is intended to describe a non-multi set
+/// of types.
+///
+/// Expensive (currently quadratic in sizeof(Ts...)), and so should only be
+/// asserted once per instantiation of a type which requires it.
+template <typename... Ts> struct TypesAreDistinct;
+template <> struct TypesAreDistinct<> : std::true_type {};
+template <typename... Ts>
+struct TypesAreDistinct
+ : std::integral_constant<bool, detail::TypesAreDistinct<Ts...>::value> {};
+
+/// Find the first index where a type appears in a list of types.
+///
+/// FirstIndexOfType<T, Us...>::value is the first index of T in Us.
+///
+/// Typically only meaningful when it is otherwise statically known that the
+/// type pack has no duplicate types. This should be guaranteed explicitly with
+/// static_assert(TypesAreDistinct<Us...>::value).
+///
+/// It is a compile-time error to instantiate when T is not present in Us, i.e.
+/// if is_one_of<T, Us...>::value is false.
+template <typename T, typename... Us> struct FirstIndexOfType;
+template <typename T, typename U, typename... Us>
+struct FirstIndexOfType<T, U, Us...>
+ : std::integral_constant<size_t, 1 + FirstIndexOfType<T, Us...>::value> {};
+template <typename T, typename... Us>
+struct FirstIndexOfType<T, T, Us...> : std::integral_constant<size_t, 0> {};
+
+/// Find the type at a given index in a list of types.
+///
+/// TypeAtIndex<I, Ts...> is the type at index I in Ts.
+template <size_t I, typename... Ts>
+using TypeAtIndex = std::tuple_element_t<I, std::tuple<Ts...>>;
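+
+// For example (illustrative): FirstIndexOfType<int, float, int>::value == 1,
+// and TypeAtIndex<1, float, int> is int.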
+
+//===----------------------------------------------------------------------===//
+// Extra additions to <iterator>
+//===----------------------------------------------------------------------===//
+
+namespace adl_detail {
+
+using std::begin;
+
+template <typename ContainerTy>
+decltype(auto) adl_begin(ContainerTy &&container) {
+ return begin(std::forward<ContainerTy>(container));
+}
+
+using std::end;
+
+template <typename ContainerTy>
+decltype(auto) adl_end(ContainerTy &&container) {
+ return end(std::forward<ContainerTy>(container));
+}
+
+using std::swap;
+
+template <typename T>
+void adl_swap(T &&lhs, T &&rhs) noexcept(noexcept(swap(std::declval<T>(),
+ std::declval<T>()))) {
+ swap(std::forward<T>(lhs), std::forward<T>(rhs));
+}
+
+} // end namespace adl_detail
+
+template <typename ContainerTy>
+decltype(auto) adl_begin(ContainerTy &&container) {
+ return adl_detail::adl_begin(std::forward<ContainerTy>(container));
+}
+
+template <typename ContainerTy>
+decltype(auto) adl_end(ContainerTy &&container) {
+ return adl_detail::adl_end(std::forward<ContainerTy>(container));
+}
+
+template <typename T>
+void adl_swap(T &&lhs, T &&rhs) noexcept(
+ noexcept(adl_detail::adl_swap(std::declval<T>(), std::declval<T>()))) {
+ adl_detail::adl_swap(std::forward<T>(lhs), std::forward<T>(rhs));
+}
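+
+// Usage sketch (illustrative): the adl_* wrappers find begin()/end()/swap()
+// via argument-dependent lookup, falling back to the std:: versions, so they
+// work uniformly for C arrays, standard containers, and user-defined ranges:
+//
+//   int A[] = {1, 2, 3};
+//   auto First = adl_begin(A); // equivalent to std::begin(A)
+//   std::vector<int> V{4, 5};
+//   auto Last = adl_end(V);    // equivalent to V.end()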
+
+/// Test whether \p RangeOrContainer is empty. Similar to C++17 std::empty.
+template <typename T>
+constexpr bool empty(const T &RangeOrContainer) {
+ return adl_begin(RangeOrContainer) == adl_end(RangeOrContainer);
+}
+
+/// Returns true if the given container only contains a single element.
+template <typename ContainerTy> bool hasSingleElement(ContainerTy &&C) {
+ auto B = std::begin(C), E = std::end(C);
+ return B != E && std::next(B) == E;
+}
+
+/// Return a range covering \p RangeOrContainer with the first N elements
+/// excluded.
+template <typename T> auto drop_begin(T &&RangeOrContainer, size_t N = 1) {
+ return make_range(std::next(adl_begin(RangeOrContainer), N),
+ adl_end(RangeOrContainer));
+}
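+
+// For example (illustrative): given std::vector<int> V{1, 2, 3, 4},
+// drop_begin(V) iterates over {2, 3, 4} and drop_begin(V, 2) over {3, 4}.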
+
+// mapped_iterator - This is a simple iterator adapter that causes a function to
+// be applied whenever operator* is invoked on the iterator.
+
+template <typename ItTy, typename FuncTy,
+ typename ReferenceTy =
+ decltype(std::declval<FuncTy>()(*std::declval<ItTy>()))>
+class mapped_iterator
+ : public iterator_adaptor_base<
+ mapped_iterator<ItTy, FuncTy>, ItTy,
+ typename std::iterator_traits<ItTy>::iterator_category,
+ std::remove_reference_t<ReferenceTy>,
+ typename std::iterator_traits<ItTy>::difference_type,
+ std::remove_reference_t<ReferenceTy> *, ReferenceTy> {
+public:
+ mapped_iterator(ItTy U, FuncTy F)
+ : mapped_iterator::iterator_adaptor_base(std::move(U)), F(std::move(F)) {}
+
+ ItTy getCurrent() { return this->I; }
+
+ const FuncTy &getFunction() const { return F; }
+
+ ReferenceTy operator*() const { return F(*this->I); }
+
+private:
+ FuncTy F;
+};
+
+// map_iterator - Provide a convenient way to create mapped_iterators, just like
+// make_pair is useful for creating pairs...
+template <class ItTy, class FuncTy>
+inline mapped_iterator<ItTy, FuncTy> map_iterator(ItTy I, FuncTy F) {
+ return mapped_iterator<ItTy, FuncTy>(std::move(I), std::move(F));
+}
+
+template <class ContainerTy, class FuncTy>
+auto map_range(ContainerTy &&C, FuncTy F) {
+ return make_range(map_iterator(C.begin(), F), map_iterator(C.end(), F));
+}
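+
+// Usage sketch (illustrative; assumes <string> and <vector>):
+//
+//   std::vector<int> V{1, 2, 3};
+//   for (std::string S : map_range(V, [](int N) { return std::to_string(N); }))
+//     ; // visits "1", "2", "3" without materializing a new container.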
+
+/// A base type of mapped iterator that is useful for building derived
+/// iterators that do not need/want to store the map function (as in
+/// mapped_iterator). These iterators must simply provide a `mapElement` method
+/// that defines how to map a value of the iterator to the provided reference
+/// type.
+template <typename DerivedT, typename ItTy, typename ReferenceTy>
+class mapped_iterator_base
+ : public iterator_adaptor_base<
+ DerivedT, ItTy,
+ typename std::iterator_traits<ItTy>::iterator_category,
+ std::remove_reference_t<ReferenceTy>,
+ typename std::iterator_traits<ItTy>::difference_type,
+ std::remove_reference_t<ReferenceTy> *, ReferenceTy> {
+public:
+ using BaseT = mapped_iterator_base;
+
+ mapped_iterator_base(ItTy U)
+ : mapped_iterator_base::iterator_adaptor_base(std::move(U)) {}
+
+ ItTy getCurrent() { return this->I; }
+
+ ReferenceTy operator*() const {
+ return static_cast<const DerivedT &>(*this).mapElement(*this->I);
+ }
+};
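+
+// Usage sketch (illustrative, hypothetical type): a derived iterator only has
+// to provide mapElement; the iterator plumbing comes from the base:
+//
+//   struct DerefIterator
+//       : mapped_iterator_base<DerefIterator, int **, int &> {
+//     using BaseT::BaseT; // inherit the wrapping constructor
+//     int &mapElement(int *Ptr) const { return *Ptr; }
+//   };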
+
+/// Helper to determine if type T has a member called rbegin().
+template <typename Ty> class has_rbegin_impl {
+ using yes = char[1];
+ using no = char[2];
+
+ template <typename Inner>
+ static yes& test(Inner *I, decltype(I->rbegin()) * = nullptr);
+
+ template <typename>
+ static no& test(...);
+
+public:
+ static const bool value = sizeof(test<Ty>(nullptr)) == sizeof(yes);
+};
+
+/// Metafunction to determine if T& or T has a member called rbegin().
+template <typename Ty>
+struct has_rbegin : has_rbegin_impl<typename std::remove_reference<Ty>::type> {
+};
+
+// Returns an iterator_range over the given container which iterates in reverse.
+// Note that the container must have rbegin()/rend() methods for this to work.
+template <typename ContainerTy>
+auto reverse(ContainerTy &&C,
+ std::enable_if_t<has_rbegin<ContainerTy>::value> * = nullptr) {
+ return make_range(C.rbegin(), C.rend());
+}
+
+// Returns an iterator_range over the given container which iterates in reverse.
+// Note that the container must have begin()/end() methods which return
+// bidirectional iterators for this to work.
+template <typename ContainerTy>
+auto reverse(ContainerTy &&C,
+ std::enable_if_t<!has_rbegin<ContainerTy>::value> * = nullptr) {
+ return make_range(std::make_reverse_iterator(std::end(C)),
+ std::make_reverse_iterator(std::begin(C)));
+}
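+
+// For example (illustrative): for (int N : reverse(V)) visits V's elements
+// from last to first, using whichever of the two overloads above applies.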
+
+/// An iterator adaptor that filters the elements of given inner iterators.
+///
+/// The predicate parameter should be a callable object that accepts the wrapped
+/// iterator's reference type and returns a bool. When incrementing or
+/// decrementing the iterator, it will call the predicate on each element and
+/// skip any where it returns false.
+///
+/// \code
+/// int A[] = { 1, 2, 3, 4 };
+/// auto R = make_filter_range(A, [](int N) { return N % 2 == 1; });
+/// // R contains { 1, 3 }.
+/// \endcode
+///
+/// Note: filter_iterator_base implements support for forward iteration.
+/// filter_iterator_impl exists to provide support for bidirectional iteration,
+/// conditional on whether the wrapped iterator supports it.
+template <typename WrappedIteratorT, typename PredicateT, typename IterTag>
+class filter_iterator_base
+ : public iterator_adaptor_base<
+ filter_iterator_base<WrappedIteratorT, PredicateT, IterTag>,
+ WrappedIteratorT,
+ typename std::common_type<
+ IterTag, typename std::iterator_traits<
+ WrappedIteratorT>::iterator_category>::type> {
+ using BaseT = typename filter_iterator_base::iterator_adaptor_base;
+
+protected:
+ WrappedIteratorT End;
+ PredicateT Pred;
+
+ void findNextValid() {
+ while (this->I != End && !Pred(*this->I))
+ BaseT::operator++();
+ }
+
+ // Construct the iterator. The begin iterator needs to know where the end
+ // is, so that it can properly stop when it gets there. The end iterator only
+ // needs the predicate to support bidirectional iteration.
+ filter_iterator_base(WrappedIteratorT Begin, WrappedIteratorT End,
+ PredicateT Pred)
+ : BaseT(Begin), End(End), Pred(Pred) {
+ findNextValid();
+ }
+
+public:
+ using BaseT::operator++;
+
+ filter_iterator_base &operator++() {
+ BaseT::operator++();
+ findNextValid();
+ return *this;
+ }
+};
+
+/// filter_iterator_impl for forward iteration only. This is the primary
+/// template; it is used whenever the wrapped iterator is not bidirectional.
+template <typename WrappedIteratorT, typename PredicateT,
+ typename IterTag = std::forward_iterator_tag>
+class filter_iterator_impl
+ : public filter_iterator_base<WrappedIteratorT, PredicateT, IterTag> {
+public:
+ filter_iterator_impl(WrappedIteratorT Begin, WrappedIteratorT End,
+ PredicateT Pred)
+ : filter_iterator_impl::filter_iterator_base(Begin, End, Pred) {}
+};
+
+/// Specialization of filter_iterator_impl for bidirectional iteration.
+template <typename WrappedIteratorT, typename PredicateT>
+class filter_iterator_impl<WrappedIteratorT, PredicateT,
+ std::bidirectional_iterator_tag>
+ : public filter_iterator_base<WrappedIteratorT, PredicateT,
+ std::bidirectional_iterator_tag> {
+ using BaseT = typename filter_iterator_impl::filter_iterator_base;
+
+ void findPrevValid() {
+ while (!this->Pred(*this->I))
+ BaseT::operator--();
+ }
+
+public:
+ using BaseT::operator--;
+
+ filter_iterator_impl(WrappedIteratorT Begin, WrappedIteratorT End,
+ PredicateT Pred)
+ : BaseT(Begin, End, Pred) {}
+
+ filter_iterator_impl &operator--() {
+ BaseT::operator--();
+ findPrevValid();
+ return *this;
+ }
+};
+
+namespace detail {
+
+template <bool is_bidirectional> struct fwd_or_bidi_tag_impl {
+ using type = std::forward_iterator_tag;
+};
+
+template <> struct fwd_or_bidi_tag_impl<true> {
+ using type = std::bidirectional_iterator_tag;
+};
+
+/// Helper which sets its type member to forward_iterator_tag if the category
+/// of \p IterT does not derive from bidirectional_iterator_tag, and to
+/// bidirectional_iterator_tag otherwise.
+template <typename IterT> struct fwd_or_bidi_tag {
+ using type = typename fwd_or_bidi_tag_impl<std::is_base_of<
+ std::bidirectional_iterator_tag,
+ typename std::iterator_traits<IterT>::iterator_category>::value>::type;
+};
+
+} // namespace detail
+
+/// Defines filter_iterator to a suitable specialization of
+/// filter_iterator_impl, based on the underlying iterator's category.
+template <typename WrappedIteratorT, typename PredicateT>
+using filter_iterator = filter_iterator_impl<
+ WrappedIteratorT, PredicateT,
+ typename detail::fwd_or_bidi_tag<WrappedIteratorT>::type>;
+
+/// Convenience function that takes a range of elements and a predicate, and
+/// returns a new filter_iterator range.
+///
+/// FIXME: Currently if RangeT && is an rvalue reference to a temporary, the
+/// lifetime of that temporary is not kept by the returned range object, and the
+/// temporary is going to be dropped on the floor after the make_iterator_range
+/// full expression that contains this function call.
+template <typename RangeT, typename PredicateT>
+iterator_range<filter_iterator<detail::IterOfRange<RangeT>, PredicateT>>
+make_filter_range(RangeT &&Range, PredicateT Pred) {
+ using FilterIteratorT =
+ filter_iterator<detail::IterOfRange<RangeT>, PredicateT>;
+ return make_range(
+ FilterIteratorT(std::begin(std::forward<RangeT>(Range)),
+ std::end(std::forward<RangeT>(Range)), Pred),
+ FilterIteratorT(std::end(std::forward<RangeT>(Range)),
+ std::end(std::forward<RangeT>(Range)), Pred));
+}
+
+/// A pseudo-iterator adaptor that is designed to implement "early increment"
+/// style loops.
+///
+/// This is *not a normal iterator* and should almost never be used directly. It
+/// is intended primarily to be used with range based for loops and some range
+/// algorithms.
+///
+/// The iterator isn't quite an `OutputIterator` or an `InputIterator` but
+/// somewhere between them. The constraints of these iterators are:
+///
+/// - On construction or after being incremented, it is comparable and
+///   dereferenceable. It is *not* incrementable.
+/// - After being dereferenced, it is neither comparable nor dereferenceable;
+///   it is only incrementable.
+///
+/// This means you can only dereference the iterator once, and you can only
+/// increment it once between dereferences.
+template <typename WrappedIteratorT>
+class early_inc_iterator_impl
+ : public iterator_adaptor_base<early_inc_iterator_impl<WrappedIteratorT>,
+ WrappedIteratorT, std::input_iterator_tag> {
+ using BaseT = typename early_inc_iterator_impl::iterator_adaptor_base;
+
+ using PointerT = typename std::iterator_traits<WrappedIteratorT>::pointer;
+
+protected:
+#if LLVM_ENABLE_ABI_BREAKING_CHECKS
+ bool IsEarlyIncremented = false;
+#endif
+
+public:
+ early_inc_iterator_impl(WrappedIteratorT I) : BaseT(I) {}
+
+ using BaseT::operator*;
+ decltype(*std::declval<WrappedIteratorT>()) operator*() {
+#if LLVM_ENABLE_ABI_BREAKING_CHECKS
+ assert(!IsEarlyIncremented && "Cannot dereference twice!");
+ IsEarlyIncremented = true;
+#endif
+ return *(this->I)++;
+ }
+
+ using BaseT::operator++;
+ early_inc_iterator_impl &operator++() {
+#if LLVM_ENABLE_ABI_BREAKING_CHECKS
+ assert(IsEarlyIncremented && "Cannot increment before dereferencing!");
+ IsEarlyIncremented = false;
+#endif
+ return *this;
+ }
+
+ friend bool operator==(const early_inc_iterator_impl &LHS,
+ const early_inc_iterator_impl &RHS) {
+#if LLVM_ENABLE_ABI_BREAKING_CHECKS
+ assert(!LHS.IsEarlyIncremented && "Cannot compare after dereferencing!");
+#endif
+ return (const BaseT &)LHS == (const BaseT &)RHS;
+ }
+};
+
+/// Make a range that does early increment to allow mutation of the underlying
+/// range without disrupting iteration.
+///
+/// The underlying iterator will be incremented immediately after it is
+/// dereferenced, allowing deletion of the current node or insertion of nodes to
+/// not disrupt iteration provided they do not invalidate the *next* iterator --
+/// the current iterator can be invalidated.
+///
+/// This requires a very exact pattern of use that is only really suitable to
+/// range based for loops and other range algorithms that explicitly guarantee
+/// to dereference each element exactly once and to increment exactly once per
+/// element.
+template <typename RangeT>
+iterator_range<early_inc_iterator_impl<detail::IterOfRange<RangeT>>>
+make_early_inc_range(RangeT &&Range) {
+ using EarlyIncIteratorT =
+ early_inc_iterator_impl<detail::IterOfRange<RangeT>>;
+ return make_range(EarlyIncIteratorT(std::begin(std::forward<RangeT>(Range))),
+ EarlyIncIteratorT(std::end(std::forward<RangeT>(Range))));
+}
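+
+// Usage sketch (illustrative; assumes LLVM IR types declared elsewhere in the
+// tree): the early-increment range makes it safe to delete the element that
+// was just dereferenced:
+//
+//   for (Instruction &I : make_early_inc_range(BB))
+//     if (I.use_empty())
+//       I.eraseFromParent(); // OK: the iterator already moved past I.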
+
+// forward declarations required by zip_shortest/zip_first/zip_longest
+template <typename R, typename UnaryPredicate>
+bool all_of(R &&range, UnaryPredicate P);
+template <typename R, typename UnaryPredicate>
+bool any_of(R &&range, UnaryPredicate P);
+
+namespace detail {
+
+using std::declval;
+
+// We have to alias this since inlining the actual type at the usage site
+// in the parameter list of iterator_facade_base<> below ICEs MSVC 2017.
+template<typename... Iters> struct ZipTupleType {
+ using type = std::tuple<decltype(*declval<Iters>())...>;
+};
+
+template <typename ZipType, typename... Iters>
+using zip_traits = iterator_facade_base<
+ ZipType, typename std::common_type<std::bidirectional_iterator_tag,
+ typename std::iterator_traits<
+ Iters>::iterator_category...>::type,
+ // ^ TODO: Implement random access methods.
+ typename ZipTupleType<Iters...>::type,
+ typename std::iterator_traits<typename std::tuple_element<
+ 0, std::tuple<Iters...>>::type>::difference_type,
+ // ^ FIXME: This follows boost::make_zip_iterator's assumption that all
+ // inner iterators have the same difference_type. It would fail if, for
+ // instance, the second field's difference_type were non-numeric while the
+ // first is.
+ typename ZipTupleType<Iters...>::type *,
+ typename ZipTupleType<Iters...>::type>;
+
+template <typename ZipType, typename... Iters>
+struct zip_common : public zip_traits<ZipType, Iters...> {
+ using Base = zip_traits<ZipType, Iters...>;
+ using value_type = typename Base::value_type;
+
+ std::tuple<Iters...> iterators;
+
+protected:
+ template <size_t... Ns> value_type deref(std::index_sequence<Ns...>) const {
+ return value_type(*std::get<Ns>(iterators)...);
+ }
+
+ template <size_t... Ns>
+ decltype(iterators) tup_inc(std::index_sequence<Ns...>) const {
+ return std::tuple<Iters...>(std::next(std::get<Ns>(iterators))...);
+ }
+
+ template <size_t... Ns>
+ decltype(iterators) tup_dec(std::index_sequence<Ns...>) const {
+ return std::tuple<Iters...>(std::prev(std::get<Ns>(iterators))...);
+ }
+
+ template <size_t... Ns>
+ bool test_all_equals(const zip_common &other,
+ std::index_sequence<Ns...>) const {
+ return all_of(std::initializer_list<bool>{std::get<Ns>(this->iterators) ==
+ std::get<Ns>(other.iterators)...},
+ identity<bool>{});
+ }
+
+public:
+ zip_common(Iters &&... ts) : iterators(std::forward<Iters>(ts)...) {}
+
+ value_type operator*() const {
+ return deref(std::index_sequence_for<Iters...>{});
+ }
+
+ ZipType &operator++() {
+ iterators = tup_inc(std::index_sequence_for<Iters...>{});
+ return *reinterpret_cast<ZipType *>(this);
+ }
+
+ ZipType &operator--() {
+ static_assert(Base::IsBidirectional,
+ "All inner iterators must be at least bidirectional.");
+ iterators = tup_dec(std::index_sequence_for<Iters...>{});
+ return *reinterpret_cast<ZipType *>(this);
+ }
+
+  /// Return true if all of this zip's iterators match `other`'s iterators.
+ bool all_equals(zip_common &other) {
+ return test_all_equals(other, std::index_sequence_for<Iters...>{});
+ }
+};
+
+template <typename... Iters>
+struct zip_first : public zip_common<zip_first<Iters...>, Iters...> {
+ using Base = zip_common<zip_first<Iters...>, Iters...>;
+
+ bool operator==(const zip_first<Iters...> &other) const {
+ return std::get<0>(this->iterators) == std::get<0>(other.iterators);
+ }
+
+ zip_first(Iters &&... ts) : Base(std::forward<Iters>(ts)...) {}
+};
+
+template <typename... Iters>
+class zip_shortest : public zip_common<zip_shortest<Iters...>, Iters...> {
+ template <size_t... Ns>
+ bool test(const zip_shortest<Iters...> &other,
+ std::index_sequence<Ns...>) const {
+ return all_of(std::initializer_list<bool>{std::get<Ns>(this->iterators) !=
+ std::get<Ns>(other.iterators)...},
+ identity<bool>{});
+ }
+
+public:
+ using Base = zip_common<zip_shortest<Iters...>, Iters...>;
+
+ zip_shortest(Iters &&... ts) : Base(std::forward<Iters>(ts)...) {}
+
+ bool operator==(const zip_shortest<Iters...> &other) const {
+ return !test(other, std::index_sequence_for<Iters...>{});
+ }
+};
+
+template <template <typename...> class ItType, typename... Args> class zippy {
+public:
+ using iterator = ItType<decltype(std::begin(std::declval<Args>()))...>;
+ using iterator_category = typename iterator::iterator_category;
+ using value_type = typename iterator::value_type;
+ using difference_type = typename iterator::difference_type;
+ using pointer = typename iterator::pointer;
+ using reference = typename iterator::reference;
+
+private:
+ std::tuple<Args...> ts;
+
+ template <size_t... Ns>
+ iterator begin_impl(std::index_sequence<Ns...>) const {
+ return iterator(std::begin(std::get<Ns>(ts))...);
+ }
+ template <size_t... Ns> iterator end_impl(std::index_sequence<Ns...>) const {
+ return iterator(std::end(std::get<Ns>(ts))...);
+ }
+
+public:
+ zippy(Args &&... ts_) : ts(std::forward<Args>(ts_)...) {}
+
+ iterator begin() const {
+ return begin_impl(std::index_sequence_for<Args...>{});
+ }
+ iterator end() const { return end_impl(std::index_sequence_for<Args...>{}); }
+};
+
+} // end namespace detail
+
+/// zip iterator for two or more iterable types.
+template <typename T, typename U, typename... Args>
+detail::zippy<detail::zip_shortest, T, U, Args...> zip(T &&t, U &&u,
+ Args &&... args) {
+ return detail::zippy<detail::zip_shortest, T, U, Args...>(
+ std::forward<T>(t), std::forward<U>(u), std::forward<Args>(args)...);
+}
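+
+// Usage sketch (illustrative): zip stops at the end of the shortest range and
+// pairs naturally with structured bindings:
+//
+//   std::vector<int> A{1, 2, 3};
+//   std::vector<char> B{'x', 'y'};
+//   for (auto [N, C] : zip(A, B))
+//     ; // visits (1, 'x') and (2, 'y'); A's trailing 3 is never paired.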
+
+/// zip iterator that, for the sake of efficiency, assumes the first iteratee to
+/// be the shortest.
+template <typename T, typename U, typename... Args>
+detail::zippy<detail::zip_first, T, U, Args...> zip_first(T &&t, U &&u,
+ Args &&... args) {
+ return detail::zippy<detail::zip_first, T, U, Args...>(
+ std::forward<T>(t), std::forward<U>(u), std::forward<Args>(args)...);
+}
+
+namespace detail {
+template <typename Iter>
+Iter next_or_end(const Iter &I, const Iter &End) {
+ if (I == End)
+ return End;
+ return std::next(I);
+}
+
+template <typename Iter>
+auto deref_or_none(const Iter &I, const Iter &End) -> llvm::Optional<
+ std::remove_const_t<std::remove_reference_t<decltype(*I)>>> {
+ if (I == End)
+ return None;
+ return *I;
+}
+
+template <typename Iter> struct ZipLongestItemType {
+ using type =
+ llvm::Optional<typename std::remove_const<typename std::remove_reference<
+ decltype(*std::declval<Iter>())>::type>::type>;
+};
+
+template <typename... Iters> struct ZipLongestTupleType {
+ using type = std::tuple<typename ZipLongestItemType<Iters>::type...>;
+};
+
+template <typename... Iters>
+class zip_longest_iterator
+ : public iterator_facade_base<
+ zip_longest_iterator<Iters...>,
+ typename std::common_type<
+ std::forward_iterator_tag,
+ typename std::iterator_traits<Iters>::iterator_category...>::type,
+ typename ZipLongestTupleType<Iters...>::type,
+ typename std::iterator_traits<typename std::tuple_element<
+ 0, std::tuple<Iters...>>::type>::difference_type,
+ typename ZipLongestTupleType<Iters...>::type *,
+ typename ZipLongestTupleType<Iters...>::type> {
+public:
+ using value_type = typename ZipLongestTupleType<Iters...>::type;
+
+private:
+ std::tuple<Iters...> iterators;
+ std::tuple<Iters...> end_iterators;
+
+ template <size_t... Ns>
+ bool test(const zip_longest_iterator<Iters...> &other,
+ std::index_sequence<Ns...>) const {
+ return llvm::any_of(
+ std::initializer_list<bool>{std::get<Ns>(this->iterators) !=
+ std::get<Ns>(other.iterators)...},
+ identity<bool>{});
+ }
+
+ template <size_t... Ns> value_type deref(std::index_sequence<Ns...>) const {
+ return value_type(
+ deref_or_none(std::get<Ns>(iterators), std::get<Ns>(end_iterators))...);
+ }
+
+ template <size_t... Ns>
+ decltype(iterators) tup_inc(std::index_sequence<Ns...>) const {
+ return std::tuple<Iters...>(
+ next_or_end(std::get<Ns>(iterators), std::get<Ns>(end_iterators))...);
+ }
+
+public:
+ zip_longest_iterator(std::pair<Iters &&, Iters &&>... ts)
+ : iterators(std::forward<Iters>(ts.first)...),
+ end_iterators(std::forward<Iters>(ts.second)...) {}
+
+ value_type operator*() const {
+ return deref(std::index_sequence_for<Iters...>{});
+ }
+
+ zip_longest_iterator<Iters...> &operator++() {
+ iterators = tup_inc(std::index_sequence_for<Iters...>{});
+ return *this;
+ }
+
+ bool operator==(const zip_longest_iterator<Iters...> &other) const {
+ return !test(other, std::index_sequence_for<Iters...>{});
+ }
+};
+
+template <typename... Args> class zip_longest_range {
+public:
+ using iterator =
+ zip_longest_iterator<decltype(adl_begin(std::declval<Args>()))...>;
+ using iterator_category = typename iterator::iterator_category;
+ using value_type = typename iterator::value_type;
+ using difference_type = typename iterator::difference_type;
+ using pointer = typename iterator::pointer;
+ using reference = typename iterator::reference;
+
+private:
+ std::tuple<Args...> ts;
+
+ template <size_t... Ns>
+ iterator begin_impl(std::index_sequence<Ns...>) const {
+ return iterator(std::make_pair(adl_begin(std::get<Ns>(ts)),
+ adl_end(std::get<Ns>(ts)))...);
+ }
+
+ template <size_t... Ns> iterator end_impl(std::index_sequence<Ns...>) const {
+ return iterator(std::make_pair(adl_end(std::get<Ns>(ts)),
+ adl_end(std::get<Ns>(ts)))...);
+ }
+
+public:
+ zip_longest_range(Args &&... ts_) : ts(std::forward<Args>(ts_)...) {}
+
+ iterator begin() const {
+ return begin_impl(std::index_sequence_for<Args...>{});
+ }
+ iterator end() const { return end_impl(std::index_sequence_for<Args...>{}); }
+};
+} // namespace detail
+
+/// Iterate over two or more iterators at the same time. Iteration continues
+/// until all iterators reach the end. Each step yields a tuple of
+/// llvm::Optionals; an Optional is empty once its iterator has reached the
+/// end.
+template <typename T, typename U, typename... Args>
+detail::zip_longest_range<T, U, Args...> zip_longest(T &&t, U &&u,
+ Args &&... args) {
+ return detail::zip_longest_range<T, U, Args...>(
+ std::forward<T>(t), std::forward<U>(u), std::forward<Args>(args)...);
+}
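+
+// For example (illustrative): with A = {1, 2} and B = {'x'}, zip_longest(A, B)
+// yields (Optional(1), Optional('x')) and then (Optional(2), None).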
+
+/// Iterator wrapper that concatenates sequences together.
+///
+/// This can concatenate different iterators, even with different types, into
+/// a single iterator provided the value types of all the concatenated
+/// iterators expose `reference` and `pointer` types that can be converted to
+/// `ValueT &` and `ValueT *` respectively. It doesn't support more
+/// interesting/customized pointer or reference types.
+///
+/// Currently this only supports forward or higher iterator categories as
+/// inputs and always exposes a forward iterator interface.
+template <typename ValueT, typename... IterTs>
+class concat_iterator
+ : public iterator_facade_base<concat_iterator<ValueT, IterTs...>,
+ std::forward_iterator_tag, ValueT> {
+ using BaseT = typename concat_iterator::iterator_facade_base;
+
+ /// We store both the current and end iterators for each concatenated
+ /// sequence in a tuple of pairs.
+ ///
+ /// Note that something like iterator_range seems nice at first here, but the
+ /// range properties are of little benefit and end up getting in the way
+ /// because we need to do mutation on the current iterators.
+ std::tuple<IterTs...> Begins;
+ std::tuple<IterTs...> Ends;
+
+ /// Attempts to increment a specific iterator.
+ ///
+ /// Returns true if it was able to increment the iterator. Returns false if
+ /// the iterator is already at the end iterator.
+ template <size_t Index> bool incrementHelper() {
+ auto &Begin = std::get<Index>(Begins);
+ auto &End = std::get<Index>(Ends);
+ if (Begin == End)
+ return false;
+
+ ++Begin;
+ return true;
+ }
+
+ /// Increments the first non-end iterator.
+ ///
+ /// It is an error to call this with all iterators at the end.
+ template <size_t... Ns> void increment(std::index_sequence<Ns...>) {
+ // Build a sequence of functions to increment each iterator if possible.
+ bool (concat_iterator::*IncrementHelperFns[])() = {
+ &concat_iterator::incrementHelper<Ns>...};
+
+ // Loop over them, and stop as soon as we succeed at incrementing one.
+ for (auto &IncrementHelperFn : IncrementHelperFns)
+ if ((this->*IncrementHelperFn)())
+ return;
+
+ llvm_unreachable("Attempted to increment an end concat iterator!");
+ }
+
+ /// Returns null if the specified iterator is at the end. Otherwise,
+ /// dereferences the iterator and returns the address of the resulting
+ /// reference.
+ template <size_t Index> ValueT *getHelper() const {
+ auto &Begin = std::get<Index>(Begins);
+ auto &End = std::get<Index>(Ends);
+ if (Begin == End)
+ return nullptr;
+
+ return &*Begin;
+ }
+
+ /// Finds the first non-end iterator, dereferences, and returns the resulting
+ /// reference.
+ ///
+ /// It is an error to call this with all iterators at the end.
+ template <size_t... Ns> ValueT &get(std::index_sequence<Ns...>) const {
+ // Build a sequence of functions to get from iterator if possible.
+ ValueT *(concat_iterator::*GetHelperFns[])() const = {
+ &concat_iterator::getHelper<Ns>...};
+
+ // Loop over them, and return the first result we find.
+ for (auto &GetHelperFn : GetHelperFns)
+ if (ValueT *P = (this->*GetHelperFn)())
+ return *P;
+
+ llvm_unreachable("Attempted to get a pointer from an end concat iterator!");
+ }
+
+public:
+ /// Constructs an iterator from a sequence of ranges.
+ ///
+ /// We need the full range to know how to switch between each of the
+ /// iterators.
+ template <typename... RangeTs>
+ explicit concat_iterator(RangeTs &&... Ranges)
+ : Begins(std::begin(Ranges)...), Ends(std::end(Ranges)...) {}
+
+ using BaseT::operator++;
+
+ concat_iterator &operator++() {
+ increment(std::index_sequence_for<IterTs...>());
+ return *this;
+ }
+
+ ValueT &operator*() const {
+ return get(std::index_sequence_for<IterTs...>());
+ }
+
+ bool operator==(const concat_iterator &RHS) const {
+ return Begins == RHS.Begins && Ends == RHS.Ends;
+ }
+};
+
+namespace detail {
+
+/// Helper to store a sequence of ranges being concatenated and access them.
+///
+/// This is designed to facilitate providing actual storage when temporaries
+/// are passed into the constructor such that we can use it as part of range
+/// based for loops.
+template <typename ValueT, typename... RangeTs> class concat_range {
+public:
+ using iterator =
+ concat_iterator<ValueT,
+ decltype(std::begin(std::declval<RangeTs &>()))...>;
+
+private:
+ std::tuple<RangeTs...> Ranges;
+
+ template <size_t... Ns>
+ iterator begin_impl(std::index_sequence<Ns...>) {
+ return iterator(std::get<Ns>(Ranges)...);
+ }
+ template <size_t... Ns>
+ iterator begin_impl(std::index_sequence<Ns...>) const {
+ return iterator(std::get<Ns>(Ranges)...);
+ }
+ template <size_t... Ns> iterator end_impl(std::index_sequence<Ns...>) {
+ return iterator(make_range(std::end(std::get<Ns>(Ranges)),
+ std::end(std::get<Ns>(Ranges)))...);
+ }
+ template <size_t... Ns> iterator end_impl(std::index_sequence<Ns...>) const {
+ return iterator(make_range(std::end(std::get<Ns>(Ranges)),
+ std::end(std::get<Ns>(Ranges)))...);
+ }
+
+public:
+ concat_range(RangeTs &&... Ranges)
+ : Ranges(std::forward<RangeTs>(Ranges)...) {}
+
+ iterator begin() {
+ return begin_impl(std::index_sequence_for<RangeTs...>{});
+ }
+ iterator begin() const {
+ return begin_impl(std::index_sequence_for<RangeTs...>{});
+ }
+ iterator end() {
+ return end_impl(std::index_sequence_for<RangeTs...>{});
+ }
+ iterator end() const {
+ return end_impl(std::index_sequence_for<RangeTs...>{});
+ }
+};
+
+} // end namespace detail
+
+/// Concatenated range across two or more ranges.
+///
+/// The desired value type must be explicitly specified.
+template <typename ValueT, typename... RangeTs>
+detail::concat_range<ValueT, RangeTs...> concat(RangeTs &&... Ranges) {
+ static_assert(sizeof...(RangeTs) > 1,
+ "Need more than one range to concatenate!");
+ return detail::concat_range<ValueT, RangeTs...>(
+ std::forward<RangeTs>(Ranges)...);
+}
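+
+// Usage sketch (illustrative; assumes <vector>):
+//
+//   std::vector<int> A{1, 2}, B{3};
+//   for (int &N : concat<int>(A, B))
+//     ++N; // visits 1, 2, 3 across both vectors, in order.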
+
+/// A utility class used to implement an iterator that contains some base object
+/// and an index. The iterator moves the index but keeps the base constant.
+template <typename DerivedT, typename BaseT, typename T,
+ typename PointerT = T *, typename ReferenceT = T &>
+class indexed_accessor_iterator
+ : public llvm::iterator_facade_base<DerivedT,
+ std::random_access_iterator_tag, T,
+ std::ptrdiff_t, PointerT, ReferenceT> {
+public:
+ ptrdiff_t operator-(const indexed_accessor_iterator &rhs) const {
+ assert(base == rhs.base && "incompatible iterators");
+ return index - rhs.index;
+ }
+ bool operator==(const indexed_accessor_iterator &rhs) const {
+ return base == rhs.base && index == rhs.index;
+ }
+ bool operator<(const indexed_accessor_iterator &rhs) const {
+ assert(base == rhs.base && "incompatible iterators");
+ return index < rhs.index;
+ }
+
+ DerivedT &operator+=(ptrdiff_t offset) {
+ this->index += offset;
+ return static_cast<DerivedT &>(*this);
+ }
+ DerivedT &operator-=(ptrdiff_t offset) {
+ this->index -= offset;
+ return static_cast<DerivedT &>(*this);
+ }
+
+ /// Returns the current index of the iterator.
+ ptrdiff_t getIndex() const { return index; }
+
+ /// Returns the current base of the iterator.
+ const BaseT &getBase() const { return base; }
+
+protected:
+ indexed_accessor_iterator(BaseT base, ptrdiff_t index)
+ : base(base), index(index) {}
+ BaseT base;
+ ptrdiff_t index;
+};
+
+namespace detail {
+/// This class represents the base of a range of indexed_accessor_iterators. It
+/// provides support for many different range functionalities, e.g.
+/// drop_front/slice/etc.. Derived range classes must implement the following
+/// static methods:
+/// * ReferenceT dereference_iterator(const BaseT &base, ptrdiff_t index)
+/// - Dereference an iterator pointing to the base object at the given
+/// index.
+/// * BaseT offset_base(const BaseT &base, ptrdiff_t index)
+///   - Return a new base that is offset from the provided base by 'index'
+/// elements.
+template <typename DerivedT, typename BaseT, typename T,
+ typename PointerT = T *, typename ReferenceT = T &>
+class indexed_accessor_range_base {
+public:
+ using RangeBaseT = indexed_accessor_range_base;
+
+ /// An iterator element of this range.
+ class iterator : public indexed_accessor_iterator<iterator, BaseT, T,
+ PointerT, ReferenceT> {
+ public:
+ // Index into this iterator, invoking a static method on the derived type.
+ ReferenceT operator*() const {
+ return DerivedT::dereference_iterator(this->getBase(), this->getIndex());
+ }
+
+ private:
+ iterator(BaseT owner, ptrdiff_t curIndex)
+ : iterator::indexed_accessor_iterator(owner, curIndex) {}
+
+ /// Allow access to the constructor.
+ friend indexed_accessor_range_base<DerivedT, BaseT, T, PointerT,
+ ReferenceT>;
+ };
+
+ indexed_accessor_range_base(iterator begin, iterator end)
+ : base(offset_base(begin.getBase(), begin.getIndex())),
+ count(end.getIndex() - begin.getIndex()) {}
+ indexed_accessor_range_base(const iterator_range<iterator> &range)
+ : indexed_accessor_range_base(range.begin(), range.end()) {}
+ indexed_accessor_range_base(BaseT base, ptrdiff_t count)
+ : base(base), count(count) {}
+
+ iterator begin() const { return iterator(base, 0); }
+ iterator end() const { return iterator(base, count); }
+ ReferenceT operator[](size_t Index) const {
+ assert(Index < size() && "invalid index for value range");
+ return DerivedT::dereference_iterator(base, static_cast<ptrdiff_t>(Index));
+ }
+ ReferenceT front() const {
+ assert(!empty() && "expected non-empty range");
+ return (*this)[0];
+ }
+ ReferenceT back() const {
+ assert(!empty() && "expected non-empty range");
+ return (*this)[size() - 1];
+ }
+
+ /// Compare this range with another.
+ template <typename OtherT> bool operator==(const OtherT &other) const {
+ return size() ==
+ static_cast<size_t>(std::distance(other.begin(), other.end())) &&
+ std::equal(begin(), end(), other.begin());
+ }
+ template <typename OtherT> bool operator!=(const OtherT &other) const {
+ return !(*this == other);
+ }
+
+ /// Return the size of this range.
+ size_t size() const { return count; }
+
+  /// Return true if the range is empty.
+ bool empty() const { return size() == 0; }
+
+  /// Drop the first n elements, and keep the next m elements.
+ DerivedT slice(size_t n, size_t m) const {
+ assert(n + m <= size() && "invalid size specifiers");
+ return DerivedT(offset_base(base, n), m);
+ }
+
+ /// Drop the first n elements.
+ DerivedT drop_front(size_t n = 1) const {
+ assert(size() >= n && "Dropping more elements than exist");
+ return slice(n, size() - n);
+ }
+ /// Drop the last n elements.
+ DerivedT drop_back(size_t n = 1) const {
+ assert(size() >= n && "Dropping more elements than exist");
+ return DerivedT(base, size() - n);
+ }
+
+ /// Take the first n elements.
+ DerivedT take_front(size_t n = 1) const {
+ return n < size() ? drop_back(size() - n)
+ : static_cast<const DerivedT &>(*this);
+ }
+
+ /// Take the last n elements.
+ DerivedT take_back(size_t n = 1) const {
+ return n < size() ? drop_front(size() - n)
+ : static_cast<const DerivedT &>(*this);
+ }
+
+ /// Allow conversion to any type accepting an iterator_range.
+ template <typename RangeT, typename = std::enable_if_t<std::is_constructible<
+ RangeT, iterator_range<iterator>>::value>>
+ operator RangeT() const {
+ return RangeT(iterator_range<iterator>(*this));
+ }
+
+ /// Returns the base of this range.
+ const BaseT &getBase() const { return base; }
+
+private:
+ /// Offset the given base by the given amount.
+ static BaseT offset_base(const BaseT &base, size_t n) {
+ return n == 0 ? base : DerivedT::offset_base(base, n);
+ }
+
+protected:
+ indexed_accessor_range_base(const indexed_accessor_range_base &) = default;
+ indexed_accessor_range_base(indexed_accessor_range_base &&) = default;
+ indexed_accessor_range_base &
+ operator=(const indexed_accessor_range_base &) = default;
+
+ /// The base that owns the provided range of values.
+ BaseT base;
+ /// The size from the owning range.
+ ptrdiff_t count;
+};
+} // end namespace detail
+
+/// This class provides an implementation of a range of
+/// indexed_accessor_iterators where the base is not indexable. Ranges with
+/// bases that are offsetable should derive from indexed_accessor_range_base
+/// instead. Derived range classes are expected to implement the following
+/// static method:
+/// * ReferenceT dereference(const BaseT &base, ptrdiff_t index)
+/// - Dereference an iterator pointing to a parent base at the given index.
+template <typename DerivedT, typename BaseT, typename T,
+ typename PointerT = T *, typename ReferenceT = T &>
+class indexed_accessor_range
+ : public detail::indexed_accessor_range_base<
+ DerivedT, std::pair<BaseT, ptrdiff_t>, T, PointerT, ReferenceT> {
+public:
+ indexed_accessor_range(BaseT base, ptrdiff_t startIndex, ptrdiff_t count)
+ : detail::indexed_accessor_range_base<
+ DerivedT, std::pair<BaseT, ptrdiff_t>, T, PointerT, ReferenceT>(
+ std::make_pair(base, startIndex), count) {}
+ using detail::indexed_accessor_range_base<
+ DerivedT, std::pair<BaseT, ptrdiff_t>, T, PointerT,
+ ReferenceT>::indexed_accessor_range_base;
+
+ /// Returns the current base of the range.
+ const BaseT &getBase() const { return this->base.first; }
+
+ /// Returns the current start index of the range.
+ ptrdiff_t getStartIndex() const { return this->base.second; }
+
+ /// See `detail::indexed_accessor_range_base` for details.
+ static std::pair<BaseT, ptrdiff_t>
+ offset_base(const std::pair<BaseT, ptrdiff_t> &base, ptrdiff_t index) {
+ // We encode the internal base as a pair of the derived base and a start
+ // index into the derived base.
+ return std::make_pair(base.first, base.second + index);
+ }
+ /// See `detail::indexed_accessor_range_base` for details.
+ static ReferenceT
+ dereference_iterator(const std::pair<BaseT, ptrdiff_t> &base,
+ ptrdiff_t index) {
+ return DerivedT::dereference(base.first, base.second + index);
+ }
+};
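+
+// Usage sketch (illustrative, hypothetical type): a derived range supplies
+// only the dereference rule; slice/drop_front/take_back come from the base:
+//
+//   struct IntArrayRange
+//       : indexed_accessor_range<IntArrayRange, int *, int> {
+//     using indexed_accessor_range<IntArrayRange, int *,
+//                                  int>::indexed_accessor_range;
+//     static int &dereference(int *Base, ptrdiff_t Index) {
+//       return Base[Index];
+//     }
+//   };
+//   // IntArrayRange R(Ptr, /*startIndex=*/0, /*count=*/N);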
+
+namespace detail {
+/// Compute the type used to expose the first or second member of a pair: a
+/// reference when the range's elements are themselves references, and a copy
+/// when the range's elements are temporaries.
+///
+/// When passing a range whose iterators return values instead of references,
+/// the reference must be dropped from `decltype((elt.first))`, which will
+/// always be a reference, to avoid returning a reference to a temporary.
+template <typename EltTy, typename FirstTy> class first_or_second_type {
+public:
+ using type =
+ typename std::conditional_t<std::is_reference<EltTy>::value, FirstTy,
+ std::remove_reference_t<FirstTy>>;
+};
+} // end namespace detail
+
+/// Given a container of pairs, return a range over the first elements.
+template <typename ContainerTy> auto make_first_range(ContainerTy &&c) {
+ using EltTy = decltype((*std::begin(c)));
+ return llvm::map_range(std::forward<ContainerTy>(c),
+ [](EltTy elt) -> typename detail::first_or_second_type<
+ EltTy, decltype((elt.first))>::type {
+ return elt.first;
+ });
+}
+
+/// Given a container of pairs, return a range over the second elements.
+template <typename ContainerTy> auto make_second_range(ContainerTy &&c) {
+ return llvm::map_range(
+ std::forward<ContainerTy>(c),
+ [](decltype((*std::begin(c))) elt) -> decltype((elt.second)) {
+ return elt.second;
+ });
+}
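+
+// For example (illustrative): given a map-like container M of (Key, Value)
+// pairs, make_first_range(M) iterates over the keys and make_second_range(M)
+// over the values.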
+
+//===----------------------------------------------------------------------===//
+// Extra additions to <utility>
+//===----------------------------------------------------------------------===//
+
+/// Function object to check whether the first component of a std::pair
+/// compares less than the first component of another std::pair.
+struct less_first {
+ template <typename T> bool operator()(const T &lhs, const T &rhs) const {
+ return std::less<>()(lhs.first, rhs.first);
+ }
+};
+
+/// Function object to check whether the second component of a std::pair
+/// compares less than the second component of another std::pair.
+struct less_second {
+ template <typename T> bool operator()(const T &lhs, const T &rhs) const {
+ return std::less<>()(lhs.second, rhs.second);
+ }
+};
+
+/// Function object to apply a binary function to the first component of
+/// a std::pair.
+template<typename FuncTy>
+struct on_first {
+ FuncTy func;
+
+ template <typename T>
+ decltype(auto) operator()(const T &lhs, const T &rhs) const {
+ return func(lhs.first, rhs.first);
+ }
+};
+
+/// Utility type to build an inheritance chain that makes it easy to rank
+/// overload candidates.
+template <int N> struct rank : rank<N - 1> {};
+template <> struct rank<0> {};
+
+namespace detail {
+template <typename... Ts> struct Visitor;
+
+template <typename HeadT, typename... TailTs>
+struct Visitor<HeadT, TailTs...> : remove_cvref_t<HeadT>, Visitor<TailTs...> {
+ explicit constexpr Visitor(HeadT &&Head, TailTs &&...Tail)
+ : remove_cvref_t<HeadT>(std::forward<HeadT>(Head)),
+ Visitor<TailTs...>(std::forward<TailTs>(Tail)...) {}
+ using remove_cvref_t<HeadT>::operator();
+ using Visitor<TailTs...>::operator();
+};
+
+template <typename HeadT> struct Visitor<HeadT> : remove_cvref_t<HeadT> {
+ explicit constexpr Visitor(HeadT &&Head)
+ : remove_cvref_t<HeadT>(std::forward<HeadT>(Head)) {}
+ using remove_cvref_t<HeadT>::operator();
+};
+} // namespace detail
+
+/// Returns an opaquely-typed Callable object whose operator() overload set is
+/// the sum of the operator() overload sets of each CallableT in CallableTs.
+///
+/// The type of the returned object derives from each CallableT in CallableTs.
+/// The returned object is constructed by invoking the appropriate copy or move
+/// constructor of each CallableT, as selected by overload resolution on the
+/// corresponding argument to makeVisitor.
+///
+/// Example:
+///
+/// \code
+/// auto visitor = makeVisitor([](auto) { return "unhandled type"; },
+/// [](int i) { return "int"; },
+/// [](std::string s) { return "str"; });
+/// auto a = visitor(42); // `a` is now "int".
+/// auto b = visitor(std::string("foo")); // `b` is now "str".
+/// auto c = visitor(3.14f); // `c` is now "unhandled type".
+/// \endcode
+///
+/// Example of making a visitor with a lambda which captures a move-only type:
+///
+/// \code
+/// std::unique_ptr<FooHandler> FH = /* ... */;
+/// auto visitor = makeVisitor(
+/// [FH{std::move(FH)}](Foo F) { return FH->handle(F); },
+/// [](int i) { return i; },
+///     [](std::string s) { return atoi(s.c_str()); });
+/// \endcode
+template <typename... CallableTs>
+constexpr decltype(auto) makeVisitor(CallableTs &&...Callables) {
+ return detail::Visitor<CallableTs...>(std::forward<CallableTs>(Callables)...);
+}
+
+//===----------------------------------------------------------------------===//
+// Extra additions to <algorithm>
+//===----------------------------------------------------------------------===//
+
+// We have a copy here so that LLVM behaves the same when using different
+// standard libraries.
+template <class Iterator, class RNG>
+void shuffle(Iterator first, Iterator last, RNG &&g) {
+ // It would be better to use a std::uniform_int_distribution,
+ // but that would be stdlib dependent.
+ typedef
+ typename std::iterator_traits<Iterator>::difference_type difference_type;
+ for (auto size = last - first; size > 1; ++first, (void)--size) {
+ difference_type offset = g() % size;
+ // Avoid self-assignment due to incorrect assertions in libstdc++
+ // containers (https://gcc.gnu.org/bugzilla/show_bug.cgi?id=85828).
+ if (offset != difference_type(0))
+ std::iter_swap(first, first + offset);
+ }
+}
+
+/// Adapt std::less<T> for array_pod_sort.
+template<typename T>
+inline int array_pod_sort_comparator(const void *P1, const void *P2) {
+ if (std::less<T>()(*reinterpret_cast<const T*>(P1),
+ *reinterpret_cast<const T*>(P2)))
+ return -1;
+ if (std::less<T>()(*reinterpret_cast<const T*>(P2),
+ *reinterpret_cast<const T*>(P1)))
+ return 1;
+ return 0;
+}
+
+/// get_array_pod_sort_comparator - This is an internal helper function used to
+/// get type deduction of T right.
+template<typename T>
+inline int (*get_array_pod_sort_comparator(const T &))
+ (const void*, const void*) {
+ return array_pod_sort_comparator<T>;
+}
+
+#ifdef EXPENSIVE_CHECKS
+namespace detail {
+
+inline unsigned presortShuffleEntropy() {
+ static unsigned Result(std::random_device{}());
+ return Result;
+}
+
+template <class IteratorTy>
+inline void presortShuffle(IteratorTy Start, IteratorTy End) {
+ std::mt19937 Generator(presortShuffleEntropy());
+ llvm::shuffle(Start, End, Generator);
+}
+
+} // end namespace detail
+#endif
+
+/// array_pod_sort - This sorts an array with the specified start and end
+/// extent. This is just like std::sort, except that it calls qsort instead of
+/// using an inlined template. qsort is slightly slower than std::sort, but
+/// most sorts are not performance critical in LLVM and std::sort has to be
+/// template instantiated for each type, leading to significant measured code
+/// bloat. This function should generally be used instead of std::sort where
+/// possible.
+///
+/// This function assumes that you have simple POD-like types that can be
+/// compared with std::less and can be moved with memcpy. If this isn't true,
+/// you should use std::sort.
+///
+/// NOTE: If qsort_r were portable, we could allow a custom comparator and
+/// default to std::less.
+template<class IteratorTy>
+inline void array_pod_sort(IteratorTy Start, IteratorTy End) {
+ // Don't inefficiently call qsort with one element or trigger undefined
+ // behavior with an empty sequence.
+ auto NElts = End - Start;
+ if (NElts <= 1) return;
+#ifdef EXPENSIVE_CHECKS
+ detail::presortShuffle<IteratorTy>(Start, End);
+#endif
+ qsort(&*Start, NElts, sizeof(*Start), get_array_pod_sort_comparator(*Start));
+}
+
+template <class IteratorTy>
+inline void array_pod_sort(
+ IteratorTy Start, IteratorTy End,
+ int (*Compare)(
+ const typename std::iterator_traits<IteratorTy>::value_type *,
+ const typename std::iterator_traits<IteratorTy>::value_type *)) {
+ // Don't inefficiently call qsort with one element or trigger undefined
+ // behavior with an empty sequence.
+ auto NElts = End - Start;
+ if (NElts <= 1) return;
+#ifdef EXPENSIVE_CHECKS
+ detail::presortShuffle<IteratorTy>(Start, End);
+#endif
+ qsort(&*Start, NElts, sizeof(*Start),
+ reinterpret_cast<int (*)(const void *, const void *)>(Compare));
+}
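+
+// Usage sketch (illustrative; assumes <vector>): for POD-like element types
+// this replaces std::sort without per-type template bloat:
+//
+//   std::vector<unsigned> IDs = {3, 1, 2};
+//   array_pod_sort(IDs.begin(), IDs.end()); // IDs is now {1, 2, 3}.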
+
+namespace detail {
+// We can use qsort if the iterator type is a pointer and the underlying value
+// is trivially copyable.
+template <typename T>
+using sort_trivially_copyable = conjunction<
+ std::is_pointer<T>,
+ std::is_trivially_copyable<typename std::iterator_traits<T>::value_type>>;
+} // namespace detail
+
+// Provide wrappers to std::sort which shuffle the elements before sorting
+// to help uncover non-deterministic behavior (PR35135).
+template <typename IteratorTy,
+ std::enable_if_t<!detail::sort_trivially_copyable<IteratorTy>::value,
+ int> = 0>
+inline void sort(IteratorTy Start, IteratorTy End) {
+#ifdef EXPENSIVE_CHECKS
+ detail::presortShuffle<IteratorTy>(Start, End);
+#endif
+ std::sort(Start, End);
+}
+
+// Forward trivially copyable types to array_pod_sort. This avoids a large
+// amount of code bloat for a minor performance hit.
+template <typename IteratorTy,
+ std::enable_if_t<detail::sort_trivially_copyable<IteratorTy>::value,
+ int> = 0>
+inline void sort(IteratorTy Start, IteratorTy End) {
+ array_pod_sort(Start, End);
+}
+
+template <typename Container> inline void sort(Container &&C) {
+ llvm::sort(adl_begin(C), adl_end(C));
+}
+
+template <typename IteratorTy, typename Compare>
+inline void sort(IteratorTy Start, IteratorTy End, Compare Comp) {
+#ifdef EXPENSIVE_CHECKS
+ detail::presortShuffle<IteratorTy>(Start, End);
+#endif
+ std::sort(Start, End, Comp);
+}
+
+template <typename Container, typename Compare>
+inline void sort(Container &&C, Compare Comp) {
+ llvm::sort(adl_begin(C), adl_end(C), Comp);
+}
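+
+// Usage sketch (illustrative; assumes <vector>):
+//
+//   std::vector<int> V{3, 1, 2};
+//   llvm::sort(V);                                     // V is now {1, 2, 3}.
+//   llvm::sort(V, [](int L, int R) { return L > R; }); // V is now {3, 2, 1}.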
+
+/// Get the size of a range. This is a wrapper function around std::distance
+/// which is only enabled when the operation is O(1).
+template <typename R>
+auto size(R &&Range,
+ std::enable_if_t<
+ std::is_base_of<std::random_access_iterator_tag,
+ typename std::iterator_traits<decltype(
+ Range.begin())>::iterator_category>::value,
+ void> * = nullptr) {
+ return std::distance(Range.begin(), Range.end());
+}
+
+/// Provide wrappers to std::for_each which take ranges instead of having to
+/// pass begin/end explicitly.
+template <typename R, typename UnaryFunction>
+UnaryFunction for_each(R &&Range, UnaryFunction F) {
+ return std::for_each(adl_begin(Range), adl_end(Range), F);
+}
+
+/// Provide wrappers to std::all_of which take ranges instead of having to pass
+/// begin/end explicitly.
+template <typename R, typename UnaryPredicate>
+bool all_of(R &&Range, UnaryPredicate P) {
+ return std::all_of(adl_begin(Range), adl_end(Range), P);
+}
+
+/// Provide wrappers to std::any_of which take ranges instead of having to pass
+/// begin/end explicitly.
+template <typename R, typename UnaryPredicate>
+bool any_of(R &&Range, UnaryPredicate P) {
+ return std::any_of(adl_begin(Range), adl_end(Range), P);
+}
+
+/// Provide wrappers to std::none_of which take ranges instead of having to pass
+/// begin/end explicitly.
+template <typename R, typename UnaryPredicate>
+bool none_of(R &&Range, UnaryPredicate P) {
+ return std::none_of(adl_begin(Range), adl_end(Range), P);
+}
+
+/// Provide wrappers to std::find which take ranges instead of having to pass
+/// begin/end explicitly.
+template <typename R, typename T> auto find(R &&Range, const T &Val) {
+ return std::find(adl_begin(Range), adl_end(Range), Val);
+}
+
+/// Provide wrappers to std::find_if which take ranges instead of having to pass
+/// begin/end explicitly.
+template <typename R, typename UnaryPredicate>
+auto find_if(R &&Range, UnaryPredicate P) {
+ return std::find_if(adl_begin(Range), adl_end(Range), P);
+}
+
+template <typename R, typename UnaryPredicate>
+auto find_if_not(R &&Range, UnaryPredicate P) {
+ return std::find_if_not(adl_begin(Range), adl_end(Range), P);
+}
+
+/// Provide wrappers to std::remove_if which take ranges instead of having to
+/// pass begin/end explicitly.
+template <typename R, typename UnaryPredicate>
+auto remove_if(R &&Range, UnaryPredicate P) {
+ return std::remove_if(adl_begin(Range), adl_end(Range), P);
+}
+
+/// Provide wrappers to std::copy_if which take ranges instead of having to
+/// pass begin/end explicitly.
+template <typename R, typename OutputIt, typename UnaryPredicate>
+OutputIt copy_if(R &&Range, OutputIt Out, UnaryPredicate P) {
+ return std::copy_if(adl_begin(Range), adl_end(Range), Out, P);
+}
+
+template <typename R, typename OutputIt>
+OutputIt copy(R &&Range, OutputIt Out) {
+ return std::copy(adl_begin(Range), adl_end(Range), Out);
+}
+
+/// Provide wrappers to std::move which take ranges instead of having to
+/// pass begin/end explicitly.
+template <typename R, typename OutputIt>
+OutputIt move(R &&Range, OutputIt Out) {
+ return std::move(adl_begin(Range), adl_end(Range), Out);
+}
+
+/// Wrapper function around std::find to detect if an element exists
+/// in a container.
+template <typename R, typename E>
+bool is_contained(R &&Range, const E &Element) {
+ return std::find(adl_begin(Range), adl_end(Range), Element) != adl_end(Range);
+}
+
+/// Wrapper function around std::is_sorted to check if elements in a range \p R
+/// are sorted with respect to a comparator \p C.
+template <typename R, typename Compare> bool is_sorted(R &&Range, Compare C) {
+ return std::is_sorted(adl_begin(Range), adl_end(Range), C);
+}
+
+/// Wrapper function around std::is_sorted to check if elements in a range \p R
+/// are sorted in non-descending order.
+template <typename R> bool is_sorted(R &&Range) {
+ return std::is_sorted(adl_begin(Range), adl_end(Range));
+}
+
+/// Wrapper function around std::count to count the number of times an element
+/// \p Element occurs in the given range \p Range.
+template <typename R, typename E> auto count(R &&Range, const E &Element) {
+ return std::count(adl_begin(Range), adl_end(Range), Element);
+}
+
+/// Wrapper function around std::count_if to count the number of times an
+/// element satisfying a given predicate occurs in a range.
+template <typename R, typename UnaryPredicate>
+auto count_if(R &&Range, UnaryPredicate P) {
+ return std::count_if(adl_begin(Range), adl_end(Range), P);
+}
+
+/// Wrapper function around std::transform to apply a function to a range and
+/// store the result elsewhere.
+template <typename R, typename OutputIt, typename UnaryFunction>
+OutputIt transform(R &&Range, OutputIt d_first, UnaryFunction F) {
+ return std::transform(adl_begin(Range), adl_end(Range), d_first, F);
+}
+
+/// Provide wrappers to std::partition which take ranges instead of having to
+/// pass begin/end explicitly.
+template <typename R, typename UnaryPredicate>
+auto partition(R &&Range, UnaryPredicate P) {
+ return std::partition(adl_begin(Range), adl_end(Range), P);
+}
+
+/// Provide wrappers to std::lower_bound which take ranges instead of having to
+/// pass begin/end explicitly.
+template <typename R, typename T> auto lower_bound(R &&Range, T &&Value) {
+ return std::lower_bound(adl_begin(Range), adl_end(Range),
+ std::forward<T>(Value));
+}
+
+template <typename R, typename T, typename Compare>
+auto lower_bound(R &&Range, T &&Value, Compare C) {
+ return std::lower_bound(adl_begin(Range), adl_end(Range),
+ std::forward<T>(Value), C);
+}
+
+/// Provide wrappers to std::upper_bound which take ranges instead of having to
+/// pass begin/end explicitly.
+template <typename R, typename T> auto upper_bound(R &&Range, T &&Value) {
+ return std::upper_bound(adl_begin(Range), adl_end(Range),
+ std::forward<T>(Value));
+}
+
+template <typename R, typename T, typename Compare>
+auto upper_bound(R &&Range, T &&Value, Compare C) {
+ return std::upper_bound(adl_begin(Range), adl_end(Range),
+ std::forward<T>(Value), C);
+}
+
+template <typename R>
+void stable_sort(R &&Range) {
+ std::stable_sort(adl_begin(Range), adl_end(Range));
+}
+
+template <typename R, typename Compare>
+void stable_sort(R &&Range, Compare C) {
+ std::stable_sort(adl_begin(Range), adl_end(Range), C);
+}
+
+/// Binary search for the first iterator in a range where a predicate is false.
+/// Requires that P is always true below some limit, and always false above it.
+template <typename R, typename Predicate,
+ typename Val = decltype(*adl_begin(std::declval<R>()))>
+auto partition_point(R &&Range, Predicate P) {
+ return std::partition_point(adl_begin(Range), adl_end(Range), P);
+}
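+
+// For example (illustrative): on a sorted std::vector<int> V,
+// partition_point(V, [](int N) { return N < 42; }) returns the first iterator
+// whose element is not less than 42, like lower_bound(V, 42).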
+
+template<typename Range, typename Predicate>
+auto unique(Range &&R, Predicate P) {
+ return std::unique(adl_begin(R), adl_end(R), P);
+}
+
+/// Wrapper function around std::equal to detect if pair-wise elements between
+/// two ranges are the same.
+template <typename L, typename R> bool equal(L &&LRange, R &&RRange) {
+ return std::equal(adl_begin(LRange), adl_end(LRange), adl_begin(RRange),
+ adl_end(RRange));
+}
+
+/// Wrapper function around std::equal to detect if all elements in a container
+/// are the same.
+template <typename R>
+bool is_splat(R &&Range) {
+ size_t range_size = size(Range);
+ return range_size != 0 && (range_size == 1 ||
+ std::equal(adl_begin(Range) + 1, adl_end(Range), adl_begin(Range)));
+}
+
+/// Provide a container algorithm similar to C++ Library Fundamentals v2's
+/// `erase_if` which is equivalent to:
+///
+/// C.erase(remove_if(C, pred), C.end());
+///
+/// This version works for any container with an erase method call accepting
+/// two iterators.
+template <typename Container, typename UnaryPredicate>
+void erase_if(Container &C, UnaryPredicate P) {
+ C.erase(remove_if(C, P), C.end());
+}
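+
+// For example (illustrative): erase_if(V, [](int N) { return N < 0; }) removes
+// every negative element from a std::vector<int> V in a single pass.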
+
+/// Wrapper function to remove a value from a container:
+///
+/// C.erase(remove(C.begin(), C.end(), V), C.end());
+template <typename Container, typename ValueType>
+void erase_value(Container &C, ValueType V) {
+ C.erase(std::remove(C.begin(), C.end(), V), C.end());
+}
+
+/// Wrapper function to append a range to a container.
+///
+/// C.insert(C.end(), R.begin(), R.end());
+template <typename Container, typename Range>
+inline void append_range(Container &C, Range &&R) {
+ C.insert(C.end(), R.begin(), R.end());
+}
+
+/// Given a sequence container Cont, replace the range [ContIt, ContEnd) with
+/// the range [ValIt, ValEnd) (which is not from the same container).
+template<typename Container, typename RandomAccessIterator>
+void replace(Container &Cont, typename Container::iterator ContIt,
+ typename Container::iterator ContEnd, RandomAccessIterator ValIt,
+ RandomAccessIterator ValEnd) {
+ while (true) {
+ if (ValIt == ValEnd) {
+ Cont.erase(ContIt, ContEnd);
+ return;
+ } else if (ContIt == ContEnd) {
+ Cont.insert(ContIt, ValIt, ValEnd);
+ return;
+ }
+ *ContIt++ = *ValIt++;
+ }
+}
+
+/// Given a sequence container Cont, replace the range [ContIt, ContEnd) with
+/// the range R.
+template<typename Container, typename Range = std::initializer_list<
+ typename Container::value_type>>
+void replace(Container &Cont, typename Container::iterator ContIt,
+ typename Container::iterator ContEnd, Range R) {
+ replace(Cont, ContIt, ContEnd, R.begin(), R.end());
+}
+
+/// An STL-style algorithm similar to std::for_each that applies a second
+/// functor between every pair of elements.
+///
+/// This provides the control flow logic to, for example, print a
+/// comma-separated list:
+/// \code
+/// interleave(names.begin(), names.end(),
+/// [&](StringRef name) { os << name; },
+/// [&] { os << ", "; });
+/// \endcode
+template <typename ForwardIterator, typename UnaryFunctor,
+ typename NullaryFunctor,
+ typename = typename std::enable_if<
+ !std::is_constructible<StringRef, UnaryFunctor>::value &&
+ !std::is_constructible<StringRef, NullaryFunctor>::value>::type>
+inline void interleave(ForwardIterator begin, ForwardIterator end,
+ UnaryFunctor each_fn, NullaryFunctor between_fn) {
+ if (begin == end)
+ return;
+ each_fn(*begin);
+ ++begin;
+ for (; begin != end; ++begin) {
+ between_fn();
+ each_fn(*begin);
+ }
+}
+
+template <typename Container, typename UnaryFunctor, typename NullaryFunctor,
+ typename = typename std::enable_if<
+ !std::is_constructible<StringRef, UnaryFunctor>::value &&
+ !std::is_constructible<StringRef, NullaryFunctor>::value>::type>
+inline void interleave(const Container &c, UnaryFunctor each_fn,
+ NullaryFunctor between_fn) {
+ interleave(c.begin(), c.end(), each_fn, between_fn);
+}
+
+/// Overload of interleave for the common case of string separator.
+template <typename Container, typename UnaryFunctor, typename StreamT,
+ typename T = detail::ValueOfRange<Container>>
+inline void interleave(const Container &c, StreamT &os, UnaryFunctor each_fn,
+ const StringRef &separator) {
+ interleave(c.begin(), c.end(), each_fn, [&] { os << separator; });
+}
+template <typename Container, typename StreamT,
+ typename T = detail::ValueOfRange<Container>>
+inline void interleave(const Container &c, StreamT &os,
+ const StringRef &separator) {
+ interleave(
+ c, os, [&](const T &a) { os << a; }, separator);
+}
+
+template <typename Container, typename UnaryFunctor, typename StreamT,
+ typename T = detail::ValueOfRange<Container>>
+inline void interleaveComma(const Container &c, StreamT &os,
+ UnaryFunctor each_fn) {
+ interleave(c, os, each_fn, ", ");
+}
+template <typename Container, typename StreamT,
+ typename T = detail::ValueOfRange<Container>>
+inline void interleaveComma(const Container &c, StreamT &os) {
+ interleaveComma(c, os, [&](const T &a) { os << a; });
+}
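+
+// Usage sketch (editorial addition; OS stands for any stream with
+// operator<<, e.g. a raw_ostream):
+//   std::vector<int> V = {1, 2, 3};
+//   llvm::interleaveComma(V, OS); // Prints: 1, 2, 3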
+
+//===----------------------------------------------------------------------===//
+// Extra additions to <memory>
+//===----------------------------------------------------------------------===//
+
+struct FreeDeleter {
+ void operator()(void* v) {
+ ::free(v);
+ }
+};
+
+template<typename First, typename Second>
+struct pair_hash {
+ size_t operator()(const std::pair<First, Second> &P) const {
+ return std::hash<First>()(P.first) * 31 + std::hash<Second>()(P.second);
+ }
+};
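+
+// Usage sketch (editorial addition, not in the upstream header): using
+// pair_hash as the hasher of a standard unordered container.
+//   std::unordered_map<std::pair<int, int>, const char *,
+//                      llvm::pair_hash<int, int>> EdgeNames;
+//   EdgeNames[{1, 2}] = "edge-1-2";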
+
+/// Binary functor that adapts to any other binary functor after dereferencing
+/// operands.
+template <typename T> struct deref {
+ T func;
+
+ // Could be further improved to cope with non-derivable functors and
+ // non-binary functors (should be a variadic template member function
+ // operator()).
+ template <typename A, typename B> auto operator()(A &lhs, B &rhs) const {
+ assert(lhs);
+ assert(rhs);
+ return func(*lhs, *rhs);
+ }
+};
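+
+// Usage sketch (editorial addition): ordering a container of pointers by the
+// pointed-to values rather than by address.
+//   std::vector<int *> Ptrs = /* non-null pointers */;
+//   std::sort(Ptrs.begin(), Ptrs.end(), llvm::deref<std::less<int>>());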
+
+namespace detail {
+
+template <typename R> class enumerator_iter;
+
+template <typename R> struct result_pair {
+ using value_reference =
+ typename std::iterator_traits<IterOfRange<R>>::reference;
+
+ friend class enumerator_iter<R>;
+
+ result_pair() = default;
+ result_pair(std::size_t Index, IterOfRange<R> Iter)
+ : Index(Index), Iter(Iter) {}
+
+ result_pair(const result_pair<R> &Other)
+ : Index(Other.Index), Iter(Other.Iter) {}
+ result_pair &operator=(const result_pair &Other) {
+ Index = Other.Index;
+ Iter = Other.Iter;
+ return *this;
+ }
+
+ std::size_t index() const { return Index; }
+ value_reference value() const { return *Iter; }
+
+private:
+ std::size_t Index = std::numeric_limits<std::size_t>::max();
+ IterOfRange<R> Iter;
+};
+
+template <typename R>
+class enumerator_iter
+ : public iterator_facade_base<enumerator_iter<R>, std::forward_iterator_tag,
+ const result_pair<R>> {
+ using result_type = result_pair<R>;
+
+public:
+ explicit enumerator_iter(IterOfRange<R> EndIter)
+ : Result(std::numeric_limits<size_t>::max(), EndIter) {}
+
+ enumerator_iter(std::size_t Index, IterOfRange<R> Iter)
+ : Result(Index, Iter) {}
+
+ const result_type &operator*() const { return Result; }
+
+ enumerator_iter &operator++() {
+ assert(Result.Index != std::numeric_limits<size_t>::max());
+ ++Result.Iter;
+ ++Result.Index;
+ return *this;
+ }
+
+ bool operator==(const enumerator_iter &RHS) const {
+ // Don't compare indices here, only iterators. It's possible for an end
+ // iterator to have different indices depending on whether it was created
+ // by calling std::end() versus incrementing a valid iterator.
+ return Result.Iter == RHS.Result.Iter;
+ }
+
+ enumerator_iter(const enumerator_iter &Other) : Result(Other.Result) {}
+ enumerator_iter &operator=(const enumerator_iter &Other) {
+ Result = Other.Result;
+ return *this;
+ }
+
+private:
+ result_type Result;
+};
+
+template <typename R> class enumerator {
+public:
+ explicit enumerator(R &&Range) : TheRange(std::forward<R>(Range)) {}
+
+ enumerator_iter<R> begin() {
+ return enumerator_iter<R>(0, std::begin(TheRange));
+ }
+ enumerator_iter<R> begin() const {
+ return enumerator_iter<R>(0, std::begin(TheRange));
+ }
+
+ enumerator_iter<R> end() {
+ return enumerator_iter<R>(std::end(TheRange));
+ }
+ enumerator_iter<R> end() const {
+ return enumerator_iter<R>(std::end(TheRange));
+ }
+
+private:
+ R TheRange;
+};
+
+} // end namespace detail
+
+/// Given an input range, returns a new range whose values are pairs (A, B)
+/// such that A is the 0-based index of the item in the sequence, and B is
+/// the value from the original sequence. Example:
+///
+/// std::vector<char> Items = {'A', 'B', 'C', 'D'};
+/// for (auto X : enumerate(Items)) {
+/// printf("Item %d - %c\n", X.index(), X.value());
+/// }
+///
+/// Output:
+/// Item 0 - A
+/// Item 1 - B
+/// Item 2 - C
+/// Item 3 - D
+///
+template <typename R> detail::enumerator<R> enumerate(R &&TheRange) {
+ return detail::enumerator<R>(std::forward<R>(TheRange));
+}
+
+namespace detail {
+
+template <typename F, typename Tuple, std::size_t... I>
+decltype(auto) apply_tuple_impl(F &&f, Tuple &&t, std::index_sequence<I...>) {
+ return std::forward<F>(f)(std::get<I>(std::forward<Tuple>(t))...);
+}
+
+} // end namespace detail
+
+/// Given an input tuple (a1, a2, ..., an), pass the arguments of the
+/// tuple variadically to f as if by calling f(a1, a2, ..., an) and
+/// return the result.
+template <typename F, typename Tuple>
+decltype(auto) apply_tuple(F &&f, Tuple &&t) {
+ using Indices = std::make_index_sequence<
+ std::tuple_size<typename std::decay<Tuple>::type>::value>;
+
+ return detail::apply_tuple_impl(std::forward<F>(f), std::forward<Tuple>(t),
+ Indices{});
+}
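+
+// Usage sketch (editorial addition, not in the upstream header):
+//   auto T = std::make_tuple(1, 2);
+//   int Sum = llvm::apply_tuple([](int A, int B) { return A + B; }, T);
+//   // Sum == 3.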
+
+namespace detail {
+
+template <typename Predicate, typename... Args>
+bool all_of_zip_predicate_first(Predicate &&P, Args &&...args) {
+ auto z = zip(args...);
+ auto it = z.begin();
+ auto end = z.end();
+ while (it != end) {
+ if (!apply_tuple([&](auto &&...args) { return P(args...); }, *it))
+ return false;
+ ++it;
+ }
+ return it.all_equals(end);
+}
+
+// Just an adaptor to switch the order of arguments and have the predicate
+// before the zipped inputs.
+template <typename... ArgsThenPredicate, size_t... InputIndexes>
+bool all_of_zip_predicate_last(
+ std::tuple<ArgsThenPredicate...> argsThenPredicate,
+ std::index_sequence<InputIndexes...>) {
+ auto constexpr OutputIndex =
+ std::tuple_size<decltype(argsThenPredicate)>::value - 1;
+ return all_of_zip_predicate_first(std::get<OutputIndex>(argsThenPredicate),
+ std::get<InputIndexes>(argsThenPredicate)...);
+}
+
+} // end namespace detail
+
+/// Compare two zipped ranges using the provided predicate (as last argument).
+/// Return true if all elements satisfy the predicate and false otherwise.
+// Returns false if the zipped iterators aren't all at the end (size mismatch).
+template <typename... ArgsAndPredicate>
+bool all_of_zip(ArgsAndPredicate &&...argsAndPredicate) {
+ return detail::all_of_zip_predicate_last(
+ std::forward_as_tuple(argsAndPredicate...),
+ std::make_index_sequence<sizeof...(argsAndPredicate) - 1>{});
+}
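+
+// Usage sketch (editorial addition, not in the upstream header):
+//   std::vector<int> A = {1, 2, 3}, B = {1, 2, 3};
+//   bool Eq = llvm::all_of_zip(A, B, [](int L, int R) { return L == R; });
+//   // Eq is true; ranges of different lengths would yield false.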
+
+/// Return true if the sequence [Begin, End) has exactly N items. Runs in O(N)
+/// time. Not meant for use with random-access iterators.
+/// Can optionally take a predicate to lazily filter some items.
+template <typename IterTy,
+ typename Pred = bool (*)(const decltype(*std::declval<IterTy>()) &)>
+bool hasNItems(
+ IterTy &&Begin, IterTy &&End, unsigned N,
+ Pred &&ShouldBeCounted =
+ [](const decltype(*std::declval<IterTy>()) &) { return true; },
+ std::enable_if_t<
+ !std::is_base_of<std::random_access_iterator_tag,
+ typename std::iterator_traits<std::remove_reference_t<
+ decltype(Begin)>>::iterator_category>::value,
+ void> * = nullptr) {
+ for (; N; ++Begin) {
+ if (Begin == End)
+ return false; // Too few.
+ N -= ShouldBeCounted(*Begin);
+ }
+ for (; Begin != End; ++Begin)
+ if (ShouldBeCounted(*Begin))
+ return false; // Too many.
+ return true;
+}
+
+/// Return true if the sequence [Begin, End) has N or more items. Runs in O(N)
+/// time. Not meant for use with random-access iterators.
+/// Can optionally take a predicate to lazily filter some items.
+template <typename IterTy,
+ typename Pred = bool (*)(const decltype(*std::declval<IterTy>()) &)>
+bool hasNItemsOrMore(
+ IterTy &&Begin, IterTy &&End, unsigned N,
+ Pred &&ShouldBeCounted =
+ [](const decltype(*std::declval<IterTy>()) &) { return true; },
+ std::enable_if_t<
+ !std::is_base_of<std::random_access_iterator_tag,
+ typename std::iterator_traits<std::remove_reference_t<
+ decltype(Begin)>>::iterator_category>::value,
+ void> * = nullptr) {
+ for (; N; ++Begin) {
+ if (Begin == End)
+ return false; // Too few.
+ N -= ShouldBeCounted(*Begin);
+ }
+ return true;
+}
+
+/// Returns true if the sequence [Begin, End) has N or fewer items. Can
+/// optionally take a predicate to lazily filter some items.
+template <typename IterTy,
+ typename Pred = bool (*)(const decltype(*std::declval<IterTy>()) &)>
+bool hasNItemsOrLess(
+ IterTy &&Begin, IterTy &&End, unsigned N,
+ Pred &&ShouldBeCounted = [](const decltype(*std::declval<IterTy>()) &) {
+ return true;
+ }) {
+ assert(N != std::numeric_limits<unsigned>::max());
+ return !hasNItemsOrMore(Begin, End, N + 1, ShouldBeCounted);
+}
+
+/// Returns true if the given container has exactly N items.
+template <typename ContainerTy> bool hasNItems(ContainerTy &&C, unsigned N) {
+ return hasNItems(std::begin(C), std::end(C), N);
+}
+
+/// Returns true if the given container has N or more items.
+template <typename ContainerTy>
+bool hasNItemsOrMore(ContainerTy &&C, unsigned N) {
+ return hasNItemsOrMore(std::begin(C), std::end(C), N);
+}
+
+/// Returns true if the given container has N or fewer items.
+template <typename ContainerTy>
+bool hasNItemsOrLess(ContainerTy &&C, unsigned N) {
+ return hasNItemsOrLess(std::begin(C), std::end(C), N);
+}
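+
+// Usage sketch (editorial addition): handy when size() is unavailable or
+// linear-time, as with std::list.
+//   std::list<int> L = {1, 2, 3};
+//   bool E = llvm::hasNItems(L, 3);       // true
+//   bool M = llvm::hasNItemsOrMore(L, 2); // true
+//   bool F = llvm::hasNItemsOrLess(L, 2); // false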
+
+/// Returns a raw pointer that represents the same address as the argument.
+///
+/// This implementation can be removed once we move to C++20 where it's defined
+/// as std::to_address().
+///
+/// The std::pointer_traits<>::to_address(p) variations of these overloads
+/// have not been implemented.
+template <class Ptr> auto to_address(const Ptr &P) { return P.operator->(); }
+template <class T> constexpr T *to_address(T *P) { return P; }
+
+} // end namespace llvm
+
+#endif // LLVM_ADT_STLEXTRAS_H
+
+#ifdef __GNUC__
+#pragma GCC diagnostic pop
+#endif
diff --git a/contrib/libs/llvm14/include/llvm/ADT/STLForwardCompat.h b/contrib/libs/llvm14/include/llvm/ADT/STLForwardCompat.h
new file mode 100644
index 0000000000..7bb9c7432a
--- /dev/null
+++ b/contrib/libs/llvm14/include/llvm/ADT/STLForwardCompat.h
@@ -0,0 +1,94 @@
+#pragma once
+
+#ifdef __GNUC__
+#pragma GCC diagnostic push
+#pragma GCC diagnostic ignored "-Wunused-parameter"
+#endif
+
+//===- STLForwardCompat.h - Library features from future STLs --*- C++ -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+///
+/// \file
+/// This file contains library features backported from future STL versions.
+///
+/// These should be replaced with their STL counterparts as the C++ version LLVM
+/// is compiled with is updated.
+///
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_ADT_STLFORWARDCOMPAT_H
+#define LLVM_ADT_STLFORWARDCOMPAT_H
+
+#include <type_traits>
+
+namespace llvm {
+
+//===----------------------------------------------------------------------===//
+// Features from C++17
+//===----------------------------------------------------------------------===//
+
+template <typename T>
+struct negation // NOLINT(readability-identifier-naming)
+ : std::integral_constant<bool, !bool(T::value)> {};
+
+template <typename...>
+struct conjunction // NOLINT(readability-identifier-naming)
+ : std::true_type {};
+template <typename B1> struct conjunction<B1> : B1 {};
+template <typename B1, typename... Bn>
+struct conjunction<B1, Bn...>
+ : std::conditional<bool(B1::value), conjunction<Bn...>, B1>::type {};
+
+template <typename...>
+struct disjunction // NOLINT(readability-identifier-naming)
+ : std::false_type {};
+template <typename B1> struct disjunction<B1> : B1 {};
+template <typename B1, typename... Bn>
+struct disjunction<B1, Bn...>
+ : std::conditional<bool(B1::value), B1, disjunction<Bn...>>::type {};
+
+struct in_place_t // NOLINT(readability-identifier-naming)
+{
+ explicit in_place_t() = default;
+};
+/// \warning This must not be odr-used, as it cannot be made \c inline in C++14.
+constexpr in_place_t in_place; // NOLINT(readability-identifier-naming)
+
+template <typename T>
+struct in_place_type_t // NOLINT(readability-identifier-naming)
+{
+ explicit in_place_type_t() = default;
+};
+
+template <std::size_t I>
+struct in_place_index_t // NOLINT(readability-identifier-naming)
+{
+ explicit in_place_index_t() = default;
+};
+
+//===----------------------------------------------------------------------===//
+// Features from C++20
+//===----------------------------------------------------------------------===//
+
+template <typename T>
+struct remove_cvref // NOLINT(readability-identifier-naming)
+{
+ using type = std::remove_cv_t<std::remove_reference_t<T>>;
+};
+
+template <typename T>
+using remove_cvref_t // NOLINT(readability-identifier-naming)
+ = typename llvm::remove_cvref<T>::type;
+
+} // namespace llvm
+
+#endif // LLVM_ADT_STLFORWARDCOMPAT_H
+
+#ifdef __GNUC__
+#pragma GCC diagnostic pop
+#endif
diff --git a/contrib/libs/llvm14/include/llvm/ADT/STLFunctionalExtras.h b/contrib/libs/llvm14/include/llvm/ADT/STLFunctionalExtras.h
new file mode 100644
index 0000000000..2ac0accaeb
--- /dev/null
+++ b/contrib/libs/llvm14/include/llvm/ADT/STLFunctionalExtras.h
@@ -0,0 +1,87 @@
+#pragma once
+
+#ifdef __GNUC__
+#pragma GCC diagnostic push
+#pragma GCC diagnostic ignored "-Wunused-parameter"
+#endif
+
+//===- llvm/ADT/STLFunctionalExtras.h - Extras for <functional> -*- C++ -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// This file contains some extensions to <functional>.
+//
+// No library is required when using these functions.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_ADT_STLFUNCTIONALEXTRAS_H
+#define LLVM_ADT_STLFUNCTIONALEXTRAS_H
+
+#include "llvm/ADT/STLForwardCompat.h"
+
+#include <type_traits>
+#include <utility>
+#include <cstdint>
+
+namespace llvm {
+
+//===----------------------------------------------------------------------===//
+// Extra additions to <functional>
+//===----------------------------------------------------------------------===//
+
+/// An efficient, type-erasing, non-owning reference to a callable. This is
+/// intended for use as the type of a function parameter that is not used
+/// after the function in question returns.
+///
+/// This class does not own the callable, so it is not in general safe to store
+/// a function_ref.
+template<typename Fn> class function_ref;
+
+template<typename Ret, typename ...Params>
+class function_ref<Ret(Params...)> {
+ Ret (*callback)(intptr_t callable, Params ...params) = nullptr;
+ intptr_t callable;
+
+ template<typename Callable>
+ static Ret callback_fn(intptr_t callable, Params ...params) {
+ return (*reinterpret_cast<Callable*>(callable))(
+ std::forward<Params>(params)...);
+ }
+
+public:
+ function_ref() = default;
+ function_ref(std::nullptr_t) {}
+
+ template <typename Callable>
+ function_ref(
+ Callable &&callable,
+ // This is not the copy-constructor.
+ std::enable_if_t<!std::is_same<remove_cvref_t<Callable>,
+ function_ref>::value> * = nullptr,
+ // Functor must be callable and return a suitable type.
+ std::enable_if_t<std::is_void<Ret>::value ||
+ std::is_convertible<decltype(std::declval<Callable>()(
+ std::declval<Params>()...)),
+ Ret>::value> * = nullptr)
+ : callback(callback_fn<typename std::remove_reference<Callable>::type>),
+ callable(reinterpret_cast<intptr_t>(&callable)) {}
+
+ Ret operator()(Params ...params) const {
+ return callback(callable, std::forward<Params>(params)...);
+ }
+
+ explicit operator bool() const { return callback; }
+};
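+
+// Usage sketch (editorial addition): function_ref is cheap to pass by value
+// and accepts any callable, but must not outlive the callable it refers to.
+//   int applyTwice(llvm::function_ref<int(int)> F, int X) {
+//     return F(F(X));
+//   }
+//   // applyTwice([](int N) { return N + 1; }, 3) returns 5.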
+
+} // end namespace llvm
+
+#endif // LLVM_ADT_STLFUNCTIONALEXTRAS_H
+
+#ifdef __GNUC__
+#pragma GCC diagnostic pop
+#endif
diff --git a/contrib/libs/llvm14/include/llvm/ADT/ScopeExit.h b/contrib/libs/llvm14/include/llvm/ADT/ScopeExit.h
new file mode 100644
index 0000000000..ed4e3e11f0
--- /dev/null
+++ b/contrib/libs/llvm14/include/llvm/ADT/ScopeExit.h
@@ -0,0 +1,77 @@
+#pragma once
+
+#ifdef __GNUC__
+#pragma GCC diagnostic push
+#pragma GCC diagnostic ignored "-Wunused-parameter"
+#endif
+
+//===- llvm/ADT/ScopeExit.h - Execute code at scope exit --------*- C++ -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+///
+/// \file
+/// This file defines the make_scope_exit function, which executes user-defined
+/// cleanup logic at scope exit.
+///
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_ADT_SCOPEEXIT_H
+#define LLVM_ADT_SCOPEEXIT_H
+
+#include "llvm/Support/Compiler.h"
+
+#include <type_traits>
+#include <utility>
+
+namespace llvm {
+namespace detail {
+
+template <typename Callable> class scope_exit {
+ Callable ExitFunction;
+ bool Engaged = true; // False once moved-from or release()d.
+
+public:
+ template <typename Fp>
+ explicit scope_exit(Fp &&F) : ExitFunction(std::forward<Fp>(F)) {}
+
+ scope_exit(scope_exit &&Rhs)
+ : ExitFunction(std::move(Rhs.ExitFunction)), Engaged(Rhs.Engaged) {
+ Rhs.release();
+ }
+ scope_exit(const scope_exit &) = delete;
+ scope_exit &operator=(scope_exit &&) = delete;
+ scope_exit &operator=(const scope_exit &) = delete;
+
+ void release() { Engaged = false; }
+
+ ~scope_exit() {
+ if (Engaged)
+ ExitFunction();
+ }
+};
+
+} // end namespace detail
+
+// Keeps the callable object that is passed in, and executes it at the
+// destruction of the returned object (usually at the scope exit where the
+// returned object is kept).
+//
+// Interface is specified by p0052r2.
+template <typename Callable>
+LLVM_NODISCARD detail::scope_exit<typename std::decay<Callable>::type>
+make_scope_exit(Callable &&F) {
+ return detail::scope_exit<typename std::decay<Callable>::type>(
+ std::forward<Callable>(F));
+}
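+
+// Usage sketch (editorial addition; the file name is illustrative):
+//   FILE *F = fopen("data.txt", "r");
+//   auto Cleanup = llvm::make_scope_exit([&] {
+//     if (F)
+//       fclose(F);
+//   });
+//   // Early returns are safe: the lambda runs when Cleanup is destroyed.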
+
+} // end namespace llvm
+
+#endif
+
+#ifdef __GNUC__
+#pragma GCC diagnostic pop
+#endif
diff --git a/contrib/libs/llvm14/include/llvm/ADT/ScopedHashTable.h b/contrib/libs/llvm14/include/llvm/ADT/ScopedHashTable.h
new file mode 100644
index 0000000000..0336fd9a18
--- /dev/null
+++ b/contrib/libs/llvm14/include/llvm/ADT/ScopedHashTable.h
@@ -0,0 +1,274 @@
+#pragma once
+
+#ifdef __GNUC__
+#pragma GCC diagnostic push
+#pragma GCC diagnostic ignored "-Wunused-parameter"
+#endif
+
+//===- ScopedHashTable.h - A simple scoped hash table -----------*- C++ -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// This file implements an efficient scoped hash table, which is useful for
+// things like dominator-based optimizations. This allows clients to do things
+// like this:
+//
+// ScopedHashTable<int, int> HT;
+// {
+// ScopedHashTableScope<int, int> Scope1(HT);
+// HT.insert(0, 0);
+// HT.insert(1, 1);
+// {
+// ScopedHashTableScope<int, int> Scope2(HT);
+// HT.insert(0, 42);
+// }
+// }
+//
+// Looking up the value for "0" in the Scope2 block will return 42. Looking
+// up the value for 0 before 42 is inserted or after Scope2 is popped will
+// return 0.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_ADT_SCOPEDHASHTABLE_H
+#define LLVM_ADT_SCOPEDHASHTABLE_H
+
+#include "llvm/ADT/DenseMap.h"
+#include "llvm/ADT/DenseMapInfo.h"
+#include "llvm/Support/AllocatorBase.h"
+#include <cassert>
+#include <new>
+
+namespace llvm {
+
+template <typename K, typename V, typename KInfo = DenseMapInfo<K>,
+ typename AllocatorTy = MallocAllocator>
+class ScopedHashTable;
+
+template <typename K, typename V>
+class ScopedHashTableVal {
+ ScopedHashTableVal *NextInScope;
+ ScopedHashTableVal *NextForKey;
+ K Key;
+ V Val;
+
+ ScopedHashTableVal(const K &key, const V &val) : Key(key), Val(val) {}
+
+public:
+ const K &getKey() const { return Key; }
+ const V &getValue() const { return Val; }
+ V &getValue() { return Val; }
+
+ ScopedHashTableVal *getNextForKey() { return NextForKey; }
+ const ScopedHashTableVal *getNextForKey() const { return NextForKey; }
+ ScopedHashTableVal *getNextInScope() { return NextInScope; }
+
+ template <typename AllocatorTy>
+ static ScopedHashTableVal *Create(ScopedHashTableVal *nextInScope,
+ ScopedHashTableVal *nextForKey,
+ const K &key, const V &val,
+ AllocatorTy &Allocator) {
+ ScopedHashTableVal *New = Allocator.template Allocate<ScopedHashTableVal>();
+ // Set up the value.
+ new (New) ScopedHashTableVal(key, val);
+ New->NextInScope = nextInScope;
+ New->NextForKey = nextForKey;
+ return New;
+ }
+
+ template <typename AllocatorTy> void Destroy(AllocatorTy &Allocator) {
+ // Free memory referenced by the item.
+ this->~ScopedHashTableVal();
+ Allocator.Deallocate(this);
+ }
+};
+
+template <typename K, typename V, typename KInfo = DenseMapInfo<K>,
+ typename AllocatorTy = MallocAllocator>
+class ScopedHashTableScope {
+ /// HT - The hashtable that we are active for.
+ ScopedHashTable<K, V, KInfo, AllocatorTy> &HT;
+
+ /// PrevScope - This is the scope that we are shadowing in HT.
+ ScopedHashTableScope *PrevScope;
+
+ /// LastValInScope - This is the last value that was inserted for this scope
+ /// or null if none have been inserted yet.
+ ScopedHashTableVal<K, V> *LastValInScope;
+
+public:
+ ScopedHashTableScope(ScopedHashTable<K, V, KInfo, AllocatorTy> &HT);
+ ScopedHashTableScope(ScopedHashTableScope &) = delete;
+ ScopedHashTableScope &operator=(ScopedHashTableScope &) = delete;
+ ~ScopedHashTableScope();
+
+ ScopedHashTableScope *getParentScope() { return PrevScope; }
+ const ScopedHashTableScope *getParentScope() const { return PrevScope; }
+
+private:
+ friend class ScopedHashTable<K, V, KInfo, AllocatorTy>;
+
+ ScopedHashTableVal<K, V> *getLastValInScope() {
+ return LastValInScope;
+ }
+
+ void setLastValInScope(ScopedHashTableVal<K, V> *Val) {
+ LastValInScope = Val;
+ }
+};
+
+template <typename K, typename V, typename KInfo = DenseMapInfo<K>>
+class ScopedHashTableIterator {
+ ScopedHashTableVal<K, V> *Node;
+
+public:
+ ScopedHashTableIterator(ScopedHashTableVal<K, V> *node) : Node(node) {}
+
+ V &operator*() const {
+ assert(Node && "Dereference end()");
+ return Node->getValue();
+ }
+ V *operator->() const {
+ return &Node->getValue();
+ }
+
+ bool operator==(const ScopedHashTableIterator &RHS) const {
+ return Node == RHS.Node;
+ }
+ bool operator!=(const ScopedHashTableIterator &RHS) const {
+ return Node != RHS.Node;
+ }
+
+ inline ScopedHashTableIterator& operator++() { // Preincrement
+ assert(Node && "incrementing past end()");
+ Node = Node->getNextForKey();
+ return *this;
+ }
+ ScopedHashTableIterator operator++(int) { // Postincrement
+ ScopedHashTableIterator tmp = *this; ++*this; return tmp;
+ }
+};
+
+template <typename K, typename V, typename KInfo, typename AllocatorTy>
+class ScopedHashTable {
+public:
+ /// ScopeTy - This is a helpful typedef that allows clients to get easy access
+ /// to the scope type for this hash table.
+ using ScopeTy = ScopedHashTableScope<K, V, KInfo, AllocatorTy>;
+ using size_type = unsigned;
+
+private:
+ friend class ScopedHashTableScope<K, V, KInfo, AllocatorTy>;
+
+ using ValTy = ScopedHashTableVal<K, V>;
+
+ DenseMap<K, ValTy*, KInfo> TopLevelMap;
+ ScopeTy *CurScope = nullptr;
+
+ AllocatorTy Allocator;
+
+public:
+ ScopedHashTable() = default;
+ ScopedHashTable(AllocatorTy A) : Allocator(A) {}
+ ScopedHashTable(const ScopedHashTable &) = delete;
+ ScopedHashTable &operator=(const ScopedHashTable &) = delete;
+
+ ~ScopedHashTable() {
+ assert(!CurScope && TopLevelMap.empty() && "Scope imbalance!");
+ }
+
+ /// Access to the allocator.
+ AllocatorTy &getAllocator() { return Allocator; }
+ const AllocatorTy &getAllocator() const { return Allocator; }
+
+ /// Return 1 if the specified key is in the table, 0 otherwise.
+ size_type count(const K &Key) const {
+ return TopLevelMap.count(Key);
+ }
+
+ V lookup(const K &Key) const {
+ auto I = TopLevelMap.find(Key);
+ if (I != TopLevelMap.end())
+ return I->second->getValue();
+
+ return V();
+ }
+
+ void insert(const K &Key, const V &Val) {
+ insertIntoScope(CurScope, Key, Val);
+ }
+
+ using iterator = ScopedHashTableIterator<K, V, KInfo>;
+
+ iterator end() { return iterator(nullptr); }
+
+ iterator begin(const K &Key) {
+ typename DenseMap<K, ValTy*, KInfo>::iterator I =
+ TopLevelMap.find(Key);
+ if (I == TopLevelMap.end()) return end();
+ return iterator(I->second);
+ }
+
+ ScopeTy *getCurScope() { return CurScope; }
+ const ScopeTy *getCurScope() const { return CurScope; }
+
+ /// insertIntoScope - This inserts the specified key/value at the specified
+ /// (possibly not the current) scope. While it is ok to insert into a scope
+ /// that isn't the current one, it isn't ok to insert *underneath* an existing
+ /// value of the specified key.
+ void insertIntoScope(ScopeTy *S, const K &Key, const V &Val) {
+ assert(S && "No scope active!");
+ ScopedHashTableVal<K, V> *&KeyEntry = TopLevelMap[Key];
+ KeyEntry = ValTy::Create(S->getLastValInScope(), KeyEntry, Key, Val,
+ Allocator);
+ S->setLastValInScope(KeyEntry);
+ }
+};
+
+/// ScopedHashTableScope ctor - Install this as the current scope for the hash
+/// table.
+template <typename K, typename V, typename KInfo, typename Allocator>
+ScopedHashTableScope<K, V, KInfo, Allocator>::
+ ScopedHashTableScope(ScopedHashTable<K, V, KInfo, Allocator> &ht) : HT(ht) {
+ PrevScope = HT.CurScope;
+ HT.CurScope = this;
+ LastValInScope = nullptr;
+}
+
+template <typename K, typename V, typename KInfo, typename Allocator>
+ScopedHashTableScope<K, V, KInfo, Allocator>::~ScopedHashTableScope() {
+ assert(HT.CurScope == this && "Scope imbalance!");
+ HT.CurScope = PrevScope;
+
+ // Pop and delete all values corresponding to this scope.
+ while (ScopedHashTableVal<K, V> *ThisEntry = LastValInScope) {
+ // Pop this value out of the TopLevelMap.
+ if (!ThisEntry->getNextForKey()) {
+ assert(HT.TopLevelMap[ThisEntry->getKey()] == ThisEntry &&
+ "Scope imbalance!");
+ HT.TopLevelMap.erase(ThisEntry->getKey());
+ } else {
+ ScopedHashTableVal<K, V> *&KeyEntry = HT.TopLevelMap[ThisEntry->getKey()];
+ assert(KeyEntry == ThisEntry && "Scope imbalance!");
+ KeyEntry = ThisEntry->getNextForKey();
+ }
+
+ // Pop this value out of the scope.
+ LastValInScope = ThisEntry->getNextInScope();
+
+ // Delete this entry.
+ ThisEntry->Destroy(HT.getAllocator());
+ }
+}
+
+} // end namespace llvm
+
+#endif // LLVM_ADT_SCOPEDHASHTABLE_H
+
+#ifdef __GNUC__
+#pragma GCC diagnostic pop
+#endif
diff --git a/contrib/libs/llvm14/include/llvm/ADT/Sequence.h b/contrib/libs/llvm14/include/llvm/ADT/Sequence.h
new file mode 100644
index 0000000000..2cd1085d1b
--- /dev/null
+++ b/contrib/libs/llvm14/include/llvm/ADT/Sequence.h
@@ -0,0 +1,388 @@
+#pragma once
+
+#ifdef __GNUC__
+#pragma GCC diagnostic push
+#pragma GCC diagnostic ignored "-Wunused-parameter"
+#endif
+
+//===- Sequence.h - Utility for producing sequences of values ---*- C++ -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+/// \file
+/// Provides some synthesis utilities to produce sequences of values. The names
+/// are intentionally kept very short as they tend to occur in common and
+/// widely used contexts.
+///
+/// The `seq(A, B)` function produces a sequence of values from `A` up to
+/// (but not including) `B`, i.e., [`A`, `B`), that can be safely iterated over.
+/// `seq` supports both integral (e.g., `int`, `char`, `uint32_t`) and enum
+/// types. `seq_inclusive(A, B)` produces a sequence of values from `A` to `B`,
+/// including `B`.
+///
+/// Examples with integral types:
+/// ```
+/// for (int x : seq(0, 3))
+/// outs() << x << " ";
+/// ```
+///
+/// Prints: `0 1 2 `.
+///
+/// ```
+/// for (int x : seq_inclusive(0, 3))
+/// outs() << x << " ";
+/// ```
+///
+/// Prints: `0 1 2 3 `.
+///
+/// Similar to `seq` and `seq_inclusive`, the `enum_seq` and
+/// `enum_seq_inclusive` functions produce sequences of enum values that can be
+/// iterated over.
+/// To enable iteration with enum types, you need to either mark enums as safe
+/// to iterate on by specializing `enum_iteration_traits`, or opt into
+/// potentially unsafe iteration at every callsite by passing
+/// `force_iteration_on_noniterable_enum`.
+///
+/// Examples with enum types:
+/// ```
+/// namespace X {
+/// enum class MyEnum : unsigned {A = 0, B, C};
+/// } // namespace X
+///
+/// template <> struct enum_iteration_traits<X::MyEnum> {
+/// static constexpr bool is_iterable = true;
+/// };
+///
+/// class MyClass {
+/// public:
+/// enum Safe { D = 3, E, F };
+/// enum MaybeUnsafe { G = 1, H = 2, I = 4 };
+/// };
+///
+/// template <> struct enum_iteration_traits<MyClass::Safe> {
+/// static constexpr bool is_iterable = true;
+/// };
+/// ```
+///
+/// ```
+/// for (auto v : enum_seq(MyClass::Safe::D, MyClass::Safe::F))
+/// outs() << int(v) << " ";
+/// ```
+///
+/// Prints: `3 4 `.
+///
+/// ```
+/// for (auto v : enum_seq(MyClass::MaybeUnsafe::H, MyClass::MaybeUnsafe::I,
+/// force_iteration_on_noniterable_enum))
+/// outs() << int(v) << " ";
+/// ```
+///
+/// Prints: `2 3 `.
+///
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_ADT_SEQUENCE_H
+#define LLVM_ADT_SEQUENCE_H
+
+#include <cassert> // assert
+#include <iterator> // std::random_access_iterator_tag
+#include <limits> // std::numeric_limits
+#include <type_traits> // std::is_integral, std::is_enum, std::underlying_type,
+ // std::enable_if
+
+#include "llvm/Support/MathExtras.h" // AddOverflow / SubOverflow
+
+namespace llvm {
+
+// Enum traits that mark enums as safe or unsafe to iterate over.
+// By default, enum types are *not* considered safe for iteration.
+// To allow iteration for your enum type, provide a specialization with
+// `is_iterable` set to `true` in the `llvm` namespace.
+// Alternatively, you can pass the `force_iteration_on_noniterable_enum` tag
+// to `enum_seq` or `enum_seq_inclusive`.
+template <typename EnumT> struct enum_iteration_traits {
+ static constexpr bool is_iterable = false;
+};
+
+struct force_iteration_on_noniterable_enum_t {
+ explicit force_iteration_on_noniterable_enum_t() = default;
+};
+
+// TODO: Make this `inline` once we update to C++17 to avoid ODR violations.
+constexpr force_iteration_on_noniterable_enum_t
+ force_iteration_on_noniterable_enum;
+
+namespace detail {
+
+// Returns whether a value of type U can be represented with type T.
+template <typename T, typename U> bool canTypeFitValue(const U Value) {
+ const intmax_t BotT = intmax_t(std::numeric_limits<T>::min());
+ const intmax_t BotU = intmax_t(std::numeric_limits<U>::min());
+ const uintmax_t TopT = uintmax_t(std::numeric_limits<T>::max());
+ const uintmax_t TopU = uintmax_t(std::numeric_limits<U>::max());
+ return !((BotT > BotU && Value < static_cast<U>(BotT)) ||
+ (TopT < TopU && Value > static_cast<U>(TopT)));
+}
+
+// An integer type that asserts when:
+// - constructed from a value that doesn't fit into intmax_t,
+// - cast to a type that cannot hold the current value,
+// - its internal representation overflows.
+struct CheckedInt {
+ // Integral constructor, asserts if Value cannot be represented as intmax_t.
+ template <typename Integral, typename std::enable_if_t<
+ std::is_integral<Integral>::value, bool> = 0>
+ static CheckedInt from(Integral FromValue) {
+ if (!canTypeFitValue<intmax_t>(FromValue))
+ assertOutOfBounds();
+ CheckedInt Result;
+ Result.Value = static_cast<intmax_t>(FromValue);
+ return Result;
+ }
+
+ // Enum constructor, asserts if Value cannot be represented as intmax_t.
+ template <typename Enum,
+ typename std::enable_if_t<std::is_enum<Enum>::value, bool> = 0>
+ static CheckedInt from(Enum FromValue) {
+ using type = typename std::underlying_type<Enum>::type;
+ return from<type>(static_cast<type>(FromValue));
+ }
+
+ // Equality
+ bool operator==(const CheckedInt &O) const { return Value == O.Value; }
+ bool operator!=(const CheckedInt &O) const { return Value != O.Value; }
+
+ CheckedInt operator+(intmax_t Offset) const {
+ CheckedInt Result;
+ if (AddOverflow(Value, Offset, Result.Value))
+ assertOutOfBounds();
+ return Result;
+ }
+
+ intmax_t operator-(CheckedInt Other) const {
+ intmax_t Result;
+ if (SubOverflow(Value, Other.Value, Result))
+ assertOutOfBounds();
+ return Result;
+ }
+
+ // Convert to integral, asserts if Value cannot be represented as Integral.
+ template <typename Integral, typename std::enable_if_t<
+ std::is_integral<Integral>::value, bool> = 0>
+ Integral to() const {
+ if (!canTypeFitValue<Integral>(Value))
+ assertOutOfBounds();
+ return static_cast<Integral>(Value);
+ }
+
+ // Convert to enum, asserts if Value cannot be represented as Enum's
+ // underlying type.
+ template <typename Enum,
+ typename std::enable_if_t<std::is_enum<Enum>::value, bool> = 0>
+ Enum to() const {
+ using type = typename std::underlying_type<Enum>::type;
+ return Enum(to<type>());
+ }
+
+private:
+ static void assertOutOfBounds() { assert(false && "Out of bounds"); }
+
+ intmax_t Value;
+};
+
+template <typename T, bool IsReverse> struct SafeIntIterator {
+ using iterator_category = std::random_access_iterator_tag;
+ using value_type = T;
+ using difference_type = intmax_t;
+ using pointer = T *;
+ using reference = T &;
+
+ // Construct from T.
+ explicit SafeIntIterator(T Value) : SI(CheckedInt::from<T>(Value)) {}
+ // Construct from other direction.
+ SafeIntIterator(const SafeIntIterator<T, !IsReverse> &O) : SI(O.SI) {}
+
+ // Dereference
+ value_type operator*() const { return SI.to<T>(); }
+ // Indexing
+ value_type operator[](intmax_t Offset) const { return *(*this + Offset); }
+
+ // Can be compared for equivalence using the equality/inequality operators.
+ bool operator==(const SafeIntIterator &O) const { return SI == O.SI; }
+ bool operator!=(const SafeIntIterator &O) const { return SI != O.SI; }
+ // Comparison
+ bool operator<(const SafeIntIterator &O) const { return (*this - O) < 0; }
+ bool operator>(const SafeIntIterator &O) const { return (*this - O) > 0; }
+ bool operator<=(const SafeIntIterator &O) const { return (*this - O) <= 0; }
+ bool operator>=(const SafeIntIterator &O) const { return (*this - O) >= 0; }
+
+ // Pre Increment/Decrement
+ void operator++() { offset(1); }
+ void operator--() { offset(-1); }
+
+ // Post Increment/Decrement
+ SafeIntIterator operator++(int) {
+ const auto Copy = *this;
+ ++*this;
+ return Copy;
+ }
+ SafeIntIterator operator--(int) {
+ const auto Copy = *this;
+ --*this;
+ return Copy;
+ }
+
+ // Compound assignment operators
+ void operator+=(intmax_t Offset) { offset(Offset); }
+ void operator-=(intmax_t Offset) { offset(-Offset); }
+
+ // Arithmetic
+ SafeIntIterator operator+(intmax_t Offset) const { return add(Offset); }
+ SafeIntIterator operator-(intmax_t Offset) const { return add(-Offset); }
+
+ // Difference
+ intmax_t operator-(const SafeIntIterator &O) const {
+ return IsReverse ? O.SI - SI : SI - O.SI;
+ }
+
+private:
+ SafeIntIterator(const CheckedInt &SI) : SI(SI) {}
+
+ static intmax_t getOffset(intmax_t Offset) {
+ return IsReverse ? -Offset : Offset;
+ }
+
+ CheckedInt add(intmax_t Offset) const { return SI + getOffset(Offset); }
+
+ void offset(intmax_t Offset) { SI = SI + getOffset(Offset); }
+
+ CheckedInt SI;
+
+ // To allow construction from the other direction.
+ template <typename, bool> friend struct SafeIntIterator;
+};
+
+} // namespace detail
+
+template <typename T> struct iota_range {
+ using value_type = T;
+ using reference = T &;
+ using const_reference = const T &;
+ using iterator = detail::SafeIntIterator<value_type, false>;
+ using const_iterator = iterator;
+ using reverse_iterator = detail::SafeIntIterator<value_type, true>;
+ using const_reverse_iterator = reverse_iterator;
+ using difference_type = intmax_t;
+ using size_type = std::size_t;
+
+ explicit iota_range(T Begin, T End, bool Inclusive)
+ : BeginValue(Begin), PastEndValue(End) {
+ assert(Begin <= End && "Begin must be less than or equal to End.");
+ if (Inclusive)
+ ++PastEndValue;
+ }
+
+ size_t size() const { return PastEndValue - BeginValue; }
+ bool empty() const { return BeginValue == PastEndValue; }
+
+ auto begin() const { return const_iterator(BeginValue); }
+ auto end() const { return const_iterator(PastEndValue); }
+
+ auto rbegin() const { return const_reverse_iterator(PastEndValue - 1); }
+ auto rend() const { return const_reverse_iterator(BeginValue - 1); }
+
+private:
+ static_assert(std::is_integral<T>::value || std::is_enum<T>::value,
+ "T must be an integral or enum type");
+ static_assert(std::is_same<T, std::remove_cv_t<T>>::value,
+ "T must not be const nor volatile");
+
+ iterator BeginValue;
+ iterator PastEndValue;
+};
+
+/// Iterate over an integral type from Begin up to - but not including - End.
+/// Note: Begin and End values have to be within [INTMAX_MIN, INTMAX_MAX] for
+/// forward iteration (resp. [INTMAX_MIN + 1, INTMAX_MAX] for reverse
+/// iteration).
+template <typename T, typename = std::enable_if_t<std::is_integral<T>::value &&
+ !std::is_enum<T>::value>>
+auto seq(T Begin, T End) {
+ return iota_range<T>(Begin, End, false);
+}
+
+/// Iterate over an integral type from Begin to End inclusive.
+/// Note: Begin and End values have to be within [INTMAX_MIN, INTMAX_MAX - 1]
+/// for forward iteration (resp. [INTMAX_MIN + 1, INTMAX_MAX - 1] for reverse
+/// iteration).
+template <typename T, typename = std::enable_if_t<std::is_integral<T>::value &&
+ !std::is_enum<T>::value>>
+auto seq_inclusive(T Begin, T End) {
+ return iota_range<T>(Begin, End, true);
+}
+
+/// Iterate over an enum type from Begin up to - but not including - End.
+/// Note: `enum_seq` will generate each consecutive value, even if no
+/// enumerator with that value exists.
+/// Note: Begin and End values have to be within [INTMAX_MIN, INTMAX_MAX] for
+/// forward iteration (resp. [INTMAX_MIN + 1, INTMAX_MAX] for reverse
+/// iteration).
+template <typename EnumT,
+ typename = std::enable_if_t<std::is_enum<EnumT>::value>>
+auto enum_seq(EnumT Begin, EnumT End) {
+ static_assert(enum_iteration_traits<EnumT>::is_iterable,
+ "Enum type is not marked as iterable.");
+ return iota_range<EnumT>(Begin, End, false);
+}
+
+/// Iterate over an enum type from Begin up to - but not including - End, even
+/// when `EnumT` is not marked as safely iterable by `enum_iteration_traits`.
+/// Note: `enum_seq` will generate each consecutive value, even if no
+/// enumerator with that value exists.
+/// Note: Begin and End values have to be within [INTMAX_MIN, INTMAX_MAX] for
+/// forward iteration (resp. [INTMAX_MIN + 1, INTMAX_MAX] for reverse
+/// iteration).
+template <typename EnumT,
+ typename = std::enable_if_t<std::is_enum<EnumT>::value>>
+auto enum_seq(EnumT Begin, EnumT End, force_iteration_on_noniterable_enum_t) {
+ return iota_range<EnumT>(Begin, End, false);
+}
+
+/// Iterate over an enum type from Begin to End inclusive.
+/// Note: `enum_seq_inclusive` will generate each consecutive value, even if no
+/// enumerator with that value exists.
+/// Note: Begin and End values have to be within [INTMAX_MIN, INTMAX_MAX - 1]
+/// for forward iteration (resp. [INTMAX_MIN + 1, INTMAX_MAX - 1] for reverse
+/// iteration).
+template <typename EnumT,
+ typename = std::enable_if_t<std::is_enum<EnumT>::value>>
+auto enum_seq_inclusive(EnumT Begin, EnumT End) {
+ static_assert(enum_iteration_traits<EnumT>::is_iterable,
+ "Enum type is not marked as iterable.");
+ return iota_range<EnumT>(Begin, End, true);
+}
+
+/// Iterate over an enum type from Begin to End inclusive, even when `EnumT`
+/// is not marked as safely iterable by `enum_iteration_traits`.
+/// Note: `enum_seq_inclusive` will generate each consecutive value, even if no
+/// enumerator with that value exists.
+/// Note: Begin and End values have to be within [INTMAX_MIN, INTMAX_MAX - 1]
+/// for forward iteration (resp. [INTMAX_MIN + 1, INTMAX_MAX - 1] for reverse
+/// iteration).
+template <typename EnumT,
+ typename = std::enable_if_t<std::is_enum<EnumT>::value>>
+auto enum_seq_inclusive(EnumT Begin, EnumT End,
+ force_iteration_on_noniterable_enum_t) {
+ return iota_range<EnumT>(Begin, End, true);
+}
+
+} // end namespace llvm
+
+#endif // LLVM_ADT_SEQUENCE_H
+
+#ifdef __GNUC__
+#pragma GCC diagnostic pop
+#endif
diff --git a/contrib/libs/llvm14/include/llvm/ADT/SetOperations.h b/contrib/libs/llvm14/include/llvm/ADT/SetOperations.h
new file mode 100644
index 0000000000..ffddec525e
--- /dev/null
+++ b/contrib/libs/llvm14/include/llvm/ADT/SetOperations.h
@@ -0,0 +1,94 @@
+#pragma once
+
+#ifdef __GNUC__
+#pragma GCC diagnostic push
+#pragma GCC diagnostic ignored "-Wunused-parameter"
+#endif
+
+//===-- llvm/ADT/SetOperations.h - Generic Set Operations -------*- C++ -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+///
+/// \file
+/// This file defines generic set operations that may be used on sets of
+/// different types and with different element types.
+///
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_ADT_SETOPERATIONS_H
+#define LLVM_ADT_SETOPERATIONS_H
+
+namespace llvm {
+
+/// set_union(A, B) - Compute A := A u B, return whether A changed.
+///
+template <class S1Ty, class S2Ty>
+bool set_union(S1Ty &S1, const S2Ty &S2) {
+ bool Changed = false;
+
+ for (typename S2Ty::const_iterator SI = S2.begin(), SE = S2.end();
+ SI != SE; ++SI)
+ if (S1.insert(*SI).second)
+ Changed = true;
+
+ return Changed;
+}
+
+/// set_intersect(A, B) - Compute A := A ^ B
+/// Identical to set_intersection, except that it works on set<>'s and
+/// is nicer to use. Functionally, this iterates through S1, removing
+/// elements that are not contained in S2.
+///
+template <class S1Ty, class S2Ty>
+void set_intersect(S1Ty &S1, const S2Ty &S2) {
+ for (typename S1Ty::iterator I = S1.begin(); I != S1.end();) {
+ const auto &E = *I;
+ ++I;
+ if (!S2.count(E)) S1.erase(E); // Erase element if not in S2
+ }
+}
+
+/// set_difference(A, B) - Return A - B
+///
+template <class S1Ty, class S2Ty>
+S1Ty set_difference(const S1Ty &S1, const S2Ty &S2) {
+ S1Ty Result;
+ for (typename S1Ty::const_iterator SI = S1.begin(), SE = S1.end();
+ SI != SE; ++SI)
+ if (!S2.count(*SI)) // if the element is not in set2
+ Result.insert(*SI);
+ return Result;
+}
+
+/// set_subtract(A, B) - Compute A := A - B
+///
+template <class S1Ty, class S2Ty>
+void set_subtract(S1Ty &S1, const S2Ty &S2) {
+ for (typename S2Ty::const_iterator SI = S2.begin(), SE = S2.end();
+ SI != SE; ++SI)
+ S1.erase(*SI);
+}
+
+/// set_is_subset(A, B) - Return true iff A is a subset of B.
+///
+template <class S1Ty, class S2Ty>
+bool set_is_subset(const S1Ty &S1, const S2Ty &S2) {
+ if (S1.size() > S2.size())
+ return false;
+ for (const auto It : S1)
+ if (!S2.count(It))
+ return false;
+ return true;
+}
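+
+// Usage sketch (editorial addition, not in the upstream header):
+//   std::set<int> A = {1, 2}, B = {2, 3};
+//   bool Changed = llvm::set_union(A, B); // A == {1, 2, 3}, Changed == true.
+//   llvm::set_subtract(A, B);             // A == {1}.
+//   bool Sub = llvm::set_is_subset(A, B); // false: 1 is not in B.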
+
+} // End llvm namespace
+
+#endif
+
+#ifdef __GNUC__
+#pragma GCC diagnostic pop
+#endif
diff --git a/contrib/libs/llvm14/include/llvm/ADT/SetVector.h b/contrib/libs/llvm14/include/llvm/ADT/SetVector.h
new file mode 100644
index 0000000000..006a2e85c3
--- /dev/null
+++ b/contrib/libs/llvm14/include/llvm/ADT/SetVector.h
@@ -0,0 +1,350 @@
+#pragma once
+
+#ifdef __GNUC__
+#pragma GCC diagnostic push
+#pragma GCC diagnostic ignored "-Wunused-parameter"
+#endif
+
+//===- llvm/ADT/SetVector.h - Set with insert order iteration ---*- C++ -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+///
+/// \file
+/// This file implements a set that has insertion order iteration
+/// characteristics. This is useful for keeping a set of things that need to be
+/// visited later but in a deterministic order (insertion order). The interface
+/// is purposefully minimal.
+///
+/// This file defines SetVector and SmallSetVector; the latter performs no
+/// heap allocations while it holds fewer than a certain number of elements.
+///
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_ADT_SETVECTOR_H
+#define LLVM_ADT_SETVECTOR_H
+
+#include "llvm/ADT/ArrayRef.h"
+#include "llvm/ADT/DenseSet.h"
+#include "llvm/ADT/STLExtras.h"
+#include "llvm/Support/Compiler.h"
+#include <cassert>
+#include <iterator>
+#include <vector>
+
+namespace llvm {
+
+/// A vector that has set insertion semantics.
+///
+/// This adapter class provides a way to keep a set of things that also has the
+/// property of a deterministic iteration order. The order of iteration is the
+/// order of insertion.
+template <typename T, typename Vector = std::vector<T>,
+ typename Set = DenseSet<T>>
+class SetVector {
+public:
+ using value_type = T;
+ using key_type = T;
+ using reference = T&;
+ using const_reference = const T&;
+ using set_type = Set;
+ using vector_type = Vector;
+ using iterator = typename vector_type::const_iterator;
+ using const_iterator = typename vector_type::const_iterator;
+ using reverse_iterator = typename vector_type::const_reverse_iterator;
+ using const_reverse_iterator = typename vector_type::const_reverse_iterator;
+ using size_type = typename vector_type::size_type;
+
+ /// Construct an empty SetVector
+ SetVector() = default;
+
+ /// Initialize a SetVector with a range of elements
+ template<typename It>
+ SetVector(It Start, It End) {
+ insert(Start, End);
+ }
+
+ ArrayRef<T> getArrayRef() const { return vector_; }
+
+ /// Clear the SetVector and return the underlying vector.
+ Vector takeVector() {
+ set_.clear();
+ return std::move(vector_);
+ }
+
+ /// Determine if the SetVector is empty or not.
+ bool empty() const {
+ return vector_.empty();
+ }
+
+ /// Determine the number of elements in the SetVector.
+ size_type size() const {
+ return vector_.size();
+ }
+
+ /// Get an iterator to the beginning of the SetVector.
+ iterator begin() {
+ return vector_.begin();
+ }
+
+ /// Get a const_iterator to the beginning of the SetVector.
+ const_iterator begin() const {
+ return vector_.begin();
+ }
+
+ /// Get an iterator to the end of the SetVector.
+ iterator end() {
+ return vector_.end();
+ }
+
+ /// Get a const_iterator to the end of the SetVector.
+ const_iterator end() const {
+ return vector_.end();
+ }
+
+ /// Get a reverse_iterator to the end of the SetVector.
+ reverse_iterator rbegin() {
+ return vector_.rbegin();
+ }
+
+ /// Get a const_reverse_iterator to the end of the SetVector.
+ const_reverse_iterator rbegin() const {
+ return vector_.rbegin();
+ }
+
+ /// Get a reverse_iterator to the beginning of the SetVector.
+ reverse_iterator rend() {
+ return vector_.rend();
+ }
+
+ /// Get a const_reverse_iterator to the beginning of the SetVector.
+ const_reverse_iterator rend() const {
+ return vector_.rend();
+ }
+
+ /// Return the first element of the SetVector.
+ const T &front() const {
+ assert(!empty() && "Cannot call front() on empty SetVector!");
+ return vector_.front();
+ }
+
+ /// Return the last element of the SetVector.
+ const T &back() const {
+ assert(!empty() && "Cannot call back() on empty SetVector!");
+ return vector_.back();
+ }
+
+ /// Index into the SetVector.
+ const_reference operator[](size_type n) const {
+ assert(n < vector_.size() && "SetVector access out of range!");
+ return vector_[n];
+ }
+
+ /// Insert a new element into the SetVector.
+ /// \returns true if the element was inserted into the SetVector.
+ bool insert(const value_type &X) {
+ bool result = set_.insert(X).second;
+ if (result)
+ vector_.push_back(X);
+ return result;
+ }
+
+ /// Insert a range of elements into the SetVector.
+ template<typename It>
+ void insert(It Start, It End) {
+ for (; Start != End; ++Start)
+ if (set_.insert(*Start).second)
+ vector_.push_back(*Start);
+ }
+
+ /// Remove an item from the set vector.
+ bool remove(const value_type& X) {
+ if (set_.erase(X)) {
+ typename vector_type::iterator I = find(vector_, X);
+ assert(I != vector_.end() && "Corrupted SetVector instances!");
+ vector_.erase(I);
+ return true;
+ }
+ return false;
+ }
+
+ /// Erase a single element from the set vector.
+ /// \returns an iterator pointing to the next element that followed the
+ /// element erased. This is the end of the SetVector if the last element is
+ /// erased.
+ iterator erase(iterator I) {
+ const key_type &V = *I;
+ assert(set_.count(V) && "Corrupted SetVector instances!");
+ set_.erase(V);
+
+ // FIXME: No need to use the non-const iterator when built with
+ // std::vector.erase(const_iterator) as defined in C++11. This is for
+ // compatibility with non-standard libstdc++ up to 4.8 (fixed in 4.9).
+ auto NI = vector_.begin();
+ std::advance(NI, std::distance<iterator>(NI, I));
+
+ return vector_.erase(NI);
+ }
+
+ /// Remove items from the set vector based on a predicate function.
+ ///
+ /// This is intended to be equivalent to the following code, if we could
+ /// write it:
+ ///
+ /// \code
+ /// V.erase(remove_if(V, P), V.end());
+ /// \endcode
+ ///
+ /// However, SetVector doesn't expose non-const iterators, making any
+ /// algorithm like remove_if impossible to use.
+ ///
+ /// \returns true if any element is removed.
+ template <typename UnaryPredicate>
+ bool remove_if(UnaryPredicate P) {
+ typename vector_type::iterator I =
+ llvm::remove_if(vector_, TestAndEraseFromSet<UnaryPredicate>(P, set_));
+ if (I == vector_.end())
+ return false;
+ vector_.erase(I, vector_.end());
+ return true;
+ }
+
+ /// Check if the SetVector contains the given key.
+ bool contains(const key_type &key) const {
+ return set_.find(key) != set_.end();
+ }
+
+ /// Count the number of elements of a given key in the SetVector.
+ /// \returns 0 if the element is not in the SetVector, 1 if it is.
+ size_type count(const key_type &key) const {
+ return set_.count(key);
+ }
+
+ /// Completely clear the SetVector
+ void clear() {
+ set_.clear();
+ vector_.clear();
+ }
+
+ /// Remove the last element of the SetVector.
+ void pop_back() {
+ assert(!empty() && "Cannot remove an element from an empty SetVector!");
+ set_.erase(back());
+ vector_.pop_back();
+ }
+
+ LLVM_NODISCARD T pop_back_val() {
+ T Ret = back();
+ pop_back();
+ return Ret;
+ }
+
+ bool operator==(const SetVector &that) const {
+ return vector_ == that.vector_;
+ }
+
+ bool operator!=(const SetVector &that) const {
+ return vector_ != that.vector_;
+ }
+
+ /// Compute This := This u S, return whether 'This' changed.
+ /// TODO: We should be able to use set_union from SetOperations.h, but
+ /// SetVector interface is inconsistent with DenseSet.
+ template <class STy>
+ bool set_union(const STy &S) {
+ bool Changed = false;
+
+ for (typename STy::const_iterator SI = S.begin(), SE = S.end(); SI != SE;
+ ++SI)
+ if (insert(*SI))
+ Changed = true;
+
+ return Changed;
+ }
+
+ /// Compute This := This - B
+ /// TODO: We should be able to use set_subtract from SetOperations.h, but
+ /// SetVector interface is inconsistent with DenseSet.
+ template <class STy>
+ void set_subtract(const STy &S) {
+ for (typename STy::const_iterator SI = S.begin(), SE = S.end(); SI != SE;
+ ++SI)
+ remove(*SI);
+ }
+
+ void swap(SetVector<T, Vector, Set> &RHS) {
+ set_.swap(RHS.set_);
+ vector_.swap(RHS.vector_);
+ }
+
+private:
+ /// A wrapper predicate designed for use with std::remove_if.
+ ///
+ /// This predicate wraps a predicate suitable for use with std::remove_if to
+ /// call set_.erase(x) on each element which is slated for removal.
+ template <typename UnaryPredicate>
+ class TestAndEraseFromSet {
+ UnaryPredicate P;
+ set_type &set_;
+
+ public:
+ TestAndEraseFromSet(UnaryPredicate P, set_type &set_)
+ : P(std::move(P)), set_(set_) {}
+
+ template <typename ArgumentT>
+ bool operator()(const ArgumentT &Arg) {
+ if (P(Arg)) {
+ set_.erase(Arg);
+ return true;
+ }
+ return false;
+ }
+ };
+
+ set_type set_; ///< The set.
+ vector_type vector_; ///< The vector.
+};
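+
+// Usage sketch (editorial addition): insertion order is preserved and
+// duplicates are rejected.
+//   llvm::SetVector<int> SV;
+//   SV.insert(3);
+//   SV.insert(1);
+//   SV.insert(3); // Duplicate: returns false, SV unchanged.
+//   // Iteration visits 3 then 1 (insertion order, not sorted order).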
+
+/// A SetVector that performs no allocations if smaller than
+/// a certain size.
+template <typename T, unsigned N>
+class SmallSetVector
+ : public SetVector<T, SmallVector<T, N>, SmallDenseSet<T, N>> {
+public:
+ SmallSetVector() = default;
+
+ /// Initialize a SmallSetVector with a range of elements
+ template<typename It>
+ SmallSetVector(It Start, It End) {
+ this->insert(Start, End);
+ }
+};
+
+} // end namespace llvm
+
+namespace std {
+
+/// Implement std::swap in terms of SetVector swap.
+template<typename T, typename V, typename S>
+inline void
+swap(llvm::SetVector<T, V, S> &LHS, llvm::SetVector<T, V, S> &RHS) {
+ LHS.swap(RHS);
+}
+
+/// Implement std::swap in terms of SmallSetVector swap.
+template<typename T, unsigned N>
+inline void
+swap(llvm::SmallSetVector<T, N> &LHS, llvm::SmallSetVector<T, N> &RHS) {
+ LHS.swap(RHS);
+}
+
+} // end namespace std
+
+#endif // LLVM_ADT_SETVECTOR_H
+
+#ifdef __GNUC__
+#pragma GCC diagnostic pop
+#endif
diff --git a/contrib/libs/llvm14/include/llvm/ADT/SmallBitVector.h b/contrib/libs/llvm14/include/llvm/ADT/SmallBitVector.h
new file mode 100644
index 0000000000..aae99f6b7b
--- /dev/null
+++ b/contrib/libs/llvm14/include/llvm/ADT/SmallBitVector.h
@@ -0,0 +1,772 @@
+#pragma once
+
+#ifdef __GNUC__
+#pragma GCC diagnostic push
+#pragma GCC diagnostic ignored "-Wunused-parameter"
+#endif
+
+//===- llvm/ADT/SmallBitVector.h - 'Normally small' bit vectors -*- C++ -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+///
+/// \file
+/// This file implements the SmallBitVector class.
+///
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_ADT_SMALLBITVECTOR_H
+#define LLVM_ADT_SMALLBITVECTOR_H
+
+#include "llvm/ADT/BitVector.h"
+#include "llvm/ADT/iterator_range.h"
+#include "llvm/Support/MathExtras.h"
+#include <algorithm>
+#include <cassert>
+#include <climits>
+#include <cstddef>
+#include <cstdint>
+#include <limits>
+#include <utility>
+
+namespace llvm {
+
+/// This is a 'bitvector' (really, a variable-sized bit array), optimized for
+/// the case when the array is small. It contains one pointer-sized field, which
+/// is directly used as a plain collection of bits when possible, or as a
+/// pointer to a larger heap-allocated array when necessary. This allows normal
+/// "small" cases to be fast without losing generality for large inputs.
+class SmallBitVector {
+ // TODO: In "large" mode, a pointer to a BitVector is used, leading to an
+ // unnecessary level of indirection. It would be more efficient to use a
+ // pointer to memory containing size, allocation size, and the array of bits.
+ uintptr_t X = 1;
+
+ enum {
+ // The number of bits in this class.
+ NumBaseBits = sizeof(uintptr_t) * CHAR_BIT,
+
+ // One bit is used to discriminate between small and large mode. The
+ // remaining bits are used for the small-mode representation.
+ SmallNumRawBits = NumBaseBits - 1,
+
+ // A few more bits are used to store the size of the bit set in small mode.
+ // Theoretically this is a ceil-log2. These bits are encoded in the most
+ // significant bits of the raw bits.
+ SmallNumSizeBits = (NumBaseBits == 32 ? 5 :
+ NumBaseBits == 64 ? 6 :
+ SmallNumRawBits),
+
+ // The remaining bits are used to store the actual set in small mode.
+ SmallNumDataBits = SmallNumRawBits - SmallNumSizeBits
+ };
+
+ static_assert(NumBaseBits == 64 || NumBaseBits == 32,
+ "Unsupported word size");
+
+public:
+ using size_type = uintptr_t;
+
+ // Encapsulation of a single bit.
+ class reference {
+ SmallBitVector &TheVector;
+ unsigned BitPos;
+
+ public:
+ reference(SmallBitVector &b, unsigned Idx) : TheVector(b), BitPos(Idx) {}
+
+ reference(const reference&) = default;
+
+ reference& operator=(reference t) {
+ *this = bool(t);
+ return *this;
+ }
+
+ reference& operator=(bool t) {
+ if (t)
+ TheVector.set(BitPos);
+ else
+ TheVector.reset(BitPos);
+ return *this;
+ }
+
+ operator bool() const {
+ return const_cast<const SmallBitVector &>(TheVector).operator[](BitPos);
+ }
+ };
+
+private:
+ BitVector *getPointer() const {
+ assert(!isSmall());
+ return reinterpret_cast<BitVector *>(X);
+ }
+
+ void switchToSmall(uintptr_t NewSmallBits, size_type NewSize) {
+ X = 1;
+ setSmallSize(NewSize);
+ setSmallBits(NewSmallBits);
+ }
+
+ void switchToLarge(BitVector *BV) {
+ X = reinterpret_cast<uintptr_t>(BV);
+ assert(!isSmall() && "Tried to use an unaligned pointer");
+ }
+
+ // Return all the bits used for the "small" representation; this includes
+ // bits for the size as well as the element bits.
+ uintptr_t getSmallRawBits() const {
+ assert(isSmall());
+ return X >> 1;
+ }
+
+ void setSmallRawBits(uintptr_t NewRawBits) {
+ assert(isSmall());
+ X = (NewRawBits << 1) | uintptr_t(1);
+ }
+
+ // Return the size.
+ size_type getSmallSize() const {
+ return getSmallRawBits() >> SmallNumDataBits;
+ }
+
+ void setSmallSize(size_type Size) {
+ setSmallRawBits(getSmallBits() | (Size << SmallNumDataBits));
+ }
+
+ // Return the element bits.
+ uintptr_t getSmallBits() const {
+ return getSmallRawBits() & ~(~uintptr_t(0) << getSmallSize());
+ }
+
+ void setSmallBits(uintptr_t NewBits) {
+ setSmallRawBits((NewBits & ~(~uintptr_t(0) << getSmallSize())) |
+ (getSmallSize() << SmallNumDataBits));
+ }
+
+public:
+ /// Creates an empty bitvector.
+ SmallBitVector() = default;
+
+  /// Creates a bitvector of the specified number of bits. All bits are
+  /// initialized to the specified value.
+ explicit SmallBitVector(unsigned s, bool t = false) {
+ if (s <= SmallNumDataBits)
+ switchToSmall(t ? ~uintptr_t(0) : 0, s);
+ else
+ switchToLarge(new BitVector(s, t));
+ }
+
+ /// SmallBitVector copy ctor.
+ SmallBitVector(const SmallBitVector &RHS) {
+ if (RHS.isSmall())
+ X = RHS.X;
+ else
+ switchToLarge(new BitVector(*RHS.getPointer()));
+ }
+
+ SmallBitVector(SmallBitVector &&RHS) : X(RHS.X) {
+ RHS.X = 1;
+ }
+
+ ~SmallBitVector() {
+ if (!isSmall())
+ delete getPointer();
+ }
+
+ using const_set_bits_iterator = const_set_bits_iterator_impl<SmallBitVector>;
+ using set_iterator = const_set_bits_iterator;
+
+ const_set_bits_iterator set_bits_begin() const {
+ return const_set_bits_iterator(*this);
+ }
+
+ const_set_bits_iterator set_bits_end() const {
+ return const_set_bits_iterator(*this, -1);
+ }
+
+ iterator_range<const_set_bits_iterator> set_bits() const {
+ return make_range(set_bits_begin(), set_bits_end());
+ }
+
+ bool isSmall() const { return X & uintptr_t(1); }
+
+ /// Tests whether there are no bits in this bitvector.
+ bool empty() const {
+ return isSmall() ? getSmallSize() == 0 : getPointer()->empty();
+ }
+
+ /// Returns the number of bits in this bitvector.
+ size_type size() const {
+ return isSmall() ? getSmallSize() : getPointer()->size();
+ }
+
+ /// Returns the number of bits which are set.
+ size_type count() const {
+ if (isSmall()) {
+ uintptr_t Bits = getSmallBits();
+ return countPopulation(Bits);
+ }
+ return getPointer()->count();
+ }
+
+ /// Returns true if any bit is set.
+ bool any() const {
+ if (isSmall())
+ return getSmallBits() != 0;
+ return getPointer()->any();
+ }
+
+ /// Returns true if all bits are set.
+ bool all() const {
+ if (isSmall())
+ return getSmallBits() == (uintptr_t(1) << getSmallSize()) - 1;
+ return getPointer()->all();
+ }
+
+ /// Returns true if none of the bits are set.
+ bool none() const {
+ if (isSmall())
+ return getSmallBits() == 0;
+ return getPointer()->none();
+ }
+
+ /// Returns the index of the first set bit, -1 if none of the bits are set.
+ int find_first() const {
+ if (isSmall()) {
+ uintptr_t Bits = getSmallBits();
+ if (Bits == 0)
+ return -1;
+ return countTrailingZeros(Bits);
+ }
+ return getPointer()->find_first();
+ }
+
+ int find_last() const {
+ if (isSmall()) {
+ uintptr_t Bits = getSmallBits();
+ if (Bits == 0)
+ return -1;
+ return NumBaseBits - countLeadingZeros(Bits) - 1;
+ }
+ return getPointer()->find_last();
+ }
+
+ /// Returns the index of the first unset bit, -1 if all of the bits are set.
+ int find_first_unset() const {
+ if (isSmall()) {
+ if (count() == getSmallSize())
+ return -1;
+
+ uintptr_t Bits = getSmallBits();
+ return countTrailingOnes(Bits);
+ }
+ return getPointer()->find_first_unset();
+ }
+
+ int find_last_unset() const {
+ if (isSmall()) {
+ if (count() == getSmallSize())
+ return -1;
+
+ uintptr_t Bits = getSmallBits();
+ // Set unused bits.
+ Bits |= ~uintptr_t(0) << getSmallSize();
+ return NumBaseBits - countLeadingOnes(Bits) - 1;
+ }
+ return getPointer()->find_last_unset();
+ }
+
+ /// Returns the index of the next set bit following the "Prev" bit.
+ /// Returns -1 if the next set bit is not found.
+ int find_next(unsigned Prev) const {
+ if (isSmall()) {
+ uintptr_t Bits = getSmallBits();
+ // Mask off previous bits.
+ Bits &= ~uintptr_t(0) << (Prev + 1);
+ if (Bits == 0 || Prev + 1 >= getSmallSize())
+ return -1;
+ return countTrailingZeros(Bits);
+ }
+ return getPointer()->find_next(Prev);
+ }
+
+ /// Returns the index of the next unset bit following the "Prev" bit.
+ /// Returns -1 if the next unset bit is not found.
+ int find_next_unset(unsigned Prev) const {
+ if (isSmall()) {
+ uintptr_t Bits = getSmallBits();
+ // Mask in previous bits.
+ Bits |= (uintptr_t(1) << (Prev + 1)) - 1;
+ // Mask in unused bits.
+ Bits |= ~uintptr_t(0) << getSmallSize();
+
+ if (Bits == ~uintptr_t(0) || Prev + 1 >= getSmallSize())
+ return -1;
+ return countTrailingOnes(Bits);
+ }
+ return getPointer()->find_next_unset(Prev);
+ }
+
+  /// find_prev - Returns the index of the first set bit that precedes the
+  /// bit at \p PriorTo. Returns -1 if all previous bits are unset.
+ int find_prev(unsigned PriorTo) const {
+ if (isSmall()) {
+ if (PriorTo == 0)
+ return -1;
+
+ --PriorTo;
+ uintptr_t Bits = getSmallBits();
+ Bits &= maskTrailingOnes<uintptr_t>(PriorTo + 1);
+ if (Bits == 0)
+ return -1;
+
+ return NumBaseBits - countLeadingZeros(Bits) - 1;
+ }
+ return getPointer()->find_prev(PriorTo);
+ }
+
+ /// Clear all bits.
+ void clear() {
+ if (!isSmall())
+ delete getPointer();
+ switchToSmall(0, 0);
+ }
+
+ /// Grow or shrink the bitvector.
+ void resize(unsigned N, bool t = false) {
+ if (!isSmall()) {
+ getPointer()->resize(N, t);
+ } else if (SmallNumDataBits >= N) {
+ uintptr_t NewBits = t ? ~uintptr_t(0) << getSmallSize() : 0;
+ setSmallSize(N);
+ setSmallBits(NewBits | getSmallBits());
+ } else {
+ BitVector *BV = new BitVector(N, t);
+ uintptr_t OldBits = getSmallBits();
+ for (size_type I = 0, E = getSmallSize(); I != E; ++I)
+ (*BV)[I] = (OldBits >> I) & 1;
+ switchToLarge(BV);
+ }
+ }
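+
+  // Illustrative note: a vector created with, say, 8 bits stays inline;
+  // resizing it past the inline capacity (e.g. resize(100)) copies the
+  // existing bits into a freshly allocated BitVector, as the code above shows.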
+
+ void reserve(unsigned N) {
+ if (isSmall()) {
+ if (N > SmallNumDataBits) {
+ uintptr_t OldBits = getSmallRawBits();
+ size_type SmallSize = getSmallSize();
+ BitVector *BV = new BitVector(SmallSize);
+ for (size_type I = 0; I < SmallSize; ++I)
+ if ((OldBits >> I) & 1)
+ BV->set(I);
+ BV->reserve(N);
+ switchToLarge(BV);
+ }
+ } else {
+ getPointer()->reserve(N);
+ }
+ }
+
+ // Set, reset, flip
+ SmallBitVector &set() {
+ if (isSmall())
+ setSmallBits(~uintptr_t(0));
+ else
+ getPointer()->set();
+ return *this;
+ }
+
+ SmallBitVector &set(unsigned Idx) {
+ if (isSmall()) {
+ assert(Idx <= static_cast<unsigned>(
+ std::numeric_limits<uintptr_t>::digits) &&
+ "undefined behavior");
+ setSmallBits(getSmallBits() | (uintptr_t(1) << Idx));
+ }
+ else
+ getPointer()->set(Idx);
+ return *this;
+ }
+
+ /// Efficiently set a range of bits in [I, E)
+ SmallBitVector &set(unsigned I, unsigned E) {
+ assert(I <= E && "Attempted to set backwards range!");
+ assert(E <= size() && "Attempted to set out-of-bounds range!");
+ if (I == E) return *this;
+ if (isSmall()) {
+ uintptr_t EMask = ((uintptr_t)1) << E;
+ uintptr_t IMask = ((uintptr_t)1) << I;
+ uintptr_t Mask = EMask - IMask;
+ setSmallBits(getSmallBits() | Mask);
+ } else
+ getPointer()->set(I, E);
+ return *this;
+ }
+
+ SmallBitVector &reset() {
+ if (isSmall())
+ setSmallBits(0);
+ else
+ getPointer()->reset();
+ return *this;
+ }
+
+ SmallBitVector &reset(unsigned Idx) {
+ if (isSmall())
+ setSmallBits(getSmallBits() & ~(uintptr_t(1) << Idx));
+ else
+ getPointer()->reset(Idx);
+ return *this;
+ }
+
+ /// Efficiently reset a range of bits in [I, E)
+ SmallBitVector &reset(unsigned I, unsigned E) {
+ assert(I <= E && "Attempted to reset backwards range!");
+ assert(E <= size() && "Attempted to reset out-of-bounds range!");
+ if (I == E) return *this;
+ if (isSmall()) {
+ uintptr_t EMask = ((uintptr_t)1) << E;
+ uintptr_t IMask = ((uintptr_t)1) << I;
+ uintptr_t Mask = EMask - IMask;
+ setSmallBits(getSmallBits() & ~Mask);
+ } else
+ getPointer()->reset(I, E);
+ return *this;
+ }
+
+ SmallBitVector &flip() {
+ if (isSmall())
+ setSmallBits(~getSmallBits());
+ else
+ getPointer()->flip();
+ return *this;
+ }
+
+ SmallBitVector &flip(unsigned Idx) {
+ if (isSmall())
+ setSmallBits(getSmallBits() ^ (uintptr_t(1) << Idx));
+ else
+ getPointer()->flip(Idx);
+ return *this;
+ }
+
+ // No argument flip.
+ SmallBitVector operator~() const {
+ return SmallBitVector(*this).flip();
+ }
+
+ // Indexing.
+ reference operator[](unsigned Idx) {
+ assert(Idx < size() && "Out-of-bounds Bit access.");
+ return reference(*this, Idx);
+ }
+
+ bool operator[](unsigned Idx) const {
+ assert(Idx < size() && "Out-of-bounds Bit access.");
+ if (isSmall())
+ return ((getSmallBits() >> Idx) & 1) != 0;
+ return getPointer()->operator[](Idx);
+ }
+
+ /// Return the last element in the vector.
+ bool back() const {
+ assert(!empty() && "Getting last element of empty vector.");
+ return (*this)[size() - 1];
+ }
+
+ bool test(unsigned Idx) const {
+ return (*this)[Idx];
+ }
+
+ // Push single bit to end of vector.
+ void push_back(bool Val) {
+ resize(size() + 1, Val);
+ }
+
+ /// Pop one bit from the end of the vector.
+ void pop_back() {
+ assert(!empty() && "Empty vector has no element to pop.");
+ resize(size() - 1);
+ }
+
+ /// Test if any common bits are set.
+ bool anyCommon(const SmallBitVector &RHS) const {
+ if (isSmall() && RHS.isSmall())
+ return (getSmallBits() & RHS.getSmallBits()) != 0;
+ if (!isSmall() && !RHS.isSmall())
+ return getPointer()->anyCommon(*RHS.getPointer());
+
+ for (unsigned i = 0, e = std::min(size(), RHS.size()); i != e; ++i)
+ if (test(i) && RHS.test(i))
+ return true;
+ return false;
+ }
+
+ // Comparison operators.
+ bool operator==(const SmallBitVector &RHS) const {
+ if (size() != RHS.size())
+ return false;
+ if (isSmall() && RHS.isSmall())
+ return getSmallBits() == RHS.getSmallBits();
+ else if (!isSmall() && !RHS.isSmall())
+ return *getPointer() == *RHS.getPointer();
+ else {
+ for (size_type I = 0, E = size(); I != E; ++I) {
+ if ((*this)[I] != RHS[I])
+ return false;
+ }
+ return true;
+ }
+ }
+
+ bool operator!=(const SmallBitVector &RHS) const {
+ return !(*this == RHS);
+ }
+
+ // Intersection, union, disjoint union.
+ // FIXME BitVector::operator&= does not resize the LHS but this does
+ SmallBitVector &operator&=(const SmallBitVector &RHS) {
+ resize(std::max(size(), RHS.size()));
+ if (isSmall() && RHS.isSmall())
+ setSmallBits(getSmallBits() & RHS.getSmallBits());
+ else if (!isSmall() && !RHS.isSmall())
+ getPointer()->operator&=(*RHS.getPointer());
+ else {
+ size_type I, E;
+ for (I = 0, E = std::min(size(), RHS.size()); I != E; ++I)
+ (*this)[I] = test(I) && RHS.test(I);
+ for (E = size(); I != E; ++I)
+ reset(I);
+ }
+ return *this;
+ }
+
+ /// Reset bits that are set in RHS. Same as *this &= ~RHS.
+ SmallBitVector &reset(const SmallBitVector &RHS) {
+ if (isSmall() && RHS.isSmall())
+ setSmallBits(getSmallBits() & ~RHS.getSmallBits());
+ else if (!isSmall() && !RHS.isSmall())
+ getPointer()->reset(*RHS.getPointer());
+ else
+ for (unsigned i = 0, e = std::min(size(), RHS.size()); i != e; ++i)
+ if (RHS.test(i))
+ reset(i);
+
+ return *this;
+ }
+
+  /// Check if (This - RHS) is nonzero. This is the same as reset(RHS)
+  /// followed by any().
+ bool test(const SmallBitVector &RHS) const {
+ if (isSmall() && RHS.isSmall())
+ return (getSmallBits() & ~RHS.getSmallBits()) != 0;
+ if (!isSmall() && !RHS.isSmall())
+ return getPointer()->test(*RHS.getPointer());
+
+ unsigned i, e;
+ for (i = 0, e = std::min(size(), RHS.size()); i != e; ++i)
+ if (test(i) && !RHS.test(i))
+ return true;
+
+ for (e = size(); i != e; ++i)
+ if (test(i))
+ return true;
+
+ return false;
+ }
+
+ SmallBitVector &operator|=(const SmallBitVector &RHS) {
+ resize(std::max(size(), RHS.size()));
+ if (isSmall() && RHS.isSmall())
+ setSmallBits(getSmallBits() | RHS.getSmallBits());
+ else if (!isSmall() && !RHS.isSmall())
+ getPointer()->operator|=(*RHS.getPointer());
+ else {
+ for (size_type I = 0, E = RHS.size(); I != E; ++I)
+ (*this)[I] = test(I) || RHS.test(I);
+ }
+ return *this;
+ }
+
+ SmallBitVector &operator^=(const SmallBitVector &RHS) {
+ resize(std::max(size(), RHS.size()));
+ if (isSmall() && RHS.isSmall())
+ setSmallBits(getSmallBits() ^ RHS.getSmallBits());
+ else if (!isSmall() && !RHS.isSmall())
+ getPointer()->operator^=(*RHS.getPointer());
+ else {
+ for (size_type I = 0, E = RHS.size(); I != E; ++I)
+ (*this)[I] = test(I) != RHS.test(I);
+ }
+ return *this;
+ }
+
+ SmallBitVector &operator<<=(unsigned N) {
+ if (isSmall())
+ setSmallBits(getSmallBits() << N);
+ else
+ getPointer()->operator<<=(N);
+ return *this;
+ }
+
+ SmallBitVector &operator>>=(unsigned N) {
+ if (isSmall())
+ setSmallBits(getSmallBits() >> N);
+ else
+ getPointer()->operator>>=(N);
+ return *this;
+ }
+
+ // Assignment operator.
+ const SmallBitVector &operator=(const SmallBitVector &RHS) {
+ if (isSmall()) {
+ if (RHS.isSmall())
+ X = RHS.X;
+ else
+ switchToLarge(new BitVector(*RHS.getPointer()));
+ } else {
+ if (!RHS.isSmall())
+ *getPointer() = *RHS.getPointer();
+ else {
+ delete getPointer();
+ X = RHS.X;
+ }
+ }
+ return *this;
+ }
+
+ const SmallBitVector &operator=(SmallBitVector &&RHS) {
+ if (this != &RHS) {
+ clear();
+ swap(RHS);
+ }
+ return *this;
+ }
+
+ void swap(SmallBitVector &RHS) {
+ std::swap(X, RHS.X);
+ }
+
+ /// Add '1' bits from Mask to this vector. Don't resize.
+ /// This computes "*this |= Mask".
+ void setBitsInMask(const uint32_t *Mask, unsigned MaskWords = ~0u) {
+ if (isSmall())
+ applyMask<true, false>(Mask, MaskWords);
+ else
+ getPointer()->setBitsInMask(Mask, MaskWords);
+ }
+
+ /// Clear any bits in this vector that are set in Mask. Don't resize.
+ /// This computes "*this &= ~Mask".
+ void clearBitsInMask(const uint32_t *Mask, unsigned MaskWords = ~0u) {
+ if (isSmall())
+ applyMask<false, false>(Mask, MaskWords);
+ else
+ getPointer()->clearBitsInMask(Mask, MaskWords);
+ }
+
+ /// Add a bit to this vector for every '0' bit in Mask. Don't resize.
+ /// This computes "*this |= ~Mask".
+ void setBitsNotInMask(const uint32_t *Mask, unsigned MaskWords = ~0u) {
+ if (isSmall())
+ applyMask<true, true>(Mask, MaskWords);
+ else
+ getPointer()->setBitsNotInMask(Mask, MaskWords);
+ }
+
+ /// Clear a bit in this vector for every '0' bit in Mask. Don't resize.
+ /// This computes "*this &= Mask".
+ void clearBitsNotInMask(const uint32_t *Mask, unsigned MaskWords = ~0u) {
+ if (isSmall())
+ applyMask<false, true>(Mask, MaskWords);
+ else
+ getPointer()->clearBitsNotInMask(Mask, MaskWords);
+ }
+
+ void invalid() {
+ assert(empty());
+ X = (uintptr_t)-1;
+ }
+ bool isInvalid() const { return X == (uintptr_t)-1; }
+
+ ArrayRef<uintptr_t> getData(uintptr_t &Store) const {
+ if (!isSmall())
+ return getPointer()->getData();
+ Store = getSmallBits();
+ return makeArrayRef(Store);
+ }
+
+private:
+ template <bool AddBits, bool InvertMask>
+ void applyMask(const uint32_t *Mask, unsigned MaskWords) {
+ assert(MaskWords <= sizeof(uintptr_t) && "Mask is larger than base!");
+ uintptr_t M = Mask[0];
+ if (NumBaseBits == 64)
+ M |= uint64_t(Mask[1]) << 32;
+ if (InvertMask)
+ M = ~M;
+ if (AddBits)
+ setSmallBits(getSmallBits() | M);
+ else
+ setSmallBits(getSmallBits() & ~M);
+ }
+};
+
+inline SmallBitVector
+operator&(const SmallBitVector &LHS, const SmallBitVector &RHS) {
+ SmallBitVector Result(LHS);
+ Result &= RHS;
+ return Result;
+}
+
+inline SmallBitVector
+operator|(const SmallBitVector &LHS, const SmallBitVector &RHS) {
+ SmallBitVector Result(LHS);
+ Result |= RHS;
+ return Result;
+}
+
+inline SmallBitVector
+operator^(const SmallBitVector &LHS, const SmallBitVector &RHS) {
+ SmallBitVector Result(LHS);
+ Result ^= RHS;
+ return Result;
+}
+
+template <> struct DenseMapInfo<SmallBitVector> {
+ static inline SmallBitVector getEmptyKey() { return SmallBitVector(); }
+ static inline SmallBitVector getTombstoneKey() {
+ SmallBitVector V;
+ V.invalid();
+ return V;
+ }
+ static unsigned getHashValue(const SmallBitVector &V) {
+ uintptr_t Store;
+ return DenseMapInfo<
+ std::pair<SmallBitVector::size_type, ArrayRef<uintptr_t>>>::
+ getHashValue(std::make_pair(V.size(), V.getData(Store)));
+ }
+ static bool isEqual(const SmallBitVector &LHS, const SmallBitVector &RHS) {
+ if (LHS.isInvalid() || RHS.isInvalid())
+ return LHS.isInvalid() == RHS.isInvalid();
+ return LHS == RHS;
+ }
+};
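+
+// With this specialization, SmallBitVector can serve as a DenseMap key.
+// An illustrative sketch (requires llvm/ADT/DenseMap.h):
+//
+//   DenseMap<SmallBitVector, unsigned> Freq;
+//   SmallBitVector Key(8);
+//   Key.set(2);
+//   ++Freq[Key]; // hashed via getHashValue() above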
+} // end namespace llvm
+
+namespace std {
+
+/// Implement std::swap in terms of BitVector swap.
+inline void
+swap(llvm::SmallBitVector &LHS, llvm::SmallBitVector &RHS) {
+ LHS.swap(RHS);
+}
+
+} // end namespace std
+
+#endif // LLVM_ADT_SMALLBITVECTOR_H
+
+#ifdef __GNUC__
+#pragma GCC diagnostic pop
+#endif
diff --git a/contrib/libs/llvm14/include/llvm/ADT/SmallPtrSet.h b/contrib/libs/llvm14/include/llvm/ADT/SmallPtrSet.h
new file mode 100644
index 0000000000..301209a3c4
--- /dev/null
+++ b/contrib/libs/llvm14/include/llvm/ADT/SmallPtrSet.h
@@ -0,0 +1,529 @@
+#pragma once
+
+#ifdef __GNUC__
+#pragma GCC diagnostic push
+#pragma GCC diagnostic ignored "-Wunused-parameter"
+#endif
+
+//===- llvm/ADT/SmallPtrSet.h - 'Normally small' pointer set ----*- C++ -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+///
+/// \file
+/// This file defines the SmallPtrSet class. See the doxygen comment for
+/// SmallPtrSetImplBase for more details on the algorithm used.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_ADT_SMALLPTRSET_H
+#define LLVM_ADT_SMALLPTRSET_H
+
+#include "llvm/ADT/EpochTracker.h"
+#include "llvm/Support/Compiler.h"
+#include "llvm/Support/ReverseIteration.h"
+#include "llvm/Support/type_traits.h"
+#include <cassert>
+#include <cstddef>
+#include <cstdlib>
+#include <cstring>
+#include <initializer_list>
+#include <iterator>
+#include <utility>
+
+namespace llvm {
+
+/// SmallPtrSetImplBase - This is the common code shared among all the
+/// SmallPtrSet<>'s, which is almost everything. SmallPtrSet has two modes, one
+/// for small and one for large sets.
+///
+/// Small sets use an array of pointers allocated in the SmallPtrSet object,
+/// which is treated as a simple array of pointers. When a pointer is added to
+/// the set, the array is scanned to see if the element already exists; if not,
+/// the element is 'pushed back' onto the array. If we run out of space in the
+/// array, we grow into the 'large set' case. SmallPtrSet should be used when
+/// the sets are often small. In this case, no memory allocation is used, and
+/// only light-weight and cache-efficient scanning is used.
+///
+/// Large sets use a classic exponentially-probed hash table. Empty buckets are
+/// represented with an illegal pointer value (-1) to allow null pointers to be
+/// inserted. Tombstones are represented with another illegal pointer value
+/// (-2), to allow deletion. The hash table is resized when it is 3/4 or more
+/// full. When this happens, the table is doubled in size.
+///
+class SmallPtrSetImplBase : public DebugEpochBase {
+ friend class SmallPtrSetIteratorImpl;
+
+protected:
+ /// SmallArray - Points to a fixed size set of buckets, used in 'small mode'.
+ const void **SmallArray;
+ /// CurArray - This is the current set of buckets. If equal to SmallArray,
+ /// then the set is in 'small mode'.
+ const void **CurArray;
+ /// CurArraySize - The allocated size of CurArray, always a power of two.
+ unsigned CurArraySize;
+
+ /// Number of elements in CurArray that contain a value or are a tombstone.
+ /// If small, all these elements are at the beginning of CurArray and the rest
+ /// is uninitialized.
+ unsigned NumNonEmpty;
+ /// Number of tombstones in CurArray.
+ unsigned NumTombstones;
+
+ // Helpers to copy and move construct a SmallPtrSet.
+ SmallPtrSetImplBase(const void **SmallStorage,
+ const SmallPtrSetImplBase &that);
+ SmallPtrSetImplBase(const void **SmallStorage, unsigned SmallSize,
+ SmallPtrSetImplBase &&that);
+
+ explicit SmallPtrSetImplBase(const void **SmallStorage, unsigned SmallSize)
+ : SmallArray(SmallStorage), CurArray(SmallStorage),
+ CurArraySize(SmallSize), NumNonEmpty(0), NumTombstones(0) {
+ assert(SmallSize && (SmallSize & (SmallSize-1)) == 0 &&
+ "Initial size must be a power of two!");
+ }
+
+ ~SmallPtrSetImplBase() {
+ if (!isSmall())
+ free(CurArray);
+ }
+
+public:
+ using size_type = unsigned;
+
+ SmallPtrSetImplBase &operator=(const SmallPtrSetImplBase &) = delete;
+
+ LLVM_NODISCARD bool empty() const { return size() == 0; }
+ size_type size() const { return NumNonEmpty - NumTombstones; }
+
+ void clear() {
+ incrementEpoch();
+ // If the capacity of the array is huge, and the # elements used is small,
+ // shrink the array.
+ if (!isSmall()) {
+ if (size() * 4 < CurArraySize && CurArraySize > 32)
+ return shrink_and_clear();
+ // Fill the array with empty markers.
+ memset(CurArray, -1, CurArraySize * sizeof(void *));
+ }
+
+ NumNonEmpty = 0;
+ NumTombstones = 0;
+ }
+
+protected:
+ static void *getTombstoneMarker() { return reinterpret_cast<void*>(-2); }
+
+ static void *getEmptyMarker() {
+ // Note that -1 is chosen to make clear() efficiently implementable with
+ // memset and because it's not a valid pointer value.
+ return reinterpret_cast<void*>(-1);
+ }
+
+ const void **EndPointer() const {
+ return isSmall() ? CurArray + NumNonEmpty : CurArray + CurArraySize;
+ }
+
+  /// insert_imp - Returns a pair whose bool member is true if the pointer was
+  /// new to the set and false if it was already in the set. This is hidden
+  /// from the client so that the derived class can check that the right type
+  /// of pointer is passed in.
+ std::pair<const void *const *, bool> insert_imp(const void *Ptr) {
+ if (isSmall()) {
+ // Check to see if it is already in the set.
+ const void **LastTombstone = nullptr;
+ for (const void **APtr = SmallArray, **E = SmallArray + NumNonEmpty;
+ APtr != E; ++APtr) {
+ const void *Value = *APtr;
+ if (Value == Ptr)
+ return std::make_pair(APtr, false);
+ if (Value == getTombstoneMarker())
+ LastTombstone = APtr;
+ }
+
+ // Did we find any tombstone marker?
+ if (LastTombstone != nullptr) {
+ *LastTombstone = Ptr;
+ --NumTombstones;
+ incrementEpoch();
+ return std::make_pair(LastTombstone, true);
+ }
+
+ // Nope, there isn't. If we stay small, just 'pushback' now.
+ if (NumNonEmpty < CurArraySize) {
+ SmallArray[NumNonEmpty++] = Ptr;
+ incrementEpoch();
+ return std::make_pair(SmallArray + (NumNonEmpty - 1), true);
+ }
+ // Otherwise, hit the big set case, which will call grow.
+ }
+ return insert_imp_big(Ptr);
+ }
+
+ /// erase_imp - If the set contains the specified pointer, remove it and
+ /// return true, otherwise return false. This is hidden from the client so
+ /// that the derived class can check that the right type of pointer is passed
+ /// in.
+ bool erase_imp(const void * Ptr) {
+ const void *const *P = find_imp(Ptr);
+ if (P == EndPointer())
+ return false;
+
+ const void **Loc = const_cast<const void **>(P);
+ assert(*Loc == Ptr && "broken find!");
+ *Loc = getTombstoneMarker();
+ NumTombstones++;
+ return true;
+ }
+
+  /// Returns the raw pointer needed to construct an iterator. If the element
+  /// is not found, this will be EndPointer. Otherwise, it will be a pointer to
+  /// the slot which stores Ptr.
+ const void *const * find_imp(const void * Ptr) const {
+ if (isSmall()) {
+ // Linear search for the item.
+ for (const void *const *APtr = SmallArray,
+ *const *E = SmallArray + NumNonEmpty; APtr != E; ++APtr)
+ if (*APtr == Ptr)
+ return APtr;
+ return EndPointer();
+ }
+
+ // Big set case.
+ auto *Bucket = FindBucketFor(Ptr);
+ if (*Bucket == Ptr)
+ return Bucket;
+ return EndPointer();
+ }
+
+private:
+ bool isSmall() const { return CurArray == SmallArray; }
+
+ std::pair<const void *const *, bool> insert_imp_big(const void *Ptr);
+
+ const void * const *FindBucketFor(const void *Ptr) const;
+ void shrink_and_clear();
+
+ /// Grow - Allocate a larger backing store for the buckets and move it over.
+ void Grow(unsigned NewSize);
+
+protected:
+ /// swap - Swaps the elements of two sets.
+ /// Note: This method assumes that both sets have the same small size.
+ void swap(SmallPtrSetImplBase &RHS);
+
+ void CopyFrom(const SmallPtrSetImplBase &RHS);
+ void MoveFrom(unsigned SmallSize, SmallPtrSetImplBase &&RHS);
+
+private:
+ /// Code shared by MoveFrom() and move constructor.
+ void MoveHelper(unsigned SmallSize, SmallPtrSetImplBase &&RHS);
+ /// Code shared by CopyFrom() and copy constructor.
+ void CopyHelper(const SmallPtrSetImplBase &RHS);
+};
+
+/// SmallPtrSetIteratorImpl - This is the common base class shared between all
+/// instances of SmallPtrSetIterator.
+class SmallPtrSetIteratorImpl {
+protected:
+ const void *const *Bucket;
+ const void *const *End;
+
+public:
+ explicit SmallPtrSetIteratorImpl(const void *const *BP, const void*const *E)
+ : Bucket(BP), End(E) {
+ if (shouldReverseIterate()) {
+ RetreatIfNotValid();
+ return;
+ }
+ AdvanceIfNotValid();
+ }
+
+ bool operator==(const SmallPtrSetIteratorImpl &RHS) const {
+ return Bucket == RHS.Bucket;
+ }
+ bool operator!=(const SmallPtrSetIteratorImpl &RHS) const {
+ return Bucket != RHS.Bucket;
+ }
+
+protected:
+ /// AdvanceIfNotValid - If the current bucket isn't valid, advance to a bucket
+ /// that is. This is guaranteed to stop because the end() bucket is marked
+ /// valid.
+ void AdvanceIfNotValid() {
+ assert(Bucket <= End);
+ while (Bucket != End &&
+ (*Bucket == SmallPtrSetImplBase::getEmptyMarker() ||
+ *Bucket == SmallPtrSetImplBase::getTombstoneMarker()))
+ ++Bucket;
+ }
+ void RetreatIfNotValid() {
+ assert(Bucket >= End);
+ while (Bucket != End &&
+ (Bucket[-1] == SmallPtrSetImplBase::getEmptyMarker() ||
+ Bucket[-1] == SmallPtrSetImplBase::getTombstoneMarker())) {
+ --Bucket;
+ }
+ }
+};
+
+/// SmallPtrSetIterator - This implements a const_iterator for SmallPtrSet.
+template <typename PtrTy>
+class SmallPtrSetIterator : public SmallPtrSetIteratorImpl,
+ DebugEpochBase::HandleBase {
+ using PtrTraits = PointerLikeTypeTraits<PtrTy>;
+
+public:
+ using value_type = PtrTy;
+ using reference = PtrTy;
+ using pointer = PtrTy;
+ using difference_type = std::ptrdiff_t;
+ using iterator_category = std::forward_iterator_tag;
+
+ explicit SmallPtrSetIterator(const void *const *BP, const void *const *E,
+ const DebugEpochBase &Epoch)
+ : SmallPtrSetIteratorImpl(BP, E), DebugEpochBase::HandleBase(&Epoch) {}
+
+ // Most methods are provided by the base class.
+
+ const PtrTy operator*() const {
+ assert(isHandleInSync() && "invalid iterator access!");
+ if (shouldReverseIterate()) {
+ assert(Bucket > End);
+ return PtrTraits::getFromVoidPointer(const_cast<void *>(Bucket[-1]));
+ }
+ assert(Bucket < End);
+ return PtrTraits::getFromVoidPointer(const_cast<void*>(*Bucket));
+ }
+
+ inline SmallPtrSetIterator& operator++() { // Preincrement
+ assert(isHandleInSync() && "invalid iterator access!");
+ if (shouldReverseIterate()) {
+ --Bucket;
+ RetreatIfNotValid();
+ return *this;
+ }
+ ++Bucket;
+ AdvanceIfNotValid();
+ return *this;
+ }
+
+ SmallPtrSetIterator operator++(int) { // Postincrement
+ SmallPtrSetIterator tmp = *this;
+ ++*this;
+ return tmp;
+ }
+};
+
+/// RoundUpToPowerOfTwo - This is a helper template that rounds N up to the next
+/// power of two (which means N itself if N is already a power of two).
+template<unsigned N>
+struct RoundUpToPowerOfTwo;
+
+/// RoundUpToPowerOfTwoH - If N is not a power of two, increase it. This is a
+/// helper template used to implement RoundUpToPowerOfTwo.
+template<unsigned N, bool isPowerTwo>
+struct RoundUpToPowerOfTwoH {
+ enum { Val = N };
+};
+template<unsigned N>
+struct RoundUpToPowerOfTwoH<N, false> {
+ enum {
+ // We could just use NextVal = N+1, but this converges faster. N|(N-1) sets
+ // the right-most zero bits to one all at once, e.g. 0b0011000 -> 0b0011111.
+ Val = RoundUpToPowerOfTwo<(N|(N-1)) + 1>::Val
+ };
+};
+
+template<unsigned N>
+struct RoundUpToPowerOfTwo {
+ enum { Val = RoundUpToPowerOfTwoH<N, (N&(N-1)) == 0>::Val };
+};
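+
+// Illustrative compile-time checks (not in the original header):
+//   static_assert(RoundUpToPowerOfTwo<1>::Val == 1, "already a power of two");
+//   static_assert(RoundUpToPowerOfTwo<5>::Val == 8, "rounded up");
+//   static_assert(RoundUpToPowerOfTwo<8>::Val == 8, "unchanged");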
+
+/// A templated base class for \c SmallPtrSet which provides the
+/// typesafe interface that is common across all small sizes.
+///
+/// This is particularly useful for passing around between interface boundaries
+/// to avoid encoding a particular small size in the interface boundary.
+template <typename PtrType>
+class SmallPtrSetImpl : public SmallPtrSetImplBase {
+ using ConstPtrType = typename add_const_past_pointer<PtrType>::type;
+ using PtrTraits = PointerLikeTypeTraits<PtrType>;
+ using ConstPtrTraits = PointerLikeTypeTraits<ConstPtrType>;
+
+protected:
+ // Forward constructors to the base.
+ using SmallPtrSetImplBase::SmallPtrSetImplBase;
+
+public:
+ using iterator = SmallPtrSetIterator<PtrType>;
+ using const_iterator = SmallPtrSetIterator<PtrType>;
+ using key_type = ConstPtrType;
+ using value_type = PtrType;
+
+ SmallPtrSetImpl(const SmallPtrSetImpl &) = delete;
+
+ /// Inserts Ptr if and only if there is no element in the container equal to
+ /// Ptr. The bool component of the returned pair is true if and only if the
+ /// insertion takes place, and the iterator component of the pair points to
+ /// the element equal to Ptr.
+ std::pair<iterator, bool> insert(PtrType Ptr) {
+ auto p = insert_imp(PtrTraits::getAsVoidPointer(Ptr));
+ return std::make_pair(makeIterator(p.first), p.second);
+ }
+
+ /// Insert the given pointer with an iterator hint that is ignored. This is
+ /// identical to calling insert(Ptr), but allows SmallPtrSet to be used by
+ /// std::insert_iterator and std::inserter().
+ iterator insert(iterator, PtrType Ptr) {
+ return insert(Ptr).first;
+ }
+
+ /// erase - If the set contains the specified pointer, remove it and return
+ /// true, otherwise return false.
+ bool erase(PtrType Ptr) {
+ return erase_imp(PtrTraits::getAsVoidPointer(Ptr));
+ }
+ /// count - Return 1 if the specified pointer is in the set, 0 otherwise.
+ size_type count(ConstPtrType Ptr) const {
+ return find_imp(ConstPtrTraits::getAsVoidPointer(Ptr)) != EndPointer();
+ }
+ iterator find(ConstPtrType Ptr) const {
+ return makeIterator(find_imp(ConstPtrTraits::getAsVoidPointer(Ptr)));
+ }
+ bool contains(ConstPtrType Ptr) const {
+ return find_imp(ConstPtrTraits::getAsVoidPointer(Ptr)) != EndPointer();
+ }
+
+ template <typename IterT>
+ void insert(IterT I, IterT E) {
+ for (; I != E; ++I)
+ insert(*I);
+ }
+
+ void insert(std::initializer_list<PtrType> IL) {
+ insert(IL.begin(), IL.end());
+ }
+
+ iterator begin() const {
+ if (shouldReverseIterate())
+ return makeIterator(EndPointer() - 1);
+ return makeIterator(CurArray);
+ }
+ iterator end() const { return makeIterator(EndPointer()); }
+
+private:
+  /// Create an iterator that dereferences to the same place as the given
+  /// pointer.
+ iterator makeIterator(const void *const *P) const {
+ if (shouldReverseIterate())
+ return iterator(P == EndPointer() ? CurArray : P + 1, CurArray, *this);
+ return iterator(P, EndPointer(), *this);
+ }
+};
+
+/// Equality comparison for SmallPtrSet.
+///
+/// Iterates over elements of LHS confirming that each value from LHS is also in
+/// RHS, and that no additional values are in RHS.
+template <typename PtrType>
+bool operator==(const SmallPtrSetImpl<PtrType> &LHS,
+ const SmallPtrSetImpl<PtrType> &RHS) {
+ if (LHS.size() != RHS.size())
+ return false;
+
+ for (const auto *KV : LHS)
+ if (!RHS.count(KV))
+ return false;
+
+ return true;
+}
+
+/// Inequality comparison for SmallPtrSet.
+///
+/// Equivalent to !(LHS == RHS).
+template <typename PtrType>
+bool operator!=(const SmallPtrSetImpl<PtrType> &LHS,
+ const SmallPtrSetImpl<PtrType> &RHS) {
+ return !(LHS == RHS);
+}
+
+/// SmallPtrSet - This class implements a set which is optimized for holding
+/// SmallSize or fewer elements. This internally rounds up SmallSize to the
+/// next power of two if it is not already a power of two. See the comments
+/// above SmallPtrSetImplBase for details of the algorithm.
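+///
+/// A brief usage sketch (illustrative; Value and V are hypothetical names):
+/// \code
+///   SmallPtrSet<Value *, 8> Visited;
+///   if (Visited.insert(V).second) {
+///     // First visit: insert() reports true in the pair's second member.
+///   }
+///   bool Seen = Visited.contains(V);
+/// \endcode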
+template<class PtrType, unsigned SmallSize>
+class SmallPtrSet : public SmallPtrSetImpl<PtrType> {
+ // In small mode SmallPtrSet uses linear search for the elements, so it is
+ // not a good idea to choose this value too high. You may consider using a
+ // DenseSet<> instead if you expect many elements in the set.
+ static_assert(SmallSize <= 32, "SmallSize should be small");
+
+ using BaseT = SmallPtrSetImpl<PtrType>;
+
+ // Make sure that SmallSize is a power of two, round up if not.
+ enum { SmallSizePowTwo = RoundUpToPowerOfTwo<SmallSize>::Val };
+ /// SmallStorage - Fixed size storage used in 'small mode'.
+ const void *SmallStorage[SmallSizePowTwo];
+
+public:
+ SmallPtrSet() : BaseT(SmallStorage, SmallSizePowTwo) {}
+ SmallPtrSet(const SmallPtrSet &that) : BaseT(SmallStorage, that) {}
+ SmallPtrSet(SmallPtrSet &&that)
+ : BaseT(SmallStorage, SmallSizePowTwo, std::move(that)) {}
+
+ template<typename It>
+ SmallPtrSet(It I, It E) : BaseT(SmallStorage, SmallSizePowTwo) {
+ this->insert(I, E);
+ }
+
+ SmallPtrSet(std::initializer_list<PtrType> IL)
+ : BaseT(SmallStorage, SmallSizePowTwo) {
+ this->insert(IL.begin(), IL.end());
+ }
+
+ SmallPtrSet<PtrType, SmallSize> &
+ operator=(const SmallPtrSet<PtrType, SmallSize> &RHS) {
+ if (&RHS != this)
+ this->CopyFrom(RHS);
+ return *this;
+ }
+
+ SmallPtrSet<PtrType, SmallSize> &
+ operator=(SmallPtrSet<PtrType, SmallSize> &&RHS) {
+ if (&RHS != this)
+ this->MoveFrom(SmallSizePowTwo, std::move(RHS));
+ return *this;
+ }
+
+ SmallPtrSet<PtrType, SmallSize> &
+ operator=(std::initializer_list<PtrType> IL) {
+ this->clear();
+ this->insert(IL.begin(), IL.end());
+ return *this;
+ }
+
+ /// swap - Swaps the elements of two sets.
+ void swap(SmallPtrSet<PtrType, SmallSize> &RHS) {
+ SmallPtrSetImplBase::swap(RHS);
+ }
+};
+
+} // end namespace llvm
+
+namespace std {
+
+ /// Implement std::swap in terms of SmallPtrSet swap.
+ template<class T, unsigned N>
+ inline void swap(llvm::SmallPtrSet<T, N> &LHS, llvm::SmallPtrSet<T, N> &RHS) {
+ LHS.swap(RHS);
+ }
+
+} // end namespace std
+
+#endif // LLVM_ADT_SMALLPTRSET_H
+
+#ifdef __GNUC__
+#pragma GCC diagnostic pop
+#endif
diff --git a/contrib/libs/llvm14/include/llvm/ADT/SmallSet.h b/contrib/libs/llvm14/include/llvm/ADT/SmallSet.h
new file mode 100644
index 0000000000..472b8fb362
--- /dev/null
+++ b/contrib/libs/llvm14/include/llvm/ADT/SmallSet.h
@@ -0,0 +1,298 @@
+#pragma once
+
+#ifdef __GNUC__
+#pragma GCC diagnostic push
+#pragma GCC diagnostic ignored "-Wunused-parameter"
+#endif
+
+//===- llvm/ADT/SmallSet.h - 'Normally small' sets --------------*- C++ -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+///
+/// \file
+/// This file defines the SmallSet class.
+///
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_ADT_SMALLSET_H
+#define LLVM_ADT_SMALLSET_H
+
+#include "llvm/ADT/None.h"
+#include "llvm/ADT/SmallPtrSet.h"
+#include "llvm/ADT/SmallVector.h"
+#include "llvm/ADT/STLExtras.h"
+#include "llvm/ADT/iterator.h"
+#include "llvm/Support/Compiler.h"
+#include "llvm/Support/type_traits.h"
+#include <cstddef>
+#include <functional>
+#include <set>
+#include <type_traits>
+#include <utility>
+
+namespace llvm {
+
+/// SmallSetIterator - This class implements a const_iterator for SmallSet by
+/// delegating to the underlying SmallVector or Set iterators.
+template <typename T, unsigned N, typename C>
+class SmallSetIterator
+ : public iterator_facade_base<SmallSetIterator<T, N, C>,
+ std::forward_iterator_tag, T> {
+private:
+ using SetIterTy = typename std::set<T, C>::const_iterator;
+ using VecIterTy = typename SmallVector<T, N>::const_iterator;
+ using SelfTy = SmallSetIterator<T, N, C>;
+
+ /// Iterators to the parts of the SmallSet containing the data. They are set
+ /// depending on isSmall.
+ union {
+ SetIterTy SetIter;
+ VecIterTy VecIter;
+ };
+
+ bool isSmall;
+
+public:
+ SmallSetIterator(SetIterTy SetIter) : SetIter(SetIter), isSmall(false) {}
+
+ SmallSetIterator(VecIterTy VecIter) : VecIter(VecIter), isSmall(true) {}
+
+ // Spell out destructor, copy/move constructor and assignment operators for
+ // MSVC STL, where set<T>::const_iterator is not trivially copy constructible.
+ ~SmallSetIterator() {
+ if (isSmall)
+ VecIter.~VecIterTy();
+ else
+ SetIter.~SetIterTy();
+ }
+
+ SmallSetIterator(const SmallSetIterator &Other) : isSmall(Other.isSmall) {
+ if (isSmall)
+ VecIter = Other.VecIter;
+ else
+ // Use placement new, to make sure SetIter is properly constructed, even
+ // if it is not trivially copy-able (e.g. in MSVC).
+ new (&SetIter) SetIterTy(Other.SetIter);
+ }
+
+ SmallSetIterator(SmallSetIterator &&Other) : isSmall(Other.isSmall) {
+ if (isSmall)
+ VecIter = std::move(Other.VecIter);
+ else
+ // Use placement new, to make sure SetIter is properly constructed, even
+ // if it is not trivially copy-able (e.g. in MSVC).
+ new (&SetIter) SetIterTy(std::move(Other.SetIter));
+ }
+
+ SmallSetIterator& operator=(const SmallSetIterator& Other) {
+ // Call destructor for SetIter, so it gets properly destroyed if it is
+ // not trivially destructible in case we are setting VecIter.
+ if (!isSmall)
+ SetIter.~SetIterTy();
+
+ isSmall = Other.isSmall;
+ if (isSmall)
+ VecIter = Other.VecIter;
+ else
+ new (&SetIter) SetIterTy(Other.SetIter);
+ return *this;
+ }
+
+ SmallSetIterator& operator=(SmallSetIterator&& Other) {
+ // Call destructor for SetIter, so it gets properly destroyed if it is
+ // not trivially destructible in case we are setting VecIter.
+ if (!isSmall)
+ SetIter.~SetIterTy();
+
+ isSmall = Other.isSmall;
+ if (isSmall)
+ VecIter = std::move(Other.VecIter);
+ else
+ new (&SetIter) SetIterTy(std::move(Other.SetIter));
+ return *this;
+ }
+
+ bool operator==(const SmallSetIterator &RHS) const {
+ if (isSmall != RHS.isSmall)
+ return false;
+ if (isSmall)
+ return VecIter == RHS.VecIter;
+ return SetIter == RHS.SetIter;
+ }
+
+ SmallSetIterator &operator++() { // Preincrement
+ if (isSmall)
+ VecIter++;
+ else
+ SetIter++;
+ return *this;
+ }
+
+ const T &operator*() const { return isSmall ? *VecIter : *SetIter; }
+};
+
+/// SmallSet - This maintains a set of unique values, optimizing for the case
+/// when the set is small (fewer than N elements). In this case, the set can be
+/// maintained with no mallocs. If the set gets large, we expand to using a
+/// std::set to maintain reasonable lookup times.
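+///
+/// A brief usage sketch (illustrative only):
+/// \code
+///   SmallSet<unsigned, 4> Seen;
+///   for (unsigned ID : {1u, 2u, 1u, 3u})
+///     if (Seen.insert(ID).second) {
+///       // Reached once per distinct ID; the duplicate 1u is skipped.
+///     }
+/// \endcode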
+template <typename T, unsigned N, typename C = std::less<T>>
+class SmallSet {
+ /// Use a SmallVector to hold the elements here (even though it will never
+ /// reach its 'large' stage) to avoid calling the default ctors of elements
+ /// we will never use.
+ SmallVector<T, N> Vector;
+ std::set<T, C> Set;
+
+ using VIterator = typename SmallVector<T, N>::const_iterator;
+ using mutable_iterator = typename SmallVector<T, N>::iterator;
+
+  // In small mode SmallSet uses linear search for the elements, so it is
+  // not a good idea to choose N too high. You may consider using a
+  // DenseSet<> instead if you expect many elements in the set.
+ static_assert(N <= 32, "N should be small");
+
+public:
+ using size_type = size_t;
+ using const_iterator = SmallSetIterator<T, N, C>;
+
+ SmallSet() = default;
+
+ LLVM_NODISCARD bool empty() const {
+ return Vector.empty() && Set.empty();
+ }
+
+ size_type size() const {
+ return isSmall() ? Vector.size() : Set.size();
+ }
+
+ /// count - Return 1 if the element is in the set, 0 otherwise.
+ size_type count(const T &V) const {
+ if (isSmall()) {
+ // Since the collection is small, just do a linear search.
+ return vfind(V) == Vector.end() ? 0 : 1;
+ } else {
+ return Set.count(V);
+ }
+ }
+
+ /// insert - Insert an element into the set if it isn't already there.
+ /// Returns true if the element is inserted (it was not in the set before).
+ /// The first value of the returned pair is unused and provided for
+ /// partial compatibility with the standard library self-associative container
+ /// concept.
+ // FIXME: Add iterators that abstract over the small and large form, and then
+ // return those here.
+ std::pair<NoneType, bool> insert(const T &V) {
+ if (!isSmall())
+ return std::make_pair(None, Set.insert(V).second);
+
+ VIterator I = vfind(V);
+ if (I != Vector.end()) // Don't reinsert if it already exists.
+ return std::make_pair(None, false);
+ if (Vector.size() < N) {
+ Vector.push_back(V);
+ return std::make_pair(None, true);
+ }
+
+ // Otherwise, grow from vector to set.
+ while (!Vector.empty()) {
+ Set.insert(Vector.back());
+ Vector.pop_back();
+ }
+ Set.insert(V);
+ return std::make_pair(None, true);
+ }
+
+ template <typename IterT>
+ void insert(IterT I, IterT E) {
+ for (; I != E; ++I)
+ insert(*I);
+ }
+
+ bool erase(const T &V) {
+ if (!isSmall())
+ return Set.erase(V);
+ for (mutable_iterator I = Vector.begin(), E = Vector.end(); I != E; ++I)
+ if (*I == V) {
+ Vector.erase(I);
+ return true;
+ }
+ return false;
+ }
+
+ void clear() {
+ Vector.clear();
+ Set.clear();
+ }
+
+ const_iterator begin() const {
+ if (isSmall())
+ return {Vector.begin()};
+ return {Set.begin()};
+ }
+
+ const_iterator end() const {
+ if (isSmall())
+ return {Vector.end()};
+ return {Set.end()};
+ }
+
+ /// Check if the SmallSet contains the given element.
+ bool contains(const T &V) const {
+ if (isSmall())
+ return vfind(V) != Vector.end();
+ return Set.find(V) != Set.end();
+ }
+
+private:
+ bool isSmall() const { return Set.empty(); }
+
+ VIterator vfind(const T &V) const {
+ for (VIterator I = Vector.begin(), E = Vector.end(); I != E; ++I)
+ if (*I == V)
+ return I;
+ return Vector.end();
+ }
+};
+
+/// If this set is of pointer values, transparently switch over to using
+/// SmallPtrSet for performance.
+template <typename PointeeType, unsigned N>
+class SmallSet<PointeeType*, N> : public SmallPtrSet<PointeeType*, N> {};
+
+/// Equality comparison for SmallSet.
+///
+/// Iterates over elements of LHS confirming that each element is also a member
+/// of RHS, and that RHS contains no additional values.
+/// Equivalent to N calls to RHS.count.
+/// For small-set mode the complexity is O(N^2).
+/// For large-set mode the backing std::set is tree-based, so the complexity is
+/// O(N log N).
+template <typename T, unsigned LN, unsigned RN, typename C>
+bool operator==(const SmallSet<T, LN, C> &LHS, const SmallSet<T, RN, C> &RHS) {
+ if (LHS.size() != RHS.size())
+ return false;
+
+ // All elements in LHS must also be in RHS
+ return all_of(LHS, [&RHS](const T &E) { return RHS.count(E); });
+}
+
+/// Inequality comparison for SmallSet.
+///
+/// Equivalent to !(LHS == RHS). See operator== for performance notes.
+template <typename T, unsigned LN, unsigned RN, typename C>
+bool operator!=(const SmallSet<T, LN, C> &LHS, const SmallSet<T, RN, C> &RHS) {
+ return !(LHS == RHS);
+}
+
+} // end namespace llvm
+
+#endif // LLVM_ADT_SMALLSET_H
+
+#ifdef __GNUC__
+#pragma GCC diagnostic pop
+#endif
diff --git a/contrib/libs/llvm14/include/llvm/ADT/SmallString.h b/contrib/libs/llvm14/include/llvm/ADT/SmallString.h
new file mode 100644
index 0000000000..9e7998a1b6
--- /dev/null
+++ b/contrib/libs/llvm14/include/llvm/ADT/SmallString.h
@@ -0,0 +1,305 @@
+#pragma once
+
+#ifdef __GNUC__
+#pragma GCC diagnostic push
+#pragma GCC diagnostic ignored "-Wunused-parameter"
+#endif
+
+//===- llvm/ADT/SmallString.h - 'Normally small' strings --------*- C++ -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+///
+/// \file
+/// This file defines the SmallString class.
+///
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_ADT_SMALLSTRING_H
+#define LLVM_ADT_SMALLSTRING_H
+
+#include "llvm/ADT/SmallVector.h"
+#include "llvm/ADT/StringRef.h"
+#include <cstddef>
+
+namespace llvm {
+
+/// SmallString - A SmallString is just a SmallVector with methods and accessors
+/// that make it work better as a string (e.g. operator+ etc).
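+///
+/// A brief usage sketch (illustrative only):
+/// \code
+///   SmallString<128> Path("/tmp");
+///   Path += "/example.txt";            // append through StringRef
+///   if (Path.endswith(".txt")) { /* ... */ }
+///   StringRef Ref = Path;              // implicit, copy-free conversion
+/// \endcode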
+template<unsigned InternalLen>
+class SmallString : public SmallVector<char, InternalLen> {
+public:
+ /// Default ctor - Initialize to empty.
+ SmallString() = default;
+
+ /// Initialize from a StringRef.
+ SmallString(StringRef S) : SmallVector<char, InternalLen>(S.begin(), S.end()) {}
+
+ /// Initialize by concatenating a list of StringRefs.
+ SmallString(std::initializer_list<StringRef> Refs)
+ : SmallVector<char, InternalLen>() {
+ this->append(Refs);
+ }
+
+ /// Initialize with a range.
+ template<typename ItTy>
+ SmallString(ItTy S, ItTy E) : SmallVector<char, InternalLen>(S, E) {}
+
+ /// @}
+ /// @name String Assignment
+ /// @{
+
+ using SmallVector<char, InternalLen>::assign;
+
+ /// Assign from a StringRef.
+ void assign(StringRef RHS) {
+ SmallVectorImpl<char>::assign(RHS.begin(), RHS.end());
+ }
+
+ /// Assign from a list of StringRefs.
+ void assign(std::initializer_list<StringRef> Refs) {
+ this->clear();
+ append(Refs);
+ }
+
+ /// @}
+ /// @name String Concatenation
+ /// @{
+
+ using SmallVector<char, InternalLen>::append;
+
+ /// Append from a StringRef.
+ void append(StringRef RHS) {
+ SmallVectorImpl<char>::append(RHS.begin(), RHS.end());
+ }
+
+ /// Append from a list of StringRefs.
+ void append(std::initializer_list<StringRef> Refs) {
+ size_t CurrentSize = this->size();
+ size_t SizeNeeded = CurrentSize;
+ for (const StringRef &Ref : Refs)
+ SizeNeeded += Ref.size();
+ this->resize_for_overwrite(SizeNeeded);
+ for (const StringRef &Ref : Refs) {
+ std::copy(Ref.begin(), Ref.end(), this->begin() + CurrentSize);
+ CurrentSize += Ref.size();
+ }
+ assert(CurrentSize == this->size());
+ }
+
+ /// @}
+ /// @name String Comparison
+ /// @{
+
+  /// Check for string equality. This is more efficient than compare() when
+  /// the relative ordering of unequal strings isn't needed.
+ bool equals(StringRef RHS) const {
+ return str().equals(RHS);
+ }
+
+ /// Check for string equality, ignoring case.
+ bool equals_insensitive(StringRef RHS) const {
+ return str().equals_insensitive(RHS);
+ }
+
+ /// Compare two strings; the result is -1, 0, or 1 if this string is
+ /// lexicographically less than, equal to, or greater than the \p RHS.
+ int compare(StringRef RHS) const {
+ return str().compare(RHS);
+ }
+
+ /// compare_insensitive - Compare two strings, ignoring case.
+ int compare_insensitive(StringRef RHS) const {
+ return str().compare_insensitive(RHS);
+ }
+
+ /// compare_numeric - Compare two strings, treating sequences of digits as
+ /// numbers.
+ int compare_numeric(StringRef RHS) const {
+ return str().compare_numeric(RHS);
+ }
+
+ /// @}
+ /// @name String Predicates
+ /// @{
+
+ /// startswith - Check if this string starts with the given \p Prefix.
+ bool startswith(StringRef Prefix) const {
+ return str().startswith(Prefix);
+ }
+
+ /// endswith - Check if this string ends with the given \p Suffix.
+ bool endswith(StringRef Suffix) const {
+ return str().endswith(Suffix);
+ }
+
+ /// @}
+ /// @name String Searching
+ /// @{
+
+ /// find - Search for the first character \p C in the string.
+ ///
+ /// \return - The index of the first occurrence of \p C, or npos if not
+ /// found.
+ size_t find(char C, size_t From = 0) const {
+ return str().find(C, From);
+ }
+
+ /// Search for the first string \p Str in the string.
+ ///
+ /// \returns The index of the first occurrence of \p Str, or npos if not
+ /// found.
+ size_t find(StringRef Str, size_t From = 0) const {
+ return str().find(Str, From);
+ }
+
+ /// Search for the last character \p C in the string.
+ ///
+ /// \returns The index of the last occurrence of \p C, or npos if not
+ /// found.
+ size_t rfind(char C, size_t From = StringRef::npos) const {
+ return str().rfind(C, From);
+ }
+
+ /// Search for the last string \p Str in the string.
+ ///
+ /// \returns The index of the last occurrence of \p Str, or npos if not
+ /// found.
+ size_t rfind(StringRef Str) const {
+ return str().rfind(Str);
+ }
+
+ /// Find the first character in the string that is \p C, or npos if not
+ /// found. Same as find.
+ size_t find_first_of(char C, size_t From = 0) const {
+ return str().find_first_of(C, From);
+ }
+
+ /// Find the first character in the string that is in \p Chars, or npos if
+ /// not found.
+ ///
+ /// Complexity: O(size() + Chars.size())
+ size_t find_first_of(StringRef Chars, size_t From = 0) const {
+ return str().find_first_of(Chars, From);
+ }
+
+ /// Find the first character in the string that is not \p C or npos if not
+ /// found.
+ size_t find_first_not_of(char C, size_t From = 0) const {
+ return str().find_first_not_of(C, From);
+ }
+
+ /// Find the first character in the string that is not in the string
+ /// \p Chars, or npos if not found.
+ ///
+ /// Complexity: O(size() + Chars.size())
+ size_t find_first_not_of(StringRef Chars, size_t From = 0) const {
+ return str().find_first_not_of(Chars, From);
+ }
+
+ /// Find the last character in the string that is \p C, or npos if not
+ /// found.
+ size_t find_last_of(char C, size_t From = StringRef::npos) const {
+ return str().find_last_of(C, From);
+ }
+
+  /// Find the last character in the string that is in \p Chars, or npos if
+  /// not found.
+ ///
+ /// Complexity: O(size() + Chars.size())
+ size_t find_last_of(
+ StringRef Chars, size_t From = StringRef::npos) const {
+ return str().find_last_of(Chars, From);
+ }
+
+ /// @}
+ /// @name Helpful Algorithms
+ /// @{
+
+ /// Return the number of occurrences of \p C in the string.
+ size_t count(char C) const {
+ return str().count(C);
+ }
+
+ /// Return the number of non-overlapped occurrences of \p Str in the
+ /// string.
+ size_t count(StringRef Str) const {
+ return str().count(Str);
+ }
+
+ /// @}
+ /// @name Substring Operations
+ /// @{
+
+ /// Return a reference to the substring from [Start, Start + N).
+ ///
+ /// \param Start The index of the starting character in the substring; if
+ /// the index is npos or greater than the length of the string then the
+ /// empty substring will be returned.
+ ///
+  /// \param N The number of characters to include in the substring. If \p N
+ /// exceeds the number of characters remaining in the string, the string
+ /// suffix (starting with \p Start) will be returned.
+ StringRef substr(size_t Start, size_t N = StringRef::npos) const {
+ return str().substr(Start, N);
+ }
+
+ /// Return a reference to the substring from [Start, End).
+ ///
+ /// \param Start The index of the starting character in the substring; if
+ /// the index is npos or greater than the length of the string then the
+ /// empty substring will be returned.
+ ///
+ /// \param End The index following the last character to include in the
+ /// substring. If this is npos, or less than \p Start, or exceeds the
+ /// number of characters remaining in the string, the string suffix
+ /// (starting with \p Start) will be returned.
+ StringRef slice(size_t Start, size_t End) const {
+ return str().slice(Start, End);
+ }
+
+ // Extra methods.
+
+ /// Explicit conversion to StringRef.
+ StringRef str() const { return StringRef(this->data(), this->size()); }
+
+ // TODO: Make this const, if it's safe...
+ const char* c_str() {
+ this->push_back(0);
+ this->pop_back();
+ return this->data();
+ }
+
+ /// Implicit conversion to StringRef.
+ operator StringRef() const { return str(); }
+
+ explicit operator std::string() const {
+ return std::string(this->data(), this->size());
+ }
+
+ // Extra operators.
+ SmallString &operator=(StringRef RHS) {
+ this->assign(RHS);
+ return *this;
+ }
+
+ SmallString &operator+=(StringRef RHS) {
+ this->append(RHS.begin(), RHS.end());
+ return *this;
+ }
+ SmallString &operator+=(char C) {
+ this->push_back(C);
+ return *this;
+ }
+};
+
+} // end namespace llvm
+
+#endif // LLVM_ADT_SMALLSTRING_H
+
+#ifdef __GNUC__
+#pragma GCC diagnostic pop
+#endif
diff --git a/contrib/libs/llvm14/include/llvm/ADT/SmallVector.h b/contrib/libs/llvm14/include/llvm/ADT/SmallVector.h
new file mode 100644
index 0000000000..cb04df7a70
--- /dev/null
+++ b/contrib/libs/llvm14/include/llvm/ADT/SmallVector.h
@@ -0,0 +1,1320 @@
+#pragma once
+
+#ifdef __GNUC__
+#pragma GCC diagnostic push
+#pragma GCC diagnostic ignored "-Wunused-parameter"
+#endif
+
+//===- llvm/ADT/SmallVector.h - 'Normally small' vectors --------*- C++ -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+///
+/// \file
+/// This file defines the SmallVector class.
+///
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_ADT_SMALLVECTOR_H
+#define LLVM_ADT_SMALLVECTOR_H
+
+#include "llvm/Support/Compiler.h"
+#include "llvm/Support/type_traits.h"
+#include <algorithm>
+#include <cassert>
+#include <cstddef>
+#include <cstdlib>
+#include <cstring>
+#include <functional>
+#include <initializer_list>
+#include <iterator>
+#include <limits>
+#include <memory>
+#include <new>
+#include <type_traits>
+#include <utility>
+
+namespace llvm {
+
+template <typename IteratorT> class iterator_range;
+
+/// This is all the stuff common to all SmallVectors.
+///
+/// The template parameter specifies the type which should be used to hold the
+/// Size and Capacity of the SmallVector, so it can be adjusted.
+/// Using 32 bit size is desirable to shrink the size of the SmallVector.
+/// Using 64 bit size is desirable for cases like SmallVector<char>, where a
+/// 32 bit size would limit the vector to ~4GB. SmallVectors are used for
+/// buffering bitcode output - which can exceed 4GB.
+template <class Size_T> class SmallVectorBase {
+protected:
+ void *BeginX;
+ Size_T Size = 0, Capacity;
+
+ /// The maximum value of the Size_T used.
+ static constexpr size_t SizeTypeMax() {
+ return std::numeric_limits<Size_T>::max();
+ }
+
+ SmallVectorBase() = delete;
+ SmallVectorBase(void *FirstEl, size_t TotalCapacity)
+ : BeginX(FirstEl), Capacity(TotalCapacity) {}
+
+ /// This is a helper for \a grow() that's out of line to reduce code
+ /// duplication. This function will report a fatal error if it can't grow at
+ /// least to \p MinSize.
+ void *mallocForGrow(size_t MinSize, size_t TSize, size_t &NewCapacity);
+
+ /// This is an implementation of the grow() method which only works
+ /// on POD-like data types and is out of line to reduce code duplication.
+ /// This function will report a fatal error if it cannot increase capacity.
+ void grow_pod(void *FirstEl, size_t MinSize, size_t TSize);
+
+public:
+ size_t size() const { return Size; }
+ size_t capacity() const { return Capacity; }
+
+ LLVM_NODISCARD bool empty() const { return !Size; }
+
+protected:
+ /// Set the array size to \p N, which the current array must have enough
+ /// capacity for.
+ ///
+ /// This does not construct or destroy any elements in the vector.
+ void set_size(size_t N) {
+ assert(N <= capacity());
+ Size = N;
+ }
+};
+
+template <class T>
+using SmallVectorSizeType =
+ typename std::conditional<sizeof(T) < 4 && sizeof(void *) >= 8, uint64_t,
+ uint32_t>::type;
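+
+// An illustrative sketch of the rule above, assuming a 64-bit host where
+// sizeof(void *) == 8 (the asserts below are a sketch, not part of the API):
+//   static_assert(std::is_same<SmallVectorSizeType<char>, uint64_t>::value,
+//                 "small elements get a 64-bit Size/Capacity");
+//   static_assert(std::is_same<SmallVectorSizeType<void *>, uint32_t>::value,
+//                 "pointer-sized elements get a 32-bit Size/Capacity");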
+
+/// Figure out the offset of the first element.
+template <class T, typename = void> struct SmallVectorAlignmentAndSize {
+ alignas(SmallVectorBase<SmallVectorSizeType<T>>) char Base[sizeof(
+ SmallVectorBase<SmallVectorSizeType<T>>)];
+ alignas(T) char FirstEl[sizeof(T)];
+};
+
+/// This is the part of SmallVectorTemplateBase which does not depend on whether
+/// the type T is a POD. The extra dummy template argument is used by ArrayRef
+/// to avoid unnecessarily requiring T to be complete.
+template <typename T, typename = void>
+class SmallVectorTemplateCommon
+ : public SmallVectorBase<SmallVectorSizeType<T>> {
+ using Base = SmallVectorBase<SmallVectorSizeType<T>>;
+
+ /// Find the address of the first element. For this pointer math to be valid
+ /// with small-size of 0 for T with lots of alignment, it's important that
+ /// SmallVectorStorage is properly-aligned even for small-size of 0.
+ void *getFirstEl() const {
+ return const_cast<void *>(reinterpret_cast<const void *>(
+ reinterpret_cast<const char *>(this) +
+ offsetof(SmallVectorAlignmentAndSize<T>, FirstEl)));
+ }
+ // Space after 'FirstEl' is clobbered, do not add any instance vars after it.
+
+protected:
+ SmallVectorTemplateCommon(size_t Size) : Base(getFirstEl(), Size) {}
+
+ void grow_pod(size_t MinSize, size_t TSize) {
+ Base::grow_pod(getFirstEl(), MinSize, TSize);
+ }
+
+  /// Return true if this is a SmallVector which has not had dynamic
+  /// memory allocated for it.
+ bool isSmall() const { return this->BeginX == getFirstEl(); }
+
+ /// Put this vector in a state of being small.
+ void resetToSmall() {
+ this->BeginX = getFirstEl();
+ this->Size = this->Capacity = 0; // FIXME: Setting Capacity to 0 is suspect.
+ }
+
+ /// Return true if V is an internal reference to the given range.
+ bool isReferenceToRange(const void *V, const void *First, const void *Last) const {
+ // Use std::less to avoid UB.
+ std::less<> LessThan;
+ return !LessThan(V, First) && LessThan(V, Last);
+ }
+
+ /// Return true if V is an internal reference to this vector.
+ bool isReferenceToStorage(const void *V) const {
+ return isReferenceToRange(V, this->begin(), this->end());
+ }
+
+ /// Return true if First and Last form a valid (possibly empty) range in this
+ /// vector's storage.
+ bool isRangeInStorage(const void *First, const void *Last) const {
+ // Use std::less to avoid UB.
+ std::less<> LessThan;
+ return !LessThan(First, this->begin()) && !LessThan(Last, First) &&
+ !LessThan(this->end(), Last);
+ }
+
+ /// Return true unless Elt will be invalidated by resizing the vector to
+ /// NewSize.
+ bool isSafeToReferenceAfterResize(const void *Elt, size_t NewSize) {
+ // Past the end.
+ if (LLVM_LIKELY(!isReferenceToStorage(Elt)))
+ return true;
+
+ // Return false if Elt will be destroyed by shrinking.
+ if (NewSize <= this->size())
+ return Elt < this->begin() + NewSize;
+
+ // Return false if we need to grow.
+ return NewSize <= this->capacity();
+ }
+
+ /// Check whether Elt will be invalidated by resizing the vector to NewSize.
+ void assertSafeToReferenceAfterResize(const void *Elt, size_t NewSize) {
+ assert(isSafeToReferenceAfterResize(Elt, NewSize) &&
+ "Attempting to reference an element of the vector in an operation "
+ "that invalidates it");
+ }
+
+ /// Check whether Elt will be invalidated by increasing the size of the
+ /// vector by N.
+ void assertSafeToAdd(const void *Elt, size_t N = 1) {
+ this->assertSafeToReferenceAfterResize(Elt, this->size() + N);
+ }
+
+ /// Check whether any part of the range will be invalidated by clearing.
+ void assertSafeToReferenceAfterClear(const T *From, const T *To) {
+ if (From == To)
+ return;
+ this->assertSafeToReferenceAfterResize(From, 0);
+ this->assertSafeToReferenceAfterResize(To - 1, 0);
+ }
+ template <
+ class ItTy,
+ std::enable_if_t<!std::is_same<std::remove_const_t<ItTy>, T *>::value,
+ bool> = false>
+ void assertSafeToReferenceAfterClear(ItTy, ItTy) {}
+
+ /// Check whether any part of the range will be invalidated by growing.
+ void assertSafeToAddRange(const T *From, const T *To) {
+ if (From == To)
+ return;
+ this->assertSafeToAdd(From, To - From);
+ this->assertSafeToAdd(To - 1, To - From);
+ }
+ template <
+ class ItTy,
+ std::enable_if_t<!std::is_same<std::remove_const_t<ItTy>, T *>::value,
+ bool> = false>
+ void assertSafeToAddRange(ItTy, ItTy) {}
+
+ /// Reserve enough space to add one element, and return the updated element
+ /// pointer in case it was a reference to the storage.
+ template <class U>
+ static const T *reserveForParamAndGetAddressImpl(U *This, const T &Elt,
+ size_t N) {
+ size_t NewSize = This->size() + N;
+ if (LLVM_LIKELY(NewSize <= This->capacity()))
+ return &Elt;
+
+ bool ReferencesStorage = false;
+ int64_t Index = -1;
+ if (!U::TakesParamByValue) {
+ if (LLVM_UNLIKELY(This->isReferenceToStorage(&Elt))) {
+ ReferencesStorage = true;
+ Index = &Elt - This->begin();
+ }
+ }
+ This->grow(NewSize);
+ return ReferencesStorage ? This->begin() + Index : &Elt;
+ }
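+
+  // An illustrative sketch of the aliasing hazard the helper above guards
+  // against (`V' is hypothetical): in `V.push_back(V[0])', growing the buffer
+  // would otherwise leave the argument dangling. When the parameter is taken
+  // by reference (!TakesParamByValue), the element's index is remembered and
+  // its address is recomputed after the grow; cheap trivially-copyable types
+  // are simply passed by value instead.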
+
+public:
+ using size_type = size_t;
+ using difference_type = ptrdiff_t;
+ using value_type = T;
+ using iterator = T *;
+ using const_iterator = const T *;
+
+ using const_reverse_iterator = std::reverse_iterator<const_iterator>;
+ using reverse_iterator = std::reverse_iterator<iterator>;
+
+ using reference = T &;
+ using const_reference = const T &;
+ using pointer = T *;
+ using const_pointer = const T *;
+
+ using Base::capacity;
+ using Base::empty;
+ using Base::size;
+
+ // forward iterator creation methods.
+ iterator begin() { return (iterator)this->BeginX; }
+ const_iterator begin() const { return (const_iterator)this->BeginX; }
+ iterator end() { return begin() + size(); }
+ const_iterator end() const { return begin() + size(); }
+
+ // reverse iterator creation methods.
+ reverse_iterator rbegin() { return reverse_iterator(end()); }
+ const_reverse_iterator rbegin() const{ return const_reverse_iterator(end()); }
+ reverse_iterator rend() { return reverse_iterator(begin()); }
+ const_reverse_iterator rend() const { return const_reverse_iterator(begin());}
+
+ size_type size_in_bytes() const { return size() * sizeof(T); }
+ size_type max_size() const {
+ return std::min(this->SizeTypeMax(), size_type(-1) / sizeof(T));
+ }
+
+ size_t capacity_in_bytes() const { return capacity() * sizeof(T); }
+
+ /// Return a pointer to the vector's buffer, even if empty().
+ pointer data() { return pointer(begin()); }
+ /// Return a pointer to the vector's buffer, even if empty().
+ const_pointer data() const { return const_pointer(begin()); }
+
+ reference operator[](size_type idx) {
+ assert(idx < size());
+ return begin()[idx];
+ }
+ const_reference operator[](size_type idx) const {
+ assert(idx < size());
+ return begin()[idx];
+ }
+
+ reference front() {
+ assert(!empty());
+ return begin()[0];
+ }
+ const_reference front() const {
+ assert(!empty());
+ return begin()[0];
+ }
+
+ reference back() {
+ assert(!empty());
+ return end()[-1];
+ }
+ const_reference back() const {
+ assert(!empty());
+ return end()[-1];
+ }
+};
+
+/// SmallVectorTemplateBase<TriviallyCopyable = false> - This is where we put
+/// method implementations that are designed to work with non-trivial T's.
+///
+/// We approximate is_trivially_copyable with trivial move/copy construction and
+/// trivial destruction. While the standard doesn't specify that you're allowed
+/// to copy these types with memcpy, there is no way for the type to observe
+/// this. This catches the important case of std::pair<POD, POD>, which is not
+/// trivially assignable.
+template <typename T, bool = (is_trivially_copy_constructible<T>::value) &&
+ (is_trivially_move_constructible<T>::value) &&
+ std::is_trivially_destructible<T>::value>
+class SmallVectorTemplateBase : public SmallVectorTemplateCommon<T> {
+ friend class SmallVectorTemplateCommon<T>;
+
+protected:
+ static constexpr bool TakesParamByValue = false;
+ using ValueParamT = const T &;
+
+ SmallVectorTemplateBase(size_t Size) : SmallVectorTemplateCommon<T>(Size) {}
+
+ static void destroy_range(T *S, T *E) {
+ while (S != E) {
+ --E;
+ E->~T();
+ }
+ }
+
+ /// Move the range [I, E) into the uninitialized memory starting with "Dest",
+ /// constructing elements as needed.
+ template<typename It1, typename It2>
+ static void uninitialized_move(It1 I, It1 E, It2 Dest) {
+ std::uninitialized_copy(std::make_move_iterator(I),
+ std::make_move_iterator(E), Dest);
+ }
+
+ /// Copy the range [I, E) onto the uninitialized memory starting with "Dest",
+ /// constructing elements as needed.
+ template<typename It1, typename It2>
+ static void uninitialized_copy(It1 I, It1 E, It2 Dest) {
+ std::uninitialized_copy(I, E, Dest);
+ }
+
+ /// Grow the allocated memory (without initializing new elements), doubling
+ /// the size of the allocated memory. Guarantees space for at least one more
+ /// element, or MinSize more elements if specified.
+ void grow(size_t MinSize = 0);
+
+ /// Create a new allocation big enough for \p MinSize and pass back its size
+ /// in \p NewCapacity. This is the first section of \a grow().
+ T *mallocForGrow(size_t MinSize, size_t &NewCapacity) {
+ return static_cast<T *>(
+ SmallVectorBase<SmallVectorSizeType<T>>::mallocForGrow(
+ MinSize, sizeof(T), NewCapacity));
+ }
+
+ /// Move existing elements over to the new allocation \p NewElts, the middle
+ /// section of \a grow().
+ void moveElementsForGrow(T *NewElts);
+
+ /// Transfer ownership of the allocation, finishing up \a grow().
+ void takeAllocationForGrow(T *NewElts, size_t NewCapacity);
+
+ /// Reserve enough space to add one element, and return the updated element
+ /// pointer in case it was a reference to the storage.
+ const T *reserveForParamAndGetAddress(const T &Elt, size_t N = 1) {
+ return this->reserveForParamAndGetAddressImpl(this, Elt, N);
+ }
+
+ /// Reserve enough space to add one element, and return the updated element
+ /// pointer in case it was a reference to the storage.
+ T *reserveForParamAndGetAddress(T &Elt, size_t N = 1) {
+ return const_cast<T *>(
+ this->reserveForParamAndGetAddressImpl(this, Elt, N));
+ }
+
+ static T &&forward_value_param(T &&V) { return std::move(V); }
+ static const T &forward_value_param(const T &V) { return V; }
+
+ void growAndAssign(size_t NumElts, const T &Elt) {
+ // Grow manually in case Elt is an internal reference.
+ size_t NewCapacity;
+ T *NewElts = mallocForGrow(NumElts, NewCapacity);
+ std::uninitialized_fill_n(NewElts, NumElts, Elt);
+ this->destroy_range(this->begin(), this->end());
+ takeAllocationForGrow(NewElts, NewCapacity);
+ this->set_size(NumElts);
+ }
+
+ template <typename... ArgTypes> T &growAndEmplaceBack(ArgTypes &&... Args) {
+ // Grow manually in case one of Args is an internal reference.
+ size_t NewCapacity;
+ T *NewElts = mallocForGrow(0, NewCapacity);
+ ::new ((void *)(NewElts + this->size())) T(std::forward<ArgTypes>(Args)...);
+ moveElementsForGrow(NewElts);
+ takeAllocationForGrow(NewElts, NewCapacity);
+ this->set_size(this->size() + 1);
+ return this->back();
+ }
+
+public:
+ void push_back(const T &Elt) {
+ const T *EltPtr = reserveForParamAndGetAddress(Elt);
+ ::new ((void *)this->end()) T(*EltPtr);
+ this->set_size(this->size() + 1);
+ }
+
+ void push_back(T &&Elt) {
+ T *EltPtr = reserveForParamAndGetAddress(Elt);
+ ::new ((void *)this->end()) T(::std::move(*EltPtr));
+ this->set_size(this->size() + 1);
+ }
+
+ void pop_back() {
+ this->set_size(this->size() - 1);
+ this->end()->~T();
+ }
+};
+
+// Define this out-of-line to dissuade the C++ compiler from inlining it.
+template <typename T, bool TriviallyCopyable>
+void SmallVectorTemplateBase<T, TriviallyCopyable>::grow(size_t MinSize) {
+ size_t NewCapacity;
+ T *NewElts = mallocForGrow(MinSize, NewCapacity);
+ moveElementsForGrow(NewElts);
+ takeAllocationForGrow(NewElts, NewCapacity);
+}
+
+// Define this out-of-line to dissuade the C++ compiler from inlining it.
+template <typename T, bool TriviallyCopyable>
+void SmallVectorTemplateBase<T, TriviallyCopyable>::moveElementsForGrow(
+ T *NewElts) {
+ // Move the elements over.
+ this->uninitialized_move(this->begin(), this->end(), NewElts);
+
+ // Destroy the original elements.
+ destroy_range(this->begin(), this->end());
+}
+
+// Define this out-of-line to dissuade the C++ compiler from inlining it.
+template <typename T, bool TriviallyCopyable>
+void SmallVectorTemplateBase<T, TriviallyCopyable>::takeAllocationForGrow(
+ T *NewElts, size_t NewCapacity) {
+ // If this wasn't grown from the inline copy, deallocate the old space.
+ if (!this->isSmall())
+ free(this->begin());
+
+ this->BeginX = NewElts;
+ this->Capacity = NewCapacity;
+}
+
+/// SmallVectorTemplateBase<TriviallyCopyable = true> - This is where we put
+/// method implementations that are designed to work with trivially copyable
+/// T's. This allows using memcpy in place of copy/move construction and
+/// skipping destruction.
+template <typename T>
+class SmallVectorTemplateBase<T, true> : public SmallVectorTemplateCommon<T> {
+ friend class SmallVectorTemplateCommon<T>;
+
+protected:
+ /// True if it's cheap enough to take parameters by value. Doing so avoids
+ /// overhead related to mitigations for reference invalidation.
+ static constexpr bool TakesParamByValue = sizeof(T) <= 2 * sizeof(void *);
+
+ /// Either const T& or T, depending on whether it's cheap enough to take
+ /// parameters by value.
+ using ValueParamT =
+ typename std::conditional<TakesParamByValue, T, const T &>::type;
+
+ SmallVectorTemplateBase(size_t Size) : SmallVectorTemplateCommon<T>(Size) {}
+
+  // No need to do a destroy loop for PODs.
+ static void destroy_range(T *, T *) {}
+
+ /// Move the range [I, E) onto the uninitialized memory
+ /// starting with "Dest", constructing elements into it as needed.
+ template<typename It1, typename It2>
+ static void uninitialized_move(It1 I, It1 E, It2 Dest) {
+ // Just do a copy.
+ uninitialized_copy(I, E, Dest);
+ }
+
+ /// Copy the range [I, E) onto the uninitialized memory
+ /// starting with "Dest", constructing elements into it as needed.
+ template<typename It1, typename It2>
+ static void uninitialized_copy(It1 I, It1 E, It2 Dest) {
+ // Arbitrary iterator types; just use the basic implementation.
+ std::uninitialized_copy(I, E, Dest);
+ }
+
+ /// Copy the range [I, E) onto the uninitialized memory
+ /// starting with "Dest", constructing elements into it as needed.
+ template <typename T1, typename T2>
+ static void uninitialized_copy(
+ T1 *I, T1 *E, T2 *Dest,
+ std::enable_if_t<std::is_same<typename std::remove_const<T1>::type,
+ T2>::value> * = nullptr) {
+ // Use memcpy for PODs iterated by pointers (which includes SmallVector
+ // iterators): std::uninitialized_copy optimizes to memmove, but we can
+ // use memcpy here. Note that I and E are iterators and thus might be
+ // invalid for memcpy if they are equal.
+ if (I != E)
+ memcpy(reinterpret_cast<void *>(Dest), I, (E - I) * sizeof(T));
+ }
+
+ /// Double the size of the allocated memory, guaranteeing space for at
+ /// least one more element or MinSize if specified.
+ void grow(size_t MinSize = 0) { this->grow_pod(MinSize, sizeof(T)); }
+
+ /// Reserve enough space to add one element, and return the updated element
+ /// pointer in case it was a reference to the storage.
+ const T *reserveForParamAndGetAddress(const T &Elt, size_t N = 1) {
+ return this->reserveForParamAndGetAddressImpl(this, Elt, N);
+ }
+
+ /// Reserve enough space to add one element, and return the updated element
+ /// pointer in case it was a reference to the storage.
+ T *reserveForParamAndGetAddress(T &Elt, size_t N = 1) {
+ return const_cast<T *>(
+ this->reserveForParamAndGetAddressImpl(this, Elt, N));
+ }
+
+ /// Copy \p V or return a reference, depending on \a ValueParamT.
+ static ValueParamT forward_value_param(ValueParamT V) { return V; }
+
+ void growAndAssign(size_t NumElts, T Elt) {
+ // Elt has been copied in case it's an internal reference, side-stepping
+ // reference invalidation problems without losing the realloc optimization.
+ this->set_size(0);
+ this->grow(NumElts);
+ std::uninitialized_fill_n(this->begin(), NumElts, Elt);
+ this->set_size(NumElts);
+ }
+
+ template <typename... ArgTypes> T &growAndEmplaceBack(ArgTypes &&... Args) {
+ // Use push_back with a copy in case Args has an internal reference,
+ // side-stepping reference invalidation problems without losing the realloc
+ // optimization.
+ push_back(T(std::forward<ArgTypes>(Args)...));
+ return this->back();
+ }
+
+public:
+ void push_back(ValueParamT Elt) {
+ const T *EltPtr = reserveForParamAndGetAddress(Elt);
+ memcpy(reinterpret_cast<void *>(this->end()), EltPtr, sizeof(T));
+ this->set_size(this->size() + 1);
+ }
+
+ void pop_back() { this->set_size(this->size() - 1); }
+};
+
+/// This class consists of common code factored out of the SmallVector class to
+/// reduce code duplication based on the SmallVector 'N' template parameter.
+template <typename T>
+class SmallVectorImpl : public SmallVectorTemplateBase<T> {
+ using SuperClass = SmallVectorTemplateBase<T>;
+
+public:
+ using iterator = typename SuperClass::iterator;
+ using const_iterator = typename SuperClass::const_iterator;
+ using reference = typename SuperClass::reference;
+ using size_type = typename SuperClass::size_type;
+
+protected:
+ using SmallVectorTemplateBase<T>::TakesParamByValue;
+ using ValueParamT = typename SuperClass::ValueParamT;
+
+ // Default ctor - Initialize to empty.
+ explicit SmallVectorImpl(unsigned N)
+ : SmallVectorTemplateBase<T>(N) {}
+
+ void assignRemote(SmallVectorImpl &&RHS) {
+ this->destroy_range(this->begin(), this->end());
+ if (!this->isSmall())
+ free(this->begin());
+ this->BeginX = RHS.BeginX;
+ this->Size = RHS.Size;
+ this->Capacity = RHS.Capacity;
+ RHS.resetToSmall();
+ }
+
+public:
+ SmallVectorImpl(const SmallVectorImpl &) = delete;
+
+ ~SmallVectorImpl() {
+ // Subclass has already destructed this vector's elements.
+ // If this wasn't grown from the inline copy, deallocate the old space.
+ if (!this->isSmall())
+ free(this->begin());
+ }
+
+ void clear() {
+ this->destroy_range(this->begin(), this->end());
+ this->Size = 0;
+ }
+
+private:
+ // Make set_size() private to avoid misuse in subclasses.
+ using SuperClass::set_size;
+
+ template <bool ForOverwrite> void resizeImpl(size_type N) {
+ if (N == this->size())
+ return;
+
+ if (N < this->size()) {
+ this->truncate(N);
+ return;
+ }
+
+ this->reserve(N);
+ for (auto I = this->end(), E = this->begin() + N; I != E; ++I)
+ if (ForOverwrite)
+ new (&*I) T;
+ else
+ new (&*I) T();
+ this->set_size(N);
+ }
+
+public:
+ void resize(size_type N) { resizeImpl<false>(N); }
+
+  /// Like resize, but when \p T is POD the new values won't be initialized.
+ void resize_for_overwrite(size_type N) { resizeImpl<true>(N); }
+
+  /// Like resize, but requires that \p N is less than or equal to \a size().
+ void truncate(size_type N) {
+ assert(this->size() >= N && "Cannot increase size with truncate");
+ this->destroy_range(this->begin() + N, this->end());
+ this->set_size(N);
+ }
+
+ void resize(size_type N, ValueParamT NV) {
+ if (N == this->size())
+ return;
+
+ if (N < this->size()) {
+ this->truncate(N);
+ return;
+ }
+
+ // N > this->size(). Defer to append.
+ this->append(N - this->size(), NV);
+ }
+
+ void reserve(size_type N) {
+ if (this->capacity() < N)
+ this->grow(N);
+ }
+
+ void pop_back_n(size_type NumItems) {
+ assert(this->size() >= NumItems);
+ truncate(this->size() - NumItems);
+ }
+
+ LLVM_NODISCARD T pop_back_val() {
+ T Result = ::std::move(this->back());
+ this->pop_back();
+ return Result;
+ }
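+
+  // A common worklist idiom built on pop_back_val (illustrative sketch;
+  // `Worklist' and `process' are hypothetical):
+  //   SmallVector<int> Worklist = {1, 2, 3};
+  //   while (!Worklist.empty())
+  //     process(Worklist.pop_back_val());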
+
+ void swap(SmallVectorImpl &RHS);
+
+ /// Add the specified range to the end of the SmallVector.
+ template <typename in_iter,
+ typename = std::enable_if_t<std::is_convertible<
+ typename std::iterator_traits<in_iter>::iterator_category,
+ std::input_iterator_tag>::value>>
+ void append(in_iter in_start, in_iter in_end) {
+ this->assertSafeToAddRange(in_start, in_end);
+ size_type NumInputs = std::distance(in_start, in_end);
+ this->reserve(this->size() + NumInputs);
+ this->uninitialized_copy(in_start, in_end, this->end());
+ this->set_size(this->size() + NumInputs);
+ }
+
+ /// Append \p NumInputs copies of \p Elt to the end.
+ void append(size_type NumInputs, ValueParamT Elt) {
+ const T *EltPtr = this->reserveForParamAndGetAddress(Elt, NumInputs);
+ std::uninitialized_fill_n(this->end(), NumInputs, *EltPtr);
+ this->set_size(this->size() + NumInputs);
+ }
+
+ void append(std::initializer_list<T> IL) {
+ append(IL.begin(), IL.end());
+ }
+
+ void append(const SmallVectorImpl &RHS) { append(RHS.begin(), RHS.end()); }
+
+ void assign(size_type NumElts, ValueParamT Elt) {
+ // Note that Elt could be an internal reference.
+ if (NumElts > this->capacity()) {
+ this->growAndAssign(NumElts, Elt);
+ return;
+ }
+
+ // Assign over existing elements.
+ std::fill_n(this->begin(), std::min(NumElts, this->size()), Elt);
+ if (NumElts > this->size())
+ std::uninitialized_fill_n(this->end(), NumElts - this->size(), Elt);
+ else if (NumElts < this->size())
+ this->destroy_range(this->begin() + NumElts, this->end());
+ this->set_size(NumElts);
+ }
+
+ // FIXME: Consider assigning over existing elements, rather than clearing &
+ // re-initializing them - for all assign(...) variants.
+
+ template <typename in_iter,
+ typename = std::enable_if_t<std::is_convertible<
+ typename std::iterator_traits<in_iter>::iterator_category,
+ std::input_iterator_tag>::value>>
+ void assign(in_iter in_start, in_iter in_end) {
+ this->assertSafeToReferenceAfterClear(in_start, in_end);
+ clear();
+ append(in_start, in_end);
+ }
+
+ void assign(std::initializer_list<T> IL) {
+ clear();
+ append(IL);
+ }
+
+ void assign(const SmallVectorImpl &RHS) { assign(RHS.begin(), RHS.end()); }
+
+ iterator erase(const_iterator CI) {
+ // Just cast away constness because this is a non-const member function.
+ iterator I = const_cast<iterator>(CI);
+
+ assert(this->isReferenceToStorage(CI) && "Iterator to erase is out of bounds.");
+
+ iterator N = I;
+ // Shift all elts down one.
+ std::move(I+1, this->end(), I);
+ // Drop the last elt.
+ this->pop_back();
+    return N;
+ }
+
+ iterator erase(const_iterator CS, const_iterator CE) {
+ // Just cast away constness because this is a non-const member function.
+ iterator S = const_cast<iterator>(CS);
+ iterator E = const_cast<iterator>(CE);
+
+ assert(this->isRangeInStorage(S, E) && "Range to erase is out of bounds.");
+
+ iterator N = S;
+ // Shift all elts down.
+ iterator I = std::move(E, this->end(), S);
+ // Drop the last elts.
+ this->destroy_range(I, this->end());
+ this->set_size(I - this->begin());
+    return N;
+ }
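+
+  // Illustrative sketch of the two erase overloads above (`V' is
+  // hypothetical):
+  //   SmallVector<int> V = {1, 2, 3, 4};
+  //   V.erase(V.begin());                // V == {2, 3, 4}
+  //   V.erase(V.begin(), V.begin() + 2); // V == {4}
+  // Both forms invalidate iterators at or past the erased position.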
+
+private:
+ template <class ArgType> iterator insert_one_impl(iterator I, ArgType &&Elt) {
+ // Callers ensure that ArgType is derived from T.
+ static_assert(
+ std::is_same<std::remove_const_t<std::remove_reference_t<ArgType>>,
+ T>::value,
+ "ArgType must be derived from T!");
+
+ if (I == this->end()) { // Important special case for empty vector.
+ this->push_back(::std::forward<ArgType>(Elt));
+ return this->end()-1;
+ }
+
+ assert(this->isReferenceToStorage(I) && "Insertion iterator is out of bounds.");
+
+ // Grow if necessary.
+ size_t Index = I - this->begin();
+ std::remove_reference_t<ArgType> *EltPtr =
+ this->reserveForParamAndGetAddress(Elt);
+ I = this->begin() + Index;
+
+ ::new ((void*) this->end()) T(::std::move(this->back()));
+ // Push everything else over.
+ std::move_backward(I, this->end()-1, this->end());
+ this->set_size(this->size() + 1);
+
+ // If we just moved the element we're inserting, be sure to update
+ // the reference (never happens if TakesParamByValue).
+ static_assert(!TakesParamByValue || std::is_same<ArgType, T>::value,
+ "ArgType must be 'T' when taking by value!");
+ if (!TakesParamByValue && this->isReferenceToRange(EltPtr, I, this->end()))
+ ++EltPtr;
+
+ *I = ::std::forward<ArgType>(*EltPtr);
+ return I;
+ }
+
+public:
+ iterator insert(iterator I, T &&Elt) {
+ return insert_one_impl(I, this->forward_value_param(std::move(Elt)));
+ }
+
+ iterator insert(iterator I, const T &Elt) {
+ return insert_one_impl(I, this->forward_value_param(Elt));
+ }
+
+ iterator insert(iterator I, size_type NumToInsert, ValueParamT Elt) {
+ // Convert iterator to elt# to avoid invalidating iterator when we reserve()
+ size_t InsertElt = I - this->begin();
+
+ if (I == this->end()) { // Important special case for empty vector.
+ append(NumToInsert, Elt);
+ return this->begin()+InsertElt;
+ }
+
+ assert(this->isReferenceToStorage(I) && "Insertion iterator is out of bounds.");
+
+ // Ensure there is enough space, and get the (maybe updated) address of
+ // Elt.
+ const T *EltPtr = this->reserveForParamAndGetAddress(Elt, NumToInsert);
+
+ // Uninvalidate the iterator.
+ I = this->begin()+InsertElt;
+
+ // If there are more elements between the insertion point and the end of the
+ // range than there are being inserted, we can use a simple approach to
+ // insertion. Since we already reserved space, we know that this won't
+ // reallocate the vector.
+ if (size_t(this->end()-I) >= NumToInsert) {
+ T *OldEnd = this->end();
+ append(std::move_iterator<iterator>(this->end() - NumToInsert),
+ std::move_iterator<iterator>(this->end()));
+
+ // Copy the existing elements that get replaced.
+ std::move_backward(I, OldEnd-NumToInsert, OldEnd);
+
+ // If we just moved the element we're inserting, be sure to update
+ // the reference (never happens if TakesParamByValue).
+ if (!TakesParamByValue && I <= EltPtr && EltPtr < this->end())
+ EltPtr += NumToInsert;
+
+ std::fill_n(I, NumToInsert, *EltPtr);
+ return I;
+ }
+
+ // Otherwise, we're inserting more elements than exist already, and we're
+ // not inserting at the end.
+
+ // Move over the elements that we're about to overwrite.
+ T *OldEnd = this->end();
+ this->set_size(this->size() + NumToInsert);
+ size_t NumOverwritten = OldEnd-I;
+ this->uninitialized_move(I, OldEnd, this->end()-NumOverwritten);
+
+ // If we just moved the element we're inserting, be sure to update
+ // the reference (never happens if TakesParamByValue).
+ if (!TakesParamByValue && I <= EltPtr && EltPtr < this->end())
+ EltPtr += NumToInsert;
+
+ // Replace the overwritten part.
+ std::fill_n(I, NumOverwritten, *EltPtr);
+
+ // Insert the non-overwritten middle part.
+ std::uninitialized_fill_n(OldEnd, NumToInsert - NumOverwritten, *EltPtr);
+ return I;
+ }
+
+ template <typename ItTy,
+ typename = std::enable_if_t<std::is_convertible<
+ typename std::iterator_traits<ItTy>::iterator_category,
+ std::input_iterator_tag>::value>>
+ iterator insert(iterator I, ItTy From, ItTy To) {
+ // Convert iterator to elt# to avoid invalidating iterator when we reserve()
+ size_t InsertElt = I - this->begin();
+
+ if (I == this->end()) { // Important special case for empty vector.
+ append(From, To);
+ return this->begin()+InsertElt;
+ }
+
+ assert(this->isReferenceToStorage(I) && "Insertion iterator is out of bounds.");
+
+ // Check that the reserve that follows doesn't invalidate the iterators.
+ this->assertSafeToAddRange(From, To);
+
+ size_t NumToInsert = std::distance(From, To);
+
+ // Ensure there is enough space.
+ reserve(this->size() + NumToInsert);
+
+ // Uninvalidate the iterator.
+ I = this->begin()+InsertElt;
+
+ // If there are more elements between the insertion point and the end of the
+ // range than there are being inserted, we can use a simple approach to
+ // insertion. Since we already reserved space, we know that this won't
+ // reallocate the vector.
+ if (size_t(this->end()-I) >= NumToInsert) {
+ T *OldEnd = this->end();
+ append(std::move_iterator<iterator>(this->end() - NumToInsert),
+ std::move_iterator<iterator>(this->end()));
+
+ // Copy the existing elements that get replaced.
+ std::move_backward(I, OldEnd-NumToInsert, OldEnd);
+
+ std::copy(From, To, I);
+ return I;
+ }
+
+ // Otherwise, we're inserting more elements than exist already, and we're
+ // not inserting at the end.
+
+ // Move over the elements that we're about to overwrite.
+ T *OldEnd = this->end();
+ this->set_size(this->size() + NumToInsert);
+ size_t NumOverwritten = OldEnd-I;
+ this->uninitialized_move(I, OldEnd, this->end()-NumOverwritten);
+
+ // Replace the overwritten part.
+ for (T *J = I; NumOverwritten > 0; --NumOverwritten) {
+ *J = *From;
+ ++J; ++From;
+ }
+
+ // Insert the non-overwritten middle part.
+ this->uninitialized_copy(From, To, OldEnd);
+ return I;
+ }
+
+ void insert(iterator I, std::initializer_list<T> IL) {
+ insert(I, IL.begin(), IL.end());
+ }
+
+ template <typename... ArgTypes> reference emplace_back(ArgTypes &&... Args) {
+ if (LLVM_UNLIKELY(this->size() >= this->capacity()))
+ return this->growAndEmplaceBack(std::forward<ArgTypes>(Args)...);
+
+ ::new ((void *)this->end()) T(std::forward<ArgTypes>(Args)...);
+ this->set_size(this->size() + 1);
+ return this->back();
+ }
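+
+  // Illustrative sketch of in-place construction (`Pairs' is hypothetical):
+  //   SmallVector<std::pair<int, int>> Pairs;
+  //   Pairs.emplace_back(1, 2); // constructs the pair directly in the buffer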
+
+ SmallVectorImpl &operator=(const SmallVectorImpl &RHS);
+
+ SmallVectorImpl &operator=(SmallVectorImpl &&RHS);
+
+ bool operator==(const SmallVectorImpl &RHS) const {
+ if (this->size() != RHS.size()) return false;
+ return std::equal(this->begin(), this->end(), RHS.begin());
+ }
+ bool operator!=(const SmallVectorImpl &RHS) const {
+ return !(*this == RHS);
+ }
+
+ bool operator<(const SmallVectorImpl &RHS) const {
+ return std::lexicographical_compare(this->begin(), this->end(),
+ RHS.begin(), RHS.end());
+ }
+};
+
+template <typename T>
+void SmallVectorImpl<T>::swap(SmallVectorImpl<T> &RHS) {
+ if (this == &RHS) return;
+
+ // We can only avoid copying elements if neither vector is small.
+ if (!this->isSmall() && !RHS.isSmall()) {
+ std::swap(this->BeginX, RHS.BeginX);
+ std::swap(this->Size, RHS.Size);
+ std::swap(this->Capacity, RHS.Capacity);
+ return;
+ }
+ this->reserve(RHS.size());
+ RHS.reserve(this->size());
+
+ // Swap the shared elements.
+ size_t NumShared = this->size();
+ if (NumShared > RHS.size()) NumShared = RHS.size();
+ for (size_type i = 0; i != NumShared; ++i)
+ std::swap((*this)[i], RHS[i]);
+
+ // Copy over the extra elts.
+ if (this->size() > RHS.size()) {
+ size_t EltDiff = this->size() - RHS.size();
+ this->uninitialized_copy(this->begin()+NumShared, this->end(), RHS.end());
+ RHS.set_size(RHS.size() + EltDiff);
+ this->destroy_range(this->begin()+NumShared, this->end());
+ this->set_size(NumShared);
+ } else if (RHS.size() > this->size()) {
+ size_t EltDiff = RHS.size() - this->size();
+ this->uninitialized_copy(RHS.begin()+NumShared, RHS.end(), this->end());
+ this->set_size(this->size() + EltDiff);
+ this->destroy_range(RHS.begin()+NumShared, RHS.end());
+ RHS.set_size(NumShared);
+ }
+}
+
+template <typename T>
+SmallVectorImpl<T> &SmallVectorImpl<T>::
+ operator=(const SmallVectorImpl<T> &RHS) {
+ // Avoid self-assignment.
+ if (this == &RHS) return *this;
+
+ // If we already have sufficient space, assign the common elements, then
+ // destroy any excess.
+ size_t RHSSize = RHS.size();
+ size_t CurSize = this->size();
+ if (CurSize >= RHSSize) {
+ // Assign common elements.
+ iterator NewEnd;
+ if (RHSSize)
+ NewEnd = std::copy(RHS.begin(), RHS.begin()+RHSSize, this->begin());
+ else
+ NewEnd = this->begin();
+
+ // Destroy excess elements.
+ this->destroy_range(NewEnd, this->end());
+
+ // Trim.
+ this->set_size(RHSSize);
+ return *this;
+ }
+
+ // If we have to grow to have enough elements, destroy the current elements.
+ // This allows us to avoid copying them during the grow.
+ // FIXME: don't do this if they're efficiently moveable.
+ if (this->capacity() < RHSSize) {
+ // Destroy current elements.
+ this->clear();
+ CurSize = 0;
+ this->grow(RHSSize);
+ } else if (CurSize) {
+ // Otherwise, use assignment for the already-constructed elements.
+ std::copy(RHS.begin(), RHS.begin()+CurSize, this->begin());
+ }
+
+ // Copy construct the new elements in place.
+ this->uninitialized_copy(RHS.begin()+CurSize, RHS.end(),
+ this->begin()+CurSize);
+
+ // Set end.
+ this->set_size(RHSSize);
+ return *this;
+}
+
+template <typename T>
+SmallVectorImpl<T> &SmallVectorImpl<T>::operator=(SmallVectorImpl<T> &&RHS) {
+ // Avoid self-assignment.
+ if (this == &RHS) return *this;
+
+ // If the RHS isn't small, clear this vector and then steal its buffer.
+ if (!RHS.isSmall()) {
+ this->assignRemote(std::move(RHS));
+ return *this;
+ }
+
+ // If we already have sufficient space, assign the common elements, then
+ // destroy any excess.
+ size_t RHSSize = RHS.size();
+ size_t CurSize = this->size();
+ if (CurSize >= RHSSize) {
+ // Assign common elements.
+ iterator NewEnd = this->begin();
+ if (RHSSize)
+ NewEnd = std::move(RHS.begin(), RHS.end(), NewEnd);
+
+ // Destroy excess elements and trim the bounds.
+ this->destroy_range(NewEnd, this->end());
+ this->set_size(RHSSize);
+
+ // Clear the RHS.
+ RHS.clear();
+
+ return *this;
+ }
+
+ // If we have to grow to have enough elements, destroy the current elements.
+ // This allows us to avoid copying them during the grow.
+ // FIXME: this may not actually make any sense if we can efficiently move
+ // elements.
+ if (this->capacity() < RHSSize) {
+ // Destroy current elements.
+ this->clear();
+ CurSize = 0;
+ this->grow(RHSSize);
+ } else if (CurSize) {
+ // Otherwise, use assignment for the already-constructed elements.
+ std::move(RHS.begin(), RHS.begin()+CurSize, this->begin());
+ }
+
+ // Move-construct the new elements in place.
+ this->uninitialized_move(RHS.begin()+CurSize, RHS.end(),
+ this->begin()+CurSize);
+
+ // Set end.
+ this->set_size(RHSSize);
+
+ RHS.clear();
+ return *this;
+}
+
+/// Storage for the SmallVector elements. This is specialized for the N=0 case
+/// to avoid allocating unnecessary storage.
+template <typename T, unsigned N>
+struct SmallVectorStorage {
+ alignas(T) char InlineElts[N * sizeof(T)];
+};
+
+/// We need the storage to be properly aligned even for small-size of 0 so that
+/// the pointer math in \a SmallVectorTemplateCommon::getFirstEl() is
+/// well-defined.
+template <typename T> struct alignas(T) SmallVectorStorage<T, 0> {};
+
+/// Forward declaration of SmallVector so that
+/// calculateSmallVectorDefaultInlinedElements can reference
+/// `sizeof(SmallVector<T, 0>)`.
+template <typename T, unsigned N> class LLVM_GSL_OWNER SmallVector;
+
+/// Helper class for calculating the default number of inline elements for
+/// `SmallVector<T>`.
+///
+/// This should be migrated to a constexpr function when our minimum
+/// compiler support is enough for multi-statement constexpr functions.
+template <typename T> struct CalculateSmallVectorDefaultInlinedElements {
+ // Parameter controlling the default number of inlined elements
+ // for `SmallVector<T>`.
+ //
+ // The default number of inlined elements ensures that
+ // 1. There is at least one inlined element.
+ // 2. `sizeof(SmallVector<T>) <= kPreferredSmallVectorSizeof` unless
+ // it contradicts 1.
+ static constexpr size_t kPreferredSmallVectorSizeof = 64;
+
+ // static_assert that sizeof(T) is not "too big".
+ //
+ // Because our policy guarantees at least one inlined element, it is possible
+ // for an arbitrarily large inlined element to allocate an arbitrarily large
+ // amount of inline storage. We generally consider it an antipattern for a
+ // SmallVector to allocate an excessive amount of inline storage, so we want
+ // to call attention to these cases and make sure that users are making an
+ // intentional decision if they request a lot of inline storage.
+ //
+ // We want this assertion to trigger in pathological cases, but otherwise
+ // not be too easy to hit. To accomplish that, the cutoff is actually somewhat
+ // larger than kPreferredSmallVectorSizeof (otherwise,
+ // `SmallVector<SmallVector<T>>` would be one easy way to trip it, and that
+ // pattern seems useful in practice).
+ //
+ // One wrinkle is that this assertion is in theory non-portable, since
+ // sizeof(T) is in general platform-dependent. However, we don't expect this
+ // to be much of an issue, because most LLVM development happens on 64-bit
+ // hosts, and therefore sizeof(T) is expected to *decrease* when compiled for
+ // 32-bit hosts, dodging the issue. The reverse situation, where development
+ // happens on a 32-bit host and then fails due to sizeof(T) *increasing* on a
+ // 64-bit host, is expected to be very rare.
+ static_assert(
+ sizeof(T) <= 256,
+ "You are trying to use a default number of inlined elements for "
+ "`SmallVector<T>` but `sizeof(T)` is really big! Please use an "
+ "explicit number of inlined elements with `SmallVector<T, N>` to make "
+ "sure you really want that much inline storage.");
+
+ // Discount the size of the header itself when calculating the maximum inline
+ // bytes.
+ static constexpr size_t PreferredInlineBytes =
+ kPreferredSmallVectorSizeof - sizeof(SmallVector<T, 0>);
+ static constexpr size_t NumElementsThatFit = PreferredInlineBytes / sizeof(T);
+ static constexpr size_t value =
+ NumElementsThatFit == 0 ? 1 : NumElementsThatFit;
+};
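+
+// A worked example of the calculation above, assuming a typical 64-bit host
+// where sizeof(SmallVector<void *, 0>) == 16:
+//   PreferredInlineBytes = 64 - 16 = 48
+//   NumElementsThatFit   = 48 / 8  = 6
+// so `SmallVector<void *>' defaults to 6 inline elements.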
+
+/// This is a 'vector' (really, a variable-sized array), optimized
+/// for the case when the array is small. It contains some number of elements
+/// in-place, which allows it to avoid heap allocation when the actual number of
+/// elements is below that threshold. This allows normal "small" cases to be
+/// fast without losing generality for large inputs.
+///
+/// \note
+/// In the absence of a well-motivated choice for the number of inlined
+/// elements \p N, it is recommended to use \c SmallVector<T> (that is,
+/// omitting the \p N). This will choose a default number of inlined elements
+/// reasonable for allocation on the stack (for example, trying to keep \c
+/// sizeof(SmallVector<T>) around 64 bytes).
+///
+/// \warning This does not attempt to be exception safe.
+///
+/// \see https://llvm.org/docs/ProgrammersManual.html#llvm-adt-smallvector-h
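+///
+/// A minimal usage sketch (illustrative only):
+/// \code
+///   SmallVector<int, 4> V;
+///   V.push_back(1);      // stored inline; no heap allocation
+///   V.append({2, 3, 4}); // still inline
+///   V.push_back(5);      // exceeds N = 4 and spills to the heap
+/// \endcode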
+template <typename T,
+ unsigned N = CalculateSmallVectorDefaultInlinedElements<T>::value>
+class LLVM_GSL_OWNER SmallVector : public SmallVectorImpl<T>,
+ SmallVectorStorage<T, N> {
+public:
+ SmallVector() : SmallVectorImpl<T>(N) {}
+
+ ~SmallVector() {
+ // Destroy the constructed elements in the vector.
+ this->destroy_range(this->begin(), this->end());
+ }
+
+ explicit SmallVector(size_t Size, const T &Value = T())
+ : SmallVectorImpl<T>(N) {
+ this->assign(Size, Value);
+ }
+
+ template <typename ItTy,
+ typename = std::enable_if_t<std::is_convertible<
+ typename std::iterator_traits<ItTy>::iterator_category,
+ std::input_iterator_tag>::value>>
+ SmallVector(ItTy S, ItTy E) : SmallVectorImpl<T>(N) {
+ this->append(S, E);
+ }
+
+ template <typename RangeTy>
+ explicit SmallVector(const iterator_range<RangeTy> &R)
+ : SmallVectorImpl<T>(N) {
+ this->append(R.begin(), R.end());
+ }
+
+ SmallVector(std::initializer_list<T> IL) : SmallVectorImpl<T>(N) {
+ this->assign(IL);
+ }
+
+ SmallVector(const SmallVector &RHS) : SmallVectorImpl<T>(N) {
+ if (!RHS.empty())
+ SmallVectorImpl<T>::operator=(RHS);
+ }
+
+ SmallVector &operator=(const SmallVector &RHS) {
+ SmallVectorImpl<T>::operator=(RHS);
+ return *this;
+ }
+
+ SmallVector(SmallVector &&RHS) : SmallVectorImpl<T>(N) {
+ if (!RHS.empty())
+ SmallVectorImpl<T>::operator=(::std::move(RHS));
+ }
+
+ SmallVector(SmallVectorImpl<T> &&RHS) : SmallVectorImpl<T>(N) {
+ if (!RHS.empty())
+ SmallVectorImpl<T>::operator=(::std::move(RHS));
+ }
+
+ SmallVector &operator=(SmallVector &&RHS) {
+ if (N) {
+ SmallVectorImpl<T>::operator=(::std::move(RHS));
+ return *this;
+ }
+ // SmallVectorImpl<T>::operator= does not leverage N==0. Optimize the
+ // case.
+ if (this == &RHS)
+ return *this;
+ if (RHS.empty()) {
+ this->destroy_range(this->begin(), this->end());
+ this->Size = 0;
+ } else {
+ this->assignRemote(std::move(RHS));
+ }
+ return *this;
+ }
+
+ SmallVector &operator=(SmallVectorImpl<T> &&RHS) {
+ SmallVectorImpl<T>::operator=(::std::move(RHS));
+ return *this;
+ }
+
+ SmallVector &operator=(std::initializer_list<T> IL) {
+ this->assign(IL);
+ return *this;
+ }
+};
+
+template <typename T, unsigned N>
+inline size_t capacity_in_bytes(const SmallVector<T, N> &X) {
+ return X.capacity_in_bytes();
+}
+
+template <typename RangeType>
+using ValueTypeFromRangeType =
+ typename std::remove_const<typename std::remove_reference<
+ decltype(*std::begin(std::declval<RangeType &>()))>::type>::type;
+
+/// Given a range of type R, iterate the entire range and return a
+/// SmallVector containing the elements of the range. This is useful, for
+/// example, when you want to iterate a range and then sort the results.
+template <unsigned Size, typename R>
+SmallVector<ValueTypeFromRangeType<R>, Size> to_vector(R &&Range) {
+ return {std::begin(Range), std::end(Range)};
+}
+template <typename R>
+SmallVector<ValueTypeFromRangeType<R>,
+ CalculateSmallVectorDefaultInlinedElements<
+ ValueTypeFromRangeType<R>>::value>
+to_vector(R &&Range) {
+ return {std::begin(Range), std::end(Range)};
+}
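+
+// Illustrative sketch of both overloads above (`Arr' is hypothetical):
+//   std::array<int, 3> Arr = {3, 1, 2};
+//   auto V1 = to_vector(Arr);    // default inline-element count
+//   auto V2 = to_vector<8>(Arr); // explicit inline-element count of 8
+//   std::sort(V1.begin(), V1.end()); // iterate a range, then sort the copy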
+
+} // end namespace llvm
+
+namespace std {
+
+ /// Implement std::swap in terms of SmallVector swap.
+ template<typename T>
+ inline void
+ swap(llvm::SmallVectorImpl<T> &LHS, llvm::SmallVectorImpl<T> &RHS) {
+ LHS.swap(RHS);
+ }
+
+ /// Implement std::swap in terms of SmallVector swap.
+ template<typename T, unsigned N>
+ inline void
+ swap(llvm::SmallVector<T, N> &LHS, llvm::SmallVector<T, N> &RHS) {
+ LHS.swap(RHS);
+ }
+
+} // end namespace std
+
+#endif // LLVM_ADT_SMALLVECTOR_H
+
+#ifdef __GNUC__
+#pragma GCC diagnostic pop
+#endif
diff --git a/contrib/libs/llvm14/include/llvm/ADT/SparseBitVector.h b/contrib/libs/llvm14/include/llvm/ADT/SparseBitVector.h
new file mode 100644
index 0000000000..4609c871bf
--- /dev/null
+++ b/contrib/libs/llvm14/include/llvm/ADT/SparseBitVector.h
@@ -0,0 +1,904 @@
+#pragma once
+
+#ifdef __GNUC__
+#pragma GCC diagnostic push
+#pragma GCC diagnostic ignored "-Wunused-parameter"
+#endif
+
+//===- llvm/ADT/SparseBitVector.h - Efficient Sparse BitVector --*- C++ -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+///
+/// \file
+/// This file defines the SparseBitVector class. See the doxygen comment for
+/// SparseBitVector for more details on the algorithm used.
+///
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_ADT_SPARSEBITVECTOR_H
+#define LLVM_ADT_SPARSEBITVECTOR_H
+
+#include "llvm/Support/ErrorHandling.h"
+#include "llvm/Support/MathExtras.h"
+#include "llvm/Support/raw_ostream.h"
+#include <cassert>
+#include <climits>
+#include <cstring>
+#include <iterator>
+#include <list>
+
+namespace llvm {
+
+/// SparseBitVector is an implementation of a bitvector that is sparse by only
+/// storing the elements that have non-zero bits set. In order to make this
+/// fast for the most common cases, SparseBitVector is implemented as a linked
+/// list of SparseBitVectorElements. We maintain a pointer to the last
+/// SparseBitVectorElement accessed (in the form of a list iterator), in order
+/// to make multiple in-order test/set constant time after the first one is
+/// executed. Note that using vectors to store SparseBitVectorElements does
+/// not work out very well because it causes insertion in the middle to take
+/// enormous amounts of time with a large number of bits. Other structures that
+/// have better worst cases for insertion in the middle (various balanced trees,
+/// etc.) do not perform as well in practice as a linked list with this iterator
+/// kept up to date. They are also significantly more memory intensive.
+
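+// A minimal usage sketch (illustrative only):
+//   SparseBitVector<> BV;
+//   BV.set(5);
+//   BV.set(100000);      // far-apart indices remain cheap to store
+//   bool B = BV.test(5); // true
+//   BV.reset(5);
+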
+template <unsigned ElementSize = 128> struct SparseBitVectorElement {
+public:
+ using BitWord = unsigned long;
+ using size_type = unsigned;
+ enum {
+ BITWORD_SIZE = sizeof(BitWord) * CHAR_BIT,
+ BITWORDS_PER_ELEMENT = (ElementSize + BITWORD_SIZE - 1) / BITWORD_SIZE,
+ BITS_PER_ELEMENT = ElementSize
+ };
+
+private:
+ // Index of Element in terms of where first bit starts.
+ unsigned ElementIndex;
+ BitWord Bits[BITWORDS_PER_ELEMENT];
+
+ SparseBitVectorElement() {
+ ElementIndex = ~0U;
+ memset(&Bits[0], 0, sizeof (BitWord) * BITWORDS_PER_ELEMENT);
+ }
+
+public:
+ explicit SparseBitVectorElement(unsigned Idx) {
+ ElementIndex = Idx;
+ memset(&Bits[0], 0, sizeof (BitWord) * BITWORDS_PER_ELEMENT);
+ }
+
+ // Comparison.
+ bool operator==(const SparseBitVectorElement &RHS) const {
+ if (ElementIndex != RHS.ElementIndex)
+ return false;
+ for (unsigned i = 0; i < BITWORDS_PER_ELEMENT; ++i)
+ if (Bits[i] != RHS.Bits[i])
+ return false;
+ return true;
+ }
+
+ bool operator!=(const SparseBitVectorElement &RHS) const {
+ return !(*this == RHS);
+ }
+
+ // Return the bits that make up word Idx in our element.
+ BitWord word(unsigned Idx) const {
+ assert(Idx < BITWORDS_PER_ELEMENT);
+ return Bits[Idx];
+ }
+
+ unsigned index() const {
+ return ElementIndex;
+ }
+
+ bool empty() const {
+ for (unsigned i = 0; i < BITWORDS_PER_ELEMENT; ++i)
+ if (Bits[i])
+ return false;
+ return true;
+ }
+
+ void set(unsigned Idx) {
+ Bits[Idx / BITWORD_SIZE] |= 1L << (Idx % BITWORD_SIZE);
+ }
+
+ bool test_and_set(unsigned Idx) {
+ bool old = test(Idx);
+ if (!old) {
+ set(Idx);
+ return true;
+ }
+ return false;
+ }
+
+ void reset(unsigned Idx) {
+ Bits[Idx / BITWORD_SIZE] &= ~(1L << (Idx % BITWORD_SIZE));
+ }
+
+ bool test(unsigned Idx) const {
+ return Bits[Idx / BITWORD_SIZE] & (1L << (Idx % BITWORD_SIZE));
+ }
+
+ size_type count() const {
+ unsigned NumBits = 0;
+ for (unsigned i = 0; i < BITWORDS_PER_ELEMENT; ++i)
+ NumBits += countPopulation(Bits[i]);
+ return NumBits;
+ }
+
+ /// find_first - Returns the index of the first set bit.
+ int find_first() const {
+ for (unsigned i = 0; i < BITWORDS_PER_ELEMENT; ++i)
+ if (Bits[i] != 0)
+ return i * BITWORD_SIZE + countTrailingZeros(Bits[i]);
+ llvm_unreachable("Illegal empty element");
+ }
+
+ /// find_last - Returns the index of the last set bit.
+ int find_last() const {
+ for (unsigned I = 0; I < BITWORDS_PER_ELEMENT; ++I) {
+ unsigned Idx = BITWORDS_PER_ELEMENT - I - 1;
+ if (Bits[Idx] != 0)
+ return Idx * BITWORD_SIZE + BITWORD_SIZE -
+ countLeadingZeros(Bits[Idx]) - 1;
+ }
+ llvm_unreachable("Illegal empty element");
+ }
+
+ /// find_next - Returns the index of the next set bit starting from the
+ /// "Curr" bit. Returns -1 if the next set bit is not found.
+ int find_next(unsigned Curr) const {
+ if (Curr >= BITS_PER_ELEMENT)
+ return -1;
+
+ unsigned WordPos = Curr / BITWORD_SIZE;
+ unsigned BitPos = Curr % BITWORD_SIZE;
+ BitWord Copy = Bits[WordPos];
+ assert(WordPos <= BITWORDS_PER_ELEMENT
+ && "Word Position outside of element");
+
+ // Mask off previous bits.
+ Copy &= ~0UL << BitPos;
+
+ if (Copy != 0)
+ return WordPos * BITWORD_SIZE + countTrailingZeros(Copy);
+
+ // Check subsequent words.
+ for (unsigned i = WordPos+1; i < BITWORDS_PER_ELEMENT; ++i)
+ if (Bits[i] != 0)
+ return i * BITWORD_SIZE + countTrailingZeros(Bits[i]);
+ return -1;
+ }
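+
+  // Illustrative sketch of walking the set bits of one element with the
+  // accessors above (`E' is hypothetical). Note that find_next(Curr) also
+  // considers the bit at Curr itself, so advance past it explicitly:
+  //   for (int B = E.find_first(); B != -1; B = E.find_next(B + 1))
+  //     /* visit bit B */;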
+
+ // Union this element with RHS and return true if this one changed.
+ bool unionWith(const SparseBitVectorElement &RHS) {
+ bool changed = false;
+ for (unsigned i = 0; i < BITWORDS_PER_ELEMENT; ++i) {
+ BitWord old = changed ? 0 : Bits[i];
+
+ Bits[i] |= RHS.Bits[i];
+ if (!changed && old != Bits[i])
+ changed = true;
+ }
+ return changed;
+ }
+
+  // Return true if we have any bits in common with RHS.
+ bool intersects(const SparseBitVectorElement &RHS) const {
+ for (unsigned i = 0; i < BITWORDS_PER_ELEMENT; ++i) {
+ if (RHS.Bits[i] & Bits[i])
+ return true;
+ }
+ return false;
+ }
+
+ // Intersect this Element with RHS and return true if this one changed.
+ // BecameZero is set to true if this element became all-zero bits.
+ bool intersectWith(const SparseBitVectorElement &RHS,
+ bool &BecameZero) {
+ bool changed = false;
+ bool allzero = true;
+
+ BecameZero = false;
+ for (unsigned i = 0; i < BITWORDS_PER_ELEMENT; ++i) {
+ BitWord old = changed ? 0 : Bits[i];
+
+ Bits[i] &= RHS.Bits[i];
+ if (Bits[i] != 0)
+ allzero = false;
+
+ if (!changed && old != Bits[i])
+ changed = true;
+ }
+ BecameZero = allzero;
+ return changed;
+ }
+
+ // Intersect this Element with the complement of RHS and return true if this
+ // one changed. BecameZero is set to true if this element became all-zero
+ // bits.
+ bool intersectWithComplement(const SparseBitVectorElement &RHS,
+ bool &BecameZero) {
+ bool changed = false;
+ bool allzero = true;
+
+ BecameZero = false;
+ for (unsigned i = 0; i < BITWORDS_PER_ELEMENT; ++i) {
+ BitWord old = changed ? 0 : Bits[i];
+
+ Bits[i] &= ~RHS.Bits[i];
+ if (Bits[i] != 0)
+ allzero = false;
+
+ if (!changed && old != Bits[i])
+ changed = true;
+ }
+ BecameZero = allzero;
+ return changed;
+ }
+
+ // Three argument version of intersectWithComplement that intersects
+ // RHS1 & ~RHS2 into this element
+ void intersectWithComplement(const SparseBitVectorElement &RHS1,
+ const SparseBitVectorElement &RHS2,
+ bool &BecameZero) {
+ bool allzero = true;
+
+ BecameZero = false;
+ for (unsigned i = 0; i < BITWORDS_PER_ELEMENT; ++i) {
+ Bits[i] = RHS1.Bits[i] & ~RHS2.Bits[i];
+ if (Bits[i] != 0)
+ allzero = false;
+ }
+ BecameZero = allzero;
+ }
+};
+
+template <unsigned ElementSize = 128>
+class SparseBitVector {
+ using ElementList = std::list<SparseBitVectorElement<ElementSize>>;
+ using ElementListIter = typename ElementList::iterator;
+ using ElementListConstIter = typename ElementList::const_iterator;
+ enum {
+ BITWORD_SIZE = SparseBitVectorElement<ElementSize>::BITWORD_SIZE
+ };
+
+ ElementList Elements;
+  // Pointer to our current Element. This has no visible effect on the external
+  // state of a SparseBitVector; it's just used to improve performance in the
+  // common case of testing/modifying bits with similar indices.
+ mutable ElementListIter CurrElementIter;
+
+ // This is like std::lower_bound, except we do linear searching from the
+ // current position.
+ ElementListIter FindLowerBoundImpl(unsigned ElementIndex) const {
+
+    // We cache a non-const iterator, so we are forced to resort to const_cast
+    // to get begin/end when 'this' is const. To avoid duplicating code whose
+    // only difference would be that const_cast, 'this' is always const in
+    // this particular function, and FindLowerBound / FindLowerBoundConst sort
+    // out the difference.
+ ElementListIter Begin =
+ const_cast<SparseBitVector<ElementSize> *>(this)->Elements.begin();
+ ElementListIter End =
+ const_cast<SparseBitVector<ElementSize> *>(this)->Elements.end();
+
+ if (Elements.empty()) {
+ CurrElementIter = Begin;
+ return CurrElementIter;
+ }
+
+ // Make sure our current iterator is valid.
+ if (CurrElementIter == End)
+ --CurrElementIter;
+
+ // Search from our current iterator, either backwards or forwards,
+ // depending on what element we are looking for.
+ ElementListIter ElementIter = CurrElementIter;
+ if (CurrElementIter->index() == ElementIndex) {
+ return ElementIter;
+ } else if (CurrElementIter->index() > ElementIndex) {
+ while (ElementIter != Begin
+ && ElementIter->index() > ElementIndex)
+ --ElementIter;
+ } else {
+ while (ElementIter != End &&
+ ElementIter->index() < ElementIndex)
+ ++ElementIter;
+ }
+ CurrElementIter = ElementIter;
+ return ElementIter;
+ }
+ ElementListConstIter FindLowerBoundConst(unsigned ElementIndex) const {
+ return FindLowerBoundImpl(ElementIndex);
+ }
+ ElementListIter FindLowerBound(unsigned ElementIndex) {
+ return FindLowerBoundImpl(ElementIndex);
+ }
+
+  // Iterator to walk the set bits in the bitmap. This iterator is a lot uglier
+  // than it otherwise would be, in order to be efficient.
+ class SparseBitVectorIterator {
+ private:
+ bool AtEnd;
+
+ const SparseBitVector<ElementSize> *BitVector = nullptr;
+
+ // Current element inside of bitmap.
+ ElementListConstIter Iter;
+
+ // Current bit number inside of our bitmap.
+ unsigned BitNumber;
+
+ // Current word number inside of our element.
+ unsigned WordNumber;
+
+ // Current bits from the element.
+ typename SparseBitVectorElement<ElementSize>::BitWord Bits;
+
+ // Move our iterator to the first non-zero bit in the bitmap.
+ void AdvanceToFirstNonZero() {
+ if (AtEnd)
+ return;
+ if (BitVector->Elements.empty()) {
+ AtEnd = true;
+ return;
+ }
+ Iter = BitVector->Elements.begin();
+ BitNumber = Iter->index() * ElementSize;
+ unsigned BitPos = Iter->find_first();
+ BitNumber += BitPos;
+ WordNumber = (BitNumber % ElementSize) / BITWORD_SIZE;
+ Bits = Iter->word(WordNumber);
+ Bits >>= BitPos % BITWORD_SIZE;
+ }
+
+ // Move our iterator to the next non-zero bit.
+ void AdvanceToNextNonZero() {
+ if (AtEnd)
+ return;
+
+ while (Bits && !(Bits & 1)) {
+ Bits >>= 1;
+ BitNumber += 1;
+ }
+
+ // See if we ran out of Bits in this word.
+ if (!Bits) {
+        int NextSetBitNumber = Iter->find_next(BitNumber % ElementSize);
+ // If we ran out of set bits in this element, move to next element.
+ if (NextSetBitNumber == -1 || (BitNumber % ElementSize == 0)) {
+ ++Iter;
+ WordNumber = 0;
+
+ // We may run out of elements in the bitmap.
+ if (Iter == BitVector->Elements.end()) {
+ AtEnd = true;
+ return;
+ }
+ // Set up for next non-zero word in bitmap.
+ BitNumber = Iter->index() * ElementSize;
+ NextSetBitNumber = Iter->find_first();
+ BitNumber += NextSetBitNumber;
+ WordNumber = (BitNumber % ElementSize) / BITWORD_SIZE;
+ Bits = Iter->word(WordNumber);
+ Bits >>= NextSetBitNumber % BITWORD_SIZE;
+ } else {
+ WordNumber = (NextSetBitNumber % ElementSize) / BITWORD_SIZE;
+ Bits = Iter->word(WordNumber);
+ Bits >>= NextSetBitNumber % BITWORD_SIZE;
+ BitNumber = Iter->index() * ElementSize;
+ BitNumber += NextSetBitNumber;
+ }
+ }
+ }
+
+ public:
+ SparseBitVectorIterator() = default;
+
+ SparseBitVectorIterator(const SparseBitVector<ElementSize> *RHS,
+                            bool end = false) : BitVector(RHS) {
+ Iter = BitVector->Elements.begin();
+ BitNumber = 0;
+ Bits = 0;
+ WordNumber = ~0;
+ AtEnd = end;
+ AdvanceToFirstNonZero();
+ }
+
+ // Preincrement.
+ inline SparseBitVectorIterator& operator++() {
+ ++BitNumber;
+ Bits >>= 1;
+ AdvanceToNextNonZero();
+ return *this;
+ }
+
+ // Postincrement.
+ inline SparseBitVectorIterator operator++(int) {
+ SparseBitVectorIterator tmp = *this;
+ ++*this;
+ return tmp;
+ }
+
+ // Return the current set bit number.
+ unsigned operator*() const {
+ return BitNumber;
+ }
+
+ bool operator==(const SparseBitVectorIterator &RHS) const {
+ // If they are both at the end, ignore the rest of the fields.
+ if (AtEnd && RHS.AtEnd)
+ return true;
+ // Otherwise they are the same if they have the same bit number and
+ // bitmap.
+ return AtEnd == RHS.AtEnd && RHS.BitNumber == BitNumber;
+ }
+
+ bool operator!=(const SparseBitVectorIterator &RHS) const {
+ return !(*this == RHS);
+ }
+ };
+
+public:
+ using iterator = SparseBitVectorIterator;
+
+ SparseBitVector() : Elements(), CurrElementIter(Elements.begin()) {}
+
+ SparseBitVector(const SparseBitVector &RHS)
+ : Elements(RHS.Elements), CurrElementIter(Elements.begin()) {}
+ SparseBitVector(SparseBitVector &&RHS)
+ : Elements(std::move(RHS.Elements)), CurrElementIter(Elements.begin()) {}
+
+ // Clear.
+ void clear() {
+ Elements.clear();
+ }
+
+ // Assignment
+ SparseBitVector& operator=(const SparseBitVector& RHS) {
+ if (this == &RHS)
+ return *this;
+
+ Elements = RHS.Elements;
+ CurrElementIter = Elements.begin();
+ return *this;
+ }
+ SparseBitVector &operator=(SparseBitVector &&RHS) {
+ Elements = std::move(RHS.Elements);
+ CurrElementIter = Elements.begin();
+ return *this;
+ }
+
+ // Test, Reset, and Set a bit in the bitmap.
+ bool test(unsigned Idx) const {
+ if (Elements.empty())
+ return false;
+
+ unsigned ElementIndex = Idx / ElementSize;
+ ElementListConstIter ElementIter = FindLowerBoundConst(ElementIndex);
+
+ // If we can't find an element that is supposed to contain this bit, there
+ // is nothing more to do.
+ if (ElementIter == Elements.end() ||
+ ElementIter->index() != ElementIndex)
+ return false;
+ return ElementIter->test(Idx % ElementSize);
+ }
+
+ void reset(unsigned Idx) {
+ if (Elements.empty())
+ return;
+
+ unsigned ElementIndex = Idx / ElementSize;
+ ElementListIter ElementIter = FindLowerBound(ElementIndex);
+
+ // If we can't find an element that is supposed to contain this bit, there
+ // is nothing more to do.
+ if (ElementIter == Elements.end() ||
+ ElementIter->index() != ElementIndex)
+ return;
+ ElementIter->reset(Idx % ElementSize);
+
+ // When the element is zeroed out, delete it.
+ if (ElementIter->empty()) {
+ ++CurrElementIter;
+ Elements.erase(ElementIter);
+ }
+ }
+
+ void set(unsigned Idx) {
+ unsigned ElementIndex = Idx / ElementSize;
+ ElementListIter ElementIter;
+ if (Elements.empty()) {
+ ElementIter = Elements.emplace(Elements.end(), ElementIndex);
+ } else {
+ ElementIter = FindLowerBound(ElementIndex);
+
+ if (ElementIter == Elements.end() ||
+ ElementIter->index() != ElementIndex) {
+ // We may have hit the beginning of our SparseBitVector, in which case,
+ // we may need to insert right after this element, which requires moving
+ // the current iterator forward one, because insert does insert before.
+ if (ElementIter != Elements.end() &&
+ ElementIter->index() < ElementIndex)
+ ++ElementIter;
+ ElementIter = Elements.emplace(ElementIter, ElementIndex);
+ }
+ }
+ CurrElementIter = ElementIter;
+
+ ElementIter->set(Idx % ElementSize);
+ }
+
+ bool test_and_set(unsigned Idx) {
+ bool old = test(Idx);
+ if (!old) {
+ set(Idx);
+ return true;
+ }
+ return false;
+ }
+
+ bool operator!=(const SparseBitVector &RHS) const {
+ return !(*this == RHS);
+ }
+
+ bool operator==(const SparseBitVector &RHS) const {
+ ElementListConstIter Iter1 = Elements.begin();
+ ElementListConstIter Iter2 = RHS.Elements.begin();
+
+ for (; Iter1 != Elements.end() && Iter2 != RHS.Elements.end();
+ ++Iter1, ++Iter2) {
+ if (*Iter1 != *Iter2)
+ return false;
+ }
+ return Iter1 == Elements.end() && Iter2 == RHS.Elements.end();
+ }
+
+ // Union our bitmap with the RHS and return true if we changed.
+ bool operator|=(const SparseBitVector &RHS) {
+ if (this == &RHS)
+ return false;
+
+ bool changed = false;
+ ElementListIter Iter1 = Elements.begin();
+ ElementListConstIter Iter2 = RHS.Elements.begin();
+
+ // If RHS is empty, we are done
+ if (RHS.Elements.empty())
+ return false;
+
+ while (Iter2 != RHS.Elements.end()) {
+ if (Iter1 == Elements.end() || Iter1->index() > Iter2->index()) {
+ Elements.insert(Iter1, *Iter2);
+ ++Iter2;
+ changed = true;
+ } else if (Iter1->index() == Iter2->index()) {
+ changed |= Iter1->unionWith(*Iter2);
+ ++Iter1;
+ ++Iter2;
+ } else {
+ ++Iter1;
+ }
+ }
+ CurrElementIter = Elements.begin();
+ return changed;
+ }
+
+ // Intersect our bitmap with the RHS and return true if ours changed.
+ bool operator&=(const SparseBitVector &RHS) {
+ if (this == &RHS)
+ return false;
+
+ bool changed = false;
+ ElementListIter Iter1 = Elements.begin();
+ ElementListConstIter Iter2 = RHS.Elements.begin();
+
+ // Check if both bitmaps are empty.
+ if (Elements.empty() && RHS.Elements.empty())
+ return false;
+
+ // Loop through, intersecting as we go, erasing elements when necessary.
+ while (Iter2 != RHS.Elements.end()) {
+ if (Iter1 == Elements.end()) {
+ CurrElementIter = Elements.begin();
+ return changed;
+ }
+
+ if (Iter1->index() > Iter2->index()) {
+ ++Iter2;
+ } else if (Iter1->index() == Iter2->index()) {
+ bool BecameZero;
+ changed |= Iter1->intersectWith(*Iter2, BecameZero);
+ if (BecameZero) {
+ ElementListIter IterTmp = Iter1;
+ ++Iter1;
+ Elements.erase(IterTmp);
+ } else {
+ ++Iter1;
+ }
+ ++Iter2;
+ } else {
+ ElementListIter IterTmp = Iter1;
+ ++Iter1;
+ Elements.erase(IterTmp);
+ changed = true;
+ }
+ }
+ if (Iter1 != Elements.end()) {
+ Elements.erase(Iter1, Elements.end());
+ changed = true;
+ }
+ CurrElementIter = Elements.begin();
+ return changed;
+ }
+
+ // Intersect our bitmap with the complement of the RHS and return true
+ // if ours changed.
+ bool intersectWithComplement(const SparseBitVector &RHS) {
+ if (this == &RHS) {
+ if (!empty()) {
+ clear();
+ return true;
+ }
+ return false;
+ }
+
+ bool changed = false;
+ ElementListIter Iter1 = Elements.begin();
+ ElementListConstIter Iter2 = RHS.Elements.begin();
+
+ // If either our bitmap or RHS is empty, we are done
+ if (Elements.empty() || RHS.Elements.empty())
+ return false;
+
+ // Loop through, intersecting as we go, erasing elements when necessary.
+ while (Iter2 != RHS.Elements.end()) {
+ if (Iter1 == Elements.end()) {
+ CurrElementIter = Elements.begin();
+ return changed;
+ }
+
+ if (Iter1->index() > Iter2->index()) {
+ ++Iter2;
+ } else if (Iter1->index() == Iter2->index()) {
+ bool BecameZero;
+ changed |= Iter1->intersectWithComplement(*Iter2, BecameZero);
+ if (BecameZero) {
+ ElementListIter IterTmp = Iter1;
+ ++Iter1;
+ Elements.erase(IterTmp);
+ } else {
+ ++Iter1;
+ }
+ ++Iter2;
+ } else {
+ ++Iter1;
+ }
+ }
+ CurrElementIter = Elements.begin();
+ return changed;
+ }
+
+  // Pointer overload; mutates this bitmap, so it is non-const.
+  bool intersectWithComplement(const SparseBitVector<ElementSize> *RHS) {
+    return intersectWithComplement(*RHS);
+  }
+
+ // Three argument version of intersectWithComplement.
+ // Result of RHS1 & ~RHS2 is stored into this bitmap.
+ void intersectWithComplement(const SparseBitVector<ElementSize> &RHS1,
+ const SparseBitVector<ElementSize> &RHS2)
+ {
+ if (this == &RHS1) {
+ intersectWithComplement(RHS2);
+ return;
+ } else if (this == &RHS2) {
+ SparseBitVector RHS2Copy(RHS2);
+ intersectWithComplement(RHS1, RHS2Copy);
+ return;
+ }
+
+ Elements.clear();
+ CurrElementIter = Elements.begin();
+ ElementListConstIter Iter1 = RHS1.Elements.begin();
+ ElementListConstIter Iter2 = RHS2.Elements.begin();
+
+ // If RHS1 is empty, we are done
+ // If RHS2 is empty, we still have to copy RHS1
+ if (RHS1.Elements.empty())
+ return;
+
+ // Loop through, intersecting as we go, erasing elements when necessary.
+ while (Iter2 != RHS2.Elements.end()) {
+ if (Iter1 == RHS1.Elements.end())
+ return;
+
+ if (Iter1->index() > Iter2->index()) {
+ ++Iter2;
+ } else if (Iter1->index() == Iter2->index()) {
+ bool BecameZero = false;
+ Elements.emplace_back(Iter1->index());
+ Elements.back().intersectWithComplement(*Iter1, *Iter2, BecameZero);
+ if (BecameZero)
+ Elements.pop_back();
+ ++Iter1;
+ ++Iter2;
+ } else {
+ Elements.push_back(*Iter1++);
+ }
+ }
+
+    // Copy the remaining elements.
+ std::copy(Iter1, RHS1.Elements.end(), std::back_inserter(Elements));
+ }
+
+ void intersectWithComplement(const SparseBitVector<ElementSize> *RHS1,
+ const SparseBitVector<ElementSize> *RHS2) {
+ intersectWithComplement(*RHS1, *RHS2);
+ }
+
+ bool intersects(const SparseBitVector<ElementSize> *RHS) const {
+ return intersects(*RHS);
+ }
+
+ // Return true if we share any bits in common with RHS
+ bool intersects(const SparseBitVector<ElementSize> &RHS) const {
+ ElementListConstIter Iter1 = Elements.begin();
+ ElementListConstIter Iter2 = RHS.Elements.begin();
+
+ // Check if both bitmaps are empty.
+ if (Elements.empty() && RHS.Elements.empty())
+ return false;
+
+    // Loop through, intersecting as we go, stopping when we hit bits in
+    // common.
+ while (Iter2 != RHS.Elements.end()) {
+ if (Iter1 == Elements.end())
+ return false;
+
+ if (Iter1->index() > Iter2->index()) {
+ ++Iter2;
+ } else if (Iter1->index() == Iter2->index()) {
+ if (Iter1->intersects(*Iter2))
+ return true;
+ ++Iter1;
+ ++Iter2;
+ } else {
+ ++Iter1;
+ }
+ }
+ return false;
+ }
+
+ // Return true iff all bits set in this SparseBitVector are
+ // also set in RHS.
+ bool contains(const SparseBitVector<ElementSize> &RHS) const {
+ SparseBitVector<ElementSize> Result(*this);
+ Result &= RHS;
+ return (Result == RHS);
+ }
+
+ // Return the first set bit in the bitmap. Return -1 if no bits are set.
+ int find_first() const {
+ if (Elements.empty())
+ return -1;
+ const SparseBitVectorElement<ElementSize> &First = *(Elements.begin());
+ return (First.index() * ElementSize) + First.find_first();
+ }
+
+ // Return the last set bit in the bitmap. Return -1 if no bits are set.
+ int find_last() const {
+ if (Elements.empty())
+ return -1;
+ const SparseBitVectorElement<ElementSize> &Last = *(Elements.rbegin());
+ return (Last.index() * ElementSize) + Last.find_last();
+ }
+
+ // Return true if the SparseBitVector is empty
+ bool empty() const {
+ return Elements.empty();
+ }
+
+ unsigned count() const {
+ unsigned BitCount = 0;
+ for (ElementListConstIter Iter = Elements.begin();
+ Iter != Elements.end();
+ ++Iter)
+ BitCount += Iter->count();
+
+ return BitCount;
+ }
+
+ iterator begin() const {
+ return iterator(this);
+ }
+
+ iterator end() const {
+ return iterator(this, true);
+ }
+};
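+
+// A minimal usage sketch (illustrative only; process() is a stand-in, not
+// part of this header):
+//
+//   SparseBitVector<> Live;
+//   Live.set(5);
+//   Live.set(300);            // Elements are materialized lazily.
+//   if (Live.test(5))
+//     Live.reset(5);
+//   for (unsigned Bit : Live) // Visits set bits in increasing order.
+//     process(Bit);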
+
+// Convenience functions to allow Or and And without dereferencing in the user
+// code.
+
+template <unsigned ElementSize>
+inline bool operator |=(SparseBitVector<ElementSize> &LHS,
+ const SparseBitVector<ElementSize> *RHS) {
+ return LHS |= *RHS;
+}
+
+template <unsigned ElementSize>
+inline bool operator |=(SparseBitVector<ElementSize> *LHS,
+ const SparseBitVector<ElementSize> &RHS) {
+ return LHS->operator|=(RHS);
+}
+
+template <unsigned ElementSize>
+inline bool operator &=(SparseBitVector<ElementSize> *LHS,
+ const SparseBitVector<ElementSize> &RHS) {
+ return LHS->operator&=(RHS);
+}
+
+template <unsigned ElementSize>
+inline bool operator &=(SparseBitVector<ElementSize> &LHS,
+ const SparseBitVector<ElementSize> *RHS) {
+ return LHS &= *RHS;
+}
+
+// Convenience functions for infix union, intersection, difference operators.
+
+template <unsigned ElementSize>
+inline SparseBitVector<ElementSize>
+operator|(const SparseBitVector<ElementSize> &LHS,
+ const SparseBitVector<ElementSize> &RHS) {
+ SparseBitVector<ElementSize> Result(LHS);
+ Result |= RHS;
+ return Result;
+}
+
+template <unsigned ElementSize>
+inline SparseBitVector<ElementSize>
+operator&(const SparseBitVector<ElementSize> &LHS,
+ const SparseBitVector<ElementSize> &RHS) {
+ SparseBitVector<ElementSize> Result(LHS);
+ Result &= RHS;
+ return Result;
+}
+
+template <unsigned ElementSize>
+inline SparseBitVector<ElementSize>
+operator-(const SparseBitVector<ElementSize> &LHS,
+ const SparseBitVector<ElementSize> &RHS) {
+ SparseBitVector<ElementSize> Result;
+ Result.intersectWithComplement(LHS, RHS);
+ return Result;
+}
+
+// Dump a SparseBitVector to a stream
+template <unsigned ElementSize>
+void dump(const SparseBitVector<ElementSize> &LHS, raw_ostream &out) {
+ out << "[";
+
+ typename SparseBitVector<ElementSize>::iterator bi = LHS.begin(),
+ be = LHS.end();
+ if (bi != be) {
+ out << *bi;
+ for (++bi; bi != be; ++bi) {
+ out << " " << *bi;
+ }
+ }
+ out << "]\n";
+}
+
+} // end namespace llvm
+
+#endif // LLVM_ADT_SPARSEBITVECTOR_H
+
+#ifdef __GNUC__
+#pragma GCC diagnostic pop
+#endif
diff --git a/contrib/libs/llvm14/include/llvm/ADT/SparseMultiSet.h b/contrib/libs/llvm14/include/llvm/ADT/SparseMultiSet.h
new file mode 100644
index 0000000000..1bb07867be
--- /dev/null
+++ b/contrib/libs/llvm14/include/llvm/ADT/SparseMultiSet.h
@@ -0,0 +1,534 @@
+#pragma once
+
+#ifdef __GNUC__
+#pragma GCC diagnostic push
+#pragma GCC diagnostic ignored "-Wunused-parameter"
+#endif
+
+//===- llvm/ADT/SparseMultiSet.h - Sparse multiset --------------*- C++ -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+///
+/// \file
+/// This file defines the SparseMultiSet class, which adds multiset behavior to
+/// the SparseSet.
+///
+/// A sparse multiset holds a small number of objects identified by integer keys
+/// from a moderately sized universe. The sparse multiset uses more memory than
+/// other containers in order to provide faster operations. Any key can map to
+/// multiple values. A SparseMultiSetNode class is provided, which serves as a
+/// convenient base class for the contents of a SparseMultiSet.
+///
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_ADT_SPARSEMULTISET_H
+#define LLVM_ADT_SPARSEMULTISET_H
+
+#include "llvm/ADT/identity.h"
+#include "llvm/ADT/SmallVector.h"
+#include "llvm/ADT/SparseSet.h"
+#include <cassert>
+#include <cstdint>
+#include <cstdlib>
+#include <iterator>
+#include <limits>
+#include <utility>
+
+namespace llvm {
+
+/// Fast multiset implementation for objects that can be identified by small
+/// unsigned keys.
+///
+/// SparseMultiSet allocates memory proportional to the size of the key
+/// universe, so it is not recommended for building composite data structures.
+/// It is useful for algorithms that require a single set with fast operations.
+///
+/// Compared to DenseSet and DenseMap, SparseMultiSet provides a constant-time
+/// clear() as fast as a vector's. The find(), insert(), and erase()
+/// operations are all constant time, and typically faster than a hash table.
+/// The iteration order doesn't depend on numerical key values, it only depends
+/// on the order of insert() and erase() operations. Iteration order is the
+/// insertion order. Iteration is only provided over elements of equivalent
+/// keys, but iterators are bidirectional.
+///
+/// Compared to BitVector, SparseMultiSet<unsigned> uses 8x-40x more memory, but
+/// offers constant-time clear() and size() operations as well as fast iteration
+/// independent of the size of the universe.
+///
+/// SparseMultiSet contains a dense vector holding all the objects and a sparse
+/// array holding indexes into the dense vector. Most of the memory is used by
+/// the sparse array which is the size of the key universe. The SparseT template
+/// parameter provides a space/speed tradeoff for sets holding many elements.
+///
+/// When SparseT is uint32_t, find() only touches up to 3 cache lines, but the
+/// sparse array uses 4 x Universe bytes.
+///
+/// When SparseT is uint8_t (the default), find() touches up to 3+[N/256] cache
+/// lines, but the sparse array is 4x smaller. N is the number of elements in
+/// the set.
+///
+/// For sets that may grow to thousands of elements, SparseT should be set to
+/// uint16_t or uint32_t.
+///
+/// Multiset behavior is provided by providing doubly linked lists for values
+/// that are inlined in the dense vector. SparseMultiSet is a good choice when
+/// one desires a growable number of entries per key, as it will retain the
+/// SparseSet algorithmic properties despite being growable. Thus, it is often a
+/// better choice than a SparseSet of growable containers or a vector of
+/// vectors. SparseMultiSet also keeps iterators valid after erasure (provided
+/// the iterators don't point to the element erased), allowing for more
+/// intuitive and fast removal.
+///
+/// @tparam ValueT The type of objects in the set.
+/// @tparam KeyFunctorT A functor that computes an unsigned index from KeyT.
+/// @tparam SparseT An unsigned integer type. See above.
+///
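+/// A minimal usage sketch (illustrative only; NumKeys and use() are
+/// stand-ins):
+///
+///   SparseMultiSet<unsigned> Set;
+///   Set.setUniverse(NumKeys); // Must be sized before inserting.
+///   Set.insert(3);
+///   Set.insert(3);            // Duplicate keys are allowed.
+///   for (auto I = Set.find(3); I != Set.end(); ++I)
+///     use(*I);                // Visits both entries for key 3.
+///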
+template<typename ValueT,
+ typename KeyFunctorT = identity<unsigned>,
+ typename SparseT = uint8_t>
+class SparseMultiSet {
+ static_assert(std::numeric_limits<SparseT>::is_integer &&
+ !std::numeric_limits<SparseT>::is_signed,
+ "SparseT must be an unsigned integer type");
+
+ /// The actual data that's stored, as a doubly-linked list implemented via
+  /// indices into the DenseVector. The doubly linked list is circular in its
+  /// Prev indices and INVALID-terminated in its Next indices. This
+ /// provides efficient access to list tails. These nodes can also be
+ /// tombstones, in which case they are actually nodes in a single-linked
+ /// freelist of recyclable slots.
+ struct SMSNode {
+ static constexpr unsigned INVALID = ~0U;
+
+ ValueT Data;
+ unsigned Prev;
+ unsigned Next;
+
+ SMSNode(ValueT D, unsigned P, unsigned N) : Data(D), Prev(P), Next(N) {}
+
+ /// List tails have invalid Nexts.
+ bool isTail() const {
+ return Next == INVALID;
+ }
+
+ /// Whether this node is a tombstone node, and thus is in our freelist.
+ bool isTombstone() const {
+ return Prev == INVALID;
+ }
+
+ /// Since the list is circular in Prev, all non-tombstone nodes have a valid
+ /// Prev.
+ bool isValid() const { return Prev != INVALID; }
+ };
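+
+  // Illustrative layout: three values sharing one key, stored at dense
+  // indices 0, 1, and 2 with 0 as the head:
+  //   Prev: [2, 0, 1]       (circular; the head's Prev is the tail)
+  //   Next: [1, 2, INVALID] (INVALID-terminated)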
+
+ using KeyT = typename KeyFunctorT::argument_type;
+ using DenseT = SmallVector<SMSNode, 8>;
+ DenseT Dense;
+ SparseT *Sparse = nullptr;
+ unsigned Universe = 0;
+ KeyFunctorT KeyIndexOf;
+ SparseSetValFunctor<KeyT, ValueT, KeyFunctorT> ValIndexOf;
+
+ /// We have a built-in recycler for reusing tombstone slots. This recycler
+  /// puts a singly-linked free list into tombstone slots, allowing quick
+  /// erasure, iterator preservation, and constant-time size().
+ unsigned FreelistIdx = SMSNode::INVALID;
+ unsigned NumFree = 0;
+
+ unsigned sparseIndex(const ValueT &Val) const {
+ assert(ValIndexOf(Val) < Universe &&
+ "Invalid key in set. Did object mutate?");
+ return ValIndexOf(Val);
+ }
+ unsigned sparseIndex(const SMSNode &N) const { return sparseIndex(N.Data); }
+
+  /// Whether the given entry is the head of the list. List heads' previous
+ /// pointers are to the tail of the list, allowing for efficient access to the
+ /// list tail. D must be a valid entry node.
+ bool isHead(const SMSNode &D) const {
+ assert(D.isValid() && "Invalid node for head");
+ return Dense[D.Prev].isTail();
+ }
+
+ /// Whether the given entry is a singleton entry, i.e. the only entry with
+ /// that key.
+ bool isSingleton(const SMSNode &N) const {
+ assert(N.isValid() && "Invalid node for singleton");
+ // Is N its own predecessor?
+ return &Dense[N.Prev] == &N;
+ }
+
+ /// Add in the given SMSNode. Uses a free entry in our freelist if
+ /// available. Returns the index of the added node.
+ unsigned addValue(const ValueT& V, unsigned Prev, unsigned Next) {
+ if (NumFree == 0) {
+ Dense.push_back(SMSNode(V, Prev, Next));
+ return Dense.size() - 1;
+ }
+
+ // Peel off a free slot
+ unsigned Idx = FreelistIdx;
+ unsigned NextFree = Dense[Idx].Next;
+ assert(Dense[Idx].isTombstone() && "Non-tombstone free?");
+
+ Dense[Idx] = SMSNode(V, Prev, Next);
+ FreelistIdx = NextFree;
+ --NumFree;
+ return Idx;
+ }
+
+ /// Make the current index a new tombstone. Pushes it onto the freelist.
+ void makeTombstone(unsigned Idx) {
+ Dense[Idx].Prev = SMSNode::INVALID;
+ Dense[Idx].Next = FreelistIdx;
+ FreelistIdx = Idx;
+ ++NumFree;
+ }
+
+public:
+ using value_type = ValueT;
+ using reference = ValueT &;
+ using const_reference = const ValueT &;
+ using pointer = ValueT *;
+ using const_pointer = const ValueT *;
+ using size_type = unsigned;
+
+ SparseMultiSet() = default;
+ SparseMultiSet(const SparseMultiSet &) = delete;
+ SparseMultiSet &operator=(const SparseMultiSet &) = delete;
+ ~SparseMultiSet() { free(Sparse); }
+
+ /// Set the universe size which determines the largest key the set can hold.
+ /// The universe must be sized before any elements can be added.
+ ///
+ /// @param U Universe size. All object keys must be less than U.
+ ///
+ void setUniverse(unsigned U) {
+ // It's not hard to resize the universe on a non-empty set, but it doesn't
+ // seem like a likely use case, so we can add that code when we need it.
+ assert(empty() && "Can only resize universe on an empty map");
+ // Hysteresis prevents needless reallocations.
+ if (U >= Universe/4 && U <= Universe)
+ return;
+ free(Sparse);
+ // The Sparse array doesn't actually need to be initialized, so malloc
+ // would be enough here, but that will cause tools like valgrind to
+ // complain about branching on uninitialized data.
+ Sparse = static_cast<SparseT*>(safe_calloc(U, sizeof(SparseT)));
+ Universe = U;
+ }
+
+ /// Our iterators are iterators over the collection of objects that share a
+ /// key.
+ template <typename SMSPtrTy> class iterator_base {
+ friend class SparseMultiSet;
+
+ public:
+ using iterator_category = std::bidirectional_iterator_tag;
+ using value_type = ValueT;
+ using difference_type = std::ptrdiff_t;
+ using pointer = value_type *;
+ using reference = value_type &;
+
+ private:
+ SMSPtrTy SMS;
+ unsigned Idx;
+ unsigned SparseIdx;
+
+ iterator_base(SMSPtrTy P, unsigned I, unsigned SI)
+ : SMS(P), Idx(I), SparseIdx(SI) {}
+
+ /// Whether our iterator has fallen outside our dense vector.
+ bool isEnd() const {
+ if (Idx == SMSNode::INVALID)
+ return true;
+
+ assert(Idx < SMS->Dense.size() && "Out of range, non-INVALID Idx?");
+ return false;
+ }
+
+ /// Whether our iterator is properly keyed, i.e. the SparseIdx is valid
+ bool isKeyed() const { return SparseIdx < SMS->Universe; }
+
+ unsigned Prev() const { return SMS->Dense[Idx].Prev; }
+ unsigned Next() const { return SMS->Dense[Idx].Next; }
+
+ void setPrev(unsigned P) { SMS->Dense[Idx].Prev = P; }
+ void setNext(unsigned N) { SMS->Dense[Idx].Next = N; }
+
+ public:
+ reference operator*() const {
+ assert(isKeyed() && SMS->sparseIndex(SMS->Dense[Idx].Data) == SparseIdx &&
+ "Dereferencing iterator of invalid key or index");
+
+ return SMS->Dense[Idx].Data;
+ }
+ pointer operator->() const { return &operator*(); }
+
+ /// Comparison operators
+ bool operator==(const iterator_base &RHS) const {
+ // end compares equal
+ if (SMS == RHS.SMS && Idx == RHS.Idx) {
+ assert((isEnd() || SparseIdx == RHS.SparseIdx) &&
+ "Same dense entry, but different keys?");
+ return true;
+ }
+
+ return false;
+ }
+
+ bool operator!=(const iterator_base &RHS) const {
+ return !operator==(RHS);
+ }
+
+ /// Increment and decrement operators
+ iterator_base &operator--() { // predecrement - Back up
+ assert(isKeyed() && "Decrementing an invalid iterator");
+ assert((isEnd() || !SMS->isHead(SMS->Dense[Idx])) &&
+ "Decrementing head of list");
+
+ // If we're at the end, then issue a new find()
+ if (isEnd())
+ Idx = SMS->findIndex(SparseIdx).Prev();
+ else
+ Idx = Prev();
+
+ return *this;
+ }
+ iterator_base &operator++() { // preincrement - Advance
+ assert(!isEnd() && isKeyed() && "Incrementing an invalid/end iterator");
+ Idx = Next();
+ return *this;
+ }
+ iterator_base operator--(int) { // postdecrement
+ iterator_base I(*this);
+ --*this;
+ return I;
+ }
+ iterator_base operator++(int) { // postincrement
+ iterator_base I(*this);
+ ++*this;
+ return I;
+ }
+ };
+
+ using iterator = iterator_base<SparseMultiSet *>;
+ using const_iterator = iterator_base<const SparseMultiSet *>;
+
+ // Convenience types
+ using RangePair = std::pair<iterator, iterator>;
+
+  /// Returns an iterator past the end of this container. Note that such an
+  /// iterator cannot be decremented, but will compare equal to other end
+  /// iterators.
+ iterator end() { return iterator(this, SMSNode::INVALID, SMSNode::INVALID); }
+ const_iterator end() const {
+ return const_iterator(this, SMSNode::INVALID, SMSNode::INVALID);
+ }
+
+ /// Returns true if the set is empty.
+ ///
+ /// This is not the same as BitVector::empty().
+ ///
+ bool empty() const { return size() == 0; }
+
+ /// Returns the number of elements in the set.
+ ///
+ /// This is not the same as BitVector::size() which returns the size of the
+ /// universe.
+ ///
+ size_type size() const {
+ assert(NumFree <= Dense.size() && "Out-of-bounds free entries");
+ return Dense.size() - NumFree;
+ }
+
+ /// Clears the set. This is a very fast constant time operation.
+ ///
+ void clear() {
+ // Sparse does not need to be cleared, see find().
+ Dense.clear();
+ NumFree = 0;
+ FreelistIdx = SMSNode::INVALID;
+ }
+
+ /// Find an element by its index.
+ ///
+ /// @param Idx A valid index to find.
+ /// @returns An iterator to the element identified by key, or end().
+ ///
+ iterator findIndex(unsigned Idx) {
+ assert(Idx < Universe && "Key out of range");
+ const unsigned Stride = std::numeric_limits<SparseT>::max() + 1u;
+ for (unsigned i = Sparse[Idx], e = Dense.size(); i < e; i += Stride) {
+ const unsigned FoundIdx = sparseIndex(Dense[i]);
+ // Check that we're pointing at the correct entry and that it is the head
+ // of a valid list.
+ if (Idx == FoundIdx && Dense[i].isValid() && isHead(Dense[i]))
+ return iterator(this, i, Idx);
+ // Stride is 0 when SparseT >= unsigned. We don't need to loop.
+ if (!Stride)
+ break;
+ }
+ return end();
+ }
+
+ /// Find an element by its key.
+ ///
+ /// @param Key A valid key to find.
+ /// @returns An iterator to the element identified by key, or end().
+ ///
+ iterator find(const KeyT &Key) {
+ return findIndex(KeyIndexOf(Key));
+ }
+
+ const_iterator find(const KeyT &Key) const {
+ iterator I = const_cast<SparseMultiSet*>(this)->findIndex(KeyIndexOf(Key));
+ return const_iterator(I.SMS, I.Idx, KeyIndexOf(Key));
+ }
+
+ /// Returns the number of elements identified by Key. This will be linear in
+ /// the number of elements of that key.
+ size_type count(const KeyT &Key) const {
+ unsigned Ret = 0;
+ for (const_iterator It = find(Key); It != end(); ++It)
+ ++Ret;
+
+ return Ret;
+ }
+
+ /// Returns true if this set contains an element identified by Key.
+ bool contains(const KeyT &Key) const {
+ return find(Key) != end();
+ }
+
+  /// Return the head or tail, respectively, of the list of elements sharing
+  /// Key, or end() if the key has no elements.
+ iterator getHead(const KeyT &Key) { return find(Key); }
+ iterator getTail(const KeyT &Key) {
+ iterator I = find(Key);
+ if (I != end())
+ I = iterator(this, I.Prev(), KeyIndexOf(Key));
+ return I;
+ }
+
+ /// The bounds of the range of items sharing Key K. First member is the head
+ /// of the list, and the second member is a decrementable end iterator for
+ /// that key.
+ RangePair equal_range(const KeyT &K) {
+ iterator B = find(K);
+ iterator E = iterator(this, SMSNode::INVALID, B.SparseIdx);
+ return std::make_pair(B, E);
+ }
+
+ /// Insert a new element at the tail of the subset list. Returns an iterator
+ /// to the newly added entry.
+ iterator insert(const ValueT &Val) {
+ unsigned Idx = sparseIndex(Val);
+ iterator I = findIndex(Idx);
+
+ unsigned NodeIdx = addValue(Val, SMSNode::INVALID, SMSNode::INVALID);
+
+ if (I == end()) {
+ // Make a singleton list
+ Sparse[Idx] = NodeIdx;
+ Dense[NodeIdx].Prev = NodeIdx;
+ return iterator(this, NodeIdx, Idx);
+ }
+
+ // Stick it at the end.
+ unsigned HeadIdx = I.Idx;
+ unsigned TailIdx = I.Prev();
+ Dense[TailIdx].Next = NodeIdx;
+ Dense[HeadIdx].Prev = NodeIdx;
+ Dense[NodeIdx].Prev = TailIdx;
+
+ return iterator(this, NodeIdx, Idx);
+ }
+
+ /// Erases an existing element identified by a valid iterator.
+ ///
+ /// This invalidates iterators pointing at the same entry, but erase() returns
+ /// an iterator pointing to the next element in the subset's list. This makes
+ /// it possible to erase selected elements while iterating over the subset:
+ ///
+ /// tie(I, E) = Set.equal_range(Key);
+ /// while (I != E)
+ /// if (test(*I))
+ /// I = Set.erase(I);
+ /// else
+ /// ++I;
+ ///
+ /// Note that if the last element in the subset list is erased, this will
+ /// return an end iterator which can be decremented to get the new tail (if it
+ /// exists):
+ ///
+ /// tie(B, I) = Set.equal_range(Key);
+ /// for (bool isBegin = B == I; !isBegin; /* empty */) {
+ /// isBegin = (--I) == B;
+  ///   if (test(*I))
+ /// break;
+ /// I = erase(I);
+ /// }
+ iterator erase(iterator I) {
+ assert(I.isKeyed() && !I.isEnd() && !Dense[I.Idx].isTombstone() &&
+ "erasing invalid/end/tombstone iterator");
+
+ // First, unlink the node from its list. Then swap the node out with the
+ // dense vector's last entry
+ iterator NextI = unlink(Dense[I.Idx]);
+
+ // Put in a tombstone.
+ makeTombstone(I.Idx);
+
+ return NextI;
+ }
+
+ /// Erase all elements with the given key. This invalidates all
+ /// iterators of that key.
+ void eraseAll(const KeyT &K) {
+ for (iterator I = find(K); I != end(); /* empty */)
+ I = erase(I);
+ }
+
+private:
+ /// Unlink the node from its list. Returns the next node in the list.
+ iterator unlink(const SMSNode &N) {
+ if (isSingleton(N)) {
+ // Singleton is already unlinked
+ assert(N.Next == SMSNode::INVALID && "Singleton has next?");
+ return iterator(this, SMSNode::INVALID, ValIndexOf(N.Data));
+ }
+
+ if (isHead(N)) {
+ // If we're the head, then update the sparse array and our next.
+ Sparse[sparseIndex(N)] = N.Next;
+ Dense[N.Next].Prev = N.Prev;
+ return iterator(this, N.Next, ValIndexOf(N.Data));
+ }
+
+ if (N.isTail()) {
+ // If we're the tail, then update our head and our previous.
+ findIndex(sparseIndex(N)).setPrev(N.Prev);
+ Dense[N.Prev].Next = N.Next;
+
+ // Give back an end iterator that can be decremented
+ iterator I(this, N.Prev, ValIndexOf(N.Data));
+ return ++I;
+ }
+
+ // Otherwise, just drop us
+ Dense[N.Next].Prev = N.Prev;
+ Dense[N.Prev].Next = N.Next;
+ return iterator(this, N.Next, ValIndexOf(N.Data));
+ }
+};
+
+} // end namespace llvm
+
+#endif // LLVM_ADT_SPARSEMULTISET_H
+
+#ifdef __GNUC__
+#pragma GCC diagnostic pop
+#endif
diff --git a/contrib/libs/llvm14/include/llvm/ADT/SparseSet.h b/contrib/libs/llvm14/include/llvm/ADT/SparseSet.h
new file mode 100644
index 0000000000..f0b94afda2
--- /dev/null
+++ b/contrib/libs/llvm14/include/llvm/ADT/SparseSet.h
@@ -0,0 +1,330 @@
+#pragma once
+
+#ifdef __GNUC__
+#pragma GCC diagnostic push
+#pragma GCC diagnostic ignored "-Wunused-parameter"
+#endif
+
+//===- llvm/ADT/SparseSet.h - Sparse set ------------------------*- C++ -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+///
+/// \file
+/// This file defines the SparseSet class derived from the version described in
+/// Briggs, Torczon, "An efficient representation for sparse sets", ACM Letters
+/// on Programming Languages and Systems, Volume 2 Issue 1-4, March-Dec. 1993.
+///
+/// A sparse set holds a small number of objects identified by integer keys from
+/// a moderately sized universe. The sparse set uses more memory than other
+/// containers in order to provide faster operations.
+///
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_ADT_SPARSESET_H
+#define LLVM_ADT_SPARSESET_H
+
+#include "llvm/ADT/identity.h"
+#include "llvm/ADT/SmallVector.h"
+#include "llvm/Support/AllocatorBase.h"
+#include <cassert>
+#include <cstdint>
+#include <cstdlib>
+#include <limits>
+#include <utility>
+
+namespace llvm {
+
+/// SparseSetValTraits - Objects in a SparseSet are identified by keys that can
+/// be uniquely converted to a small integer less than the set's universe. This
+/// class allows the set to hold values that differ from the set's key type as
+/// long as an index can still be derived from the value. SparseSet never
+/// directly compares ValueT, only their indices, so it can map keys to
+/// arbitrary values. SparseSetValTraits computes the index from the value
+/// object. To compute the index from a key, SparseSet uses a separate
+/// KeyFunctorT template argument.
+///
+/// A simple type declaration, SparseSet<Type>, handles these cases:
+/// - unsigned key, identity index, identity value
+/// - unsigned key, identity index, fat value providing getSparseSetIndex()
+///
+/// The type declaration SparseSet<Type, UnaryFunction> handles:
+/// - unsigned key, remapped index, identity value (virtual registers)
+/// - pointer key, pointer-derived index, identity value (node+ID)
+/// - pointer key, pointer-derived index, fat value with getSparseSetIndex()
+///
+/// Only other, unexpected cases require specializing SparseSetValTraits.
+///
+/// For best results, ValueT should not require a destructor.
+///
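+/// An illustrative "fat value" type (names are stand-ins):
+///
+///   struct Node {
+///     unsigned ID;
+///     unsigned getSparseSetIndex() const { return ID; }
+///   };
+///
+/// SparseSet<Node> then stores Node objects but indexes them by ID.
+///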
+template<typename ValueT>
+struct SparseSetValTraits {
+ static unsigned getValIndex(const ValueT &Val) {
+ return Val.getSparseSetIndex();
+ }
+};
+
+/// SparseSetValFunctor - Helper class for selecting SparseSetValTraits. The
+/// generic implementation handles ValueT classes which either provide
+/// getSparseSetIndex() or specialize SparseSetValTraits<>.
+///
+template<typename KeyT, typename ValueT, typename KeyFunctorT>
+struct SparseSetValFunctor {
+ unsigned operator()(const ValueT &Val) const {
+ return SparseSetValTraits<ValueT>::getValIndex(Val);
+ }
+};
+
+/// SparseSetValFunctor<KeyT, KeyT> - Helper class for the common case of
+/// identity key/value sets.
+template<typename KeyT, typename KeyFunctorT>
+struct SparseSetValFunctor<KeyT, KeyT, KeyFunctorT> {
+ unsigned operator()(const KeyT &Key) const {
+ return KeyFunctorT()(Key);
+ }
+};
+
+/// SparseSet - Fast set implementation for objects that can be identified by
+/// small unsigned keys.
+///
+/// SparseSet allocates memory proportional to the size of the key universe, so
+/// it is not recommended for building composite data structures. It is useful
+/// for algorithms that require a single set with fast operations.
+///
+/// Compared to DenseSet and DenseMap, SparseSet provides constant-time fast
+/// clear() and iteration as fast as a vector. The find(), insert(), and
+/// erase() operations are all constant time, and typically faster than a hash
+/// table. The iteration order doesn't depend on numerical key values, it only
+/// depends on the order of insert() and erase() operations. When no elements
+/// have been erased, the iteration order is the insertion order.
+///
+/// Compared to BitVector, SparseSet<unsigned> uses 8x-40x more memory, but
+/// offers constant-time clear() and size() operations as well as fast
+/// iteration independent of the size of the universe.
+///
+/// SparseSet contains a dense vector holding all the objects and a sparse
+/// array holding indexes into the dense vector. Most of the memory is used by
+/// the sparse array which is the size of the key universe. The SparseT
+/// template parameter provides a space/speed tradeoff for sets holding many
+/// elements.
+///
+/// When SparseT is uint32_t, find() only touches 2 cache lines, but the sparse
+/// array uses 4 x Universe bytes.
+///
+/// When SparseT is uint8_t (the default), find() touches up to 2+[N/256] cache
+/// lines, but the sparse array is 4x smaller. N is the number of elements in
+/// the set.
+///
+/// For sets that may grow to thousands of elements, SparseT should be set to
+/// uint16_t or uint32_t.
+///
+/// @tparam ValueT The type of objects in the set.
+/// @tparam KeyFunctorT A functor that computes an unsigned index from KeyT.
+/// @tparam SparseT An unsigned integer type. See above.
+///
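+/// A minimal usage sketch (illustrative only; NumKeys is a stand-in):
+///
+///   SparseSet<unsigned> Set;
+///   Set.setUniverse(NumKeys);  // Must be sized before inserting.
+///   if (Set.insert(7).second)  // Yields (iterator, was-inserted).
+///     ...
+///   Set.erase(7);              // Constant time; may reorder iteration.
+///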
+template<typename ValueT,
+ typename KeyFunctorT = identity<unsigned>,
+ typename SparseT = uint8_t>
+class SparseSet {
+ static_assert(std::numeric_limits<SparseT>::is_integer &&
+ !std::numeric_limits<SparseT>::is_signed,
+ "SparseT must be an unsigned integer type");
+
+ using KeyT = typename KeyFunctorT::argument_type;
+ using DenseT = SmallVector<ValueT, 8>;
+ using size_type = unsigned;
+ DenseT Dense;
+ SparseT *Sparse = nullptr;
+ unsigned Universe = 0;
+ KeyFunctorT KeyIndexOf;
+ SparseSetValFunctor<KeyT, ValueT, KeyFunctorT> ValIndexOf;
+
+public:
+ using value_type = ValueT;
+ using reference = ValueT &;
+ using const_reference = const ValueT &;
+ using pointer = ValueT *;
+ using const_pointer = const ValueT *;
+
+ SparseSet() = default;
+ SparseSet(const SparseSet &) = delete;
+ SparseSet &operator=(const SparseSet &) = delete;
+ ~SparseSet() { free(Sparse); }
+
+ /// setUniverse - Set the universe size which determines the largest key the
+ /// set can hold. The universe must be sized before any elements can be
+ /// added.
+ ///
+ /// @param U Universe size. All object keys must be less than U.
+ ///
+ void setUniverse(unsigned U) {
+ // It's not hard to resize the universe on a non-empty set, but it doesn't
+ // seem like a likely use case, so we can add that code when we need it.
+ assert(empty() && "Can only resize universe on an empty map");
+ // Hysteresis prevents needless reallocations.
+ if (U >= Universe/4 && U <= Universe)
+ return;
+ free(Sparse);
+ // The Sparse array doesn't actually need to be initialized, so malloc
+ // would be enough here, but that will cause tools like valgrind to
+ // complain about branching on uninitialized data.
+ Sparse = static_cast<SparseT*>(safe_calloc(U, sizeof(SparseT)));
+ Universe = U;
+ }
+
+ // Import trivial vector stuff from DenseT.
+ using iterator = typename DenseT::iterator;
+ using const_iterator = typename DenseT::const_iterator;
+
+ const_iterator begin() const { return Dense.begin(); }
+ const_iterator end() const { return Dense.end(); }
+ iterator begin() { return Dense.begin(); }
+ iterator end() { return Dense.end(); }
+
+ /// empty - Returns true if the set is empty.
+ ///
+ /// This is not the same as BitVector::empty().
+ ///
+ bool empty() const { return Dense.empty(); }
+
+ /// size - Returns the number of elements in the set.
+ ///
+ /// This is not the same as BitVector::size() which returns the size of the
+ /// universe.
+ ///
+ size_type size() const { return Dense.size(); }
+
+ /// clear - Clears the set. This is a very fast constant time operation.
+ ///
+ void clear() {
+ // Sparse does not need to be cleared, see find().
+ Dense.clear();
+ }
+
+ /// findIndex - Find an element by its index.
+ ///
+ /// @param Idx A valid index to find.
+ /// @returns An iterator to the element identified by key, or end().
+ ///
+ iterator findIndex(unsigned Idx) {
+ assert(Idx < Universe && "Key out of range");
+ const unsigned Stride = std::numeric_limits<SparseT>::max() + 1u;
+ for (unsigned i = Sparse[Idx], e = size(); i < e; i += Stride) {
+ const unsigned FoundIdx = ValIndexOf(Dense[i]);
+ assert(FoundIdx < Universe && "Invalid key in set. Did object mutate?");
+ if (Idx == FoundIdx)
+ return begin() + i;
+ // Stride is 0 when SparseT >= unsigned. We don't need to loop.
+ if (!Stride)
+ break;
+ }
+ return end();
+ }
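+
+  // Note: stale Sparse[] entries are harmless. findIndex() only trusts
+  // Sparse[Idx] after verifying that the dense element it points at maps
+  // back to Idx, which is why clear() never needs to touch the sparse array.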
+
+ /// find - Find an element by its key.
+ ///
+ /// @param Key A valid key to find.
+ /// @returns An iterator to the element identified by key, or end().
+ ///
+ iterator find(const KeyT &Key) {
+ return findIndex(KeyIndexOf(Key));
+ }
+
+ const_iterator find(const KeyT &Key) const {
+ return const_cast<SparseSet*>(this)->findIndex(KeyIndexOf(Key));
+ }
+
+ /// Check if the set contains the given \c Key.
+ ///
+ /// @param Key A valid key to find.
+  bool contains(const KeyT &Key) const { return find(Key) != end(); }
+
+ /// count - Returns 1 if this set contains an element identified by Key,
+ /// 0 otherwise.
+ ///
+ size_type count(const KeyT &Key) const { return contains(Key) ? 1 : 0; }
+
+ /// insert - Attempts to insert a new element.
+ ///
+ /// If Val is successfully inserted, return (I, true), where I is an iterator
+ /// pointing to the newly inserted element.
+ ///
+ /// If the set already contains an element with the same key as Val, return
+ /// (I, false), where I is an iterator pointing to the existing element.
+ ///
+ /// Insertion invalidates all iterators.
+ ///
+ std::pair<iterator, bool> insert(const ValueT &Val) {
+ unsigned Idx = ValIndexOf(Val);
+ iterator I = findIndex(Idx);
+ if (I != end())
+ return std::make_pair(I, false);
+ Sparse[Idx] = size();
+ Dense.push_back(Val);
+ return std::make_pair(end() - 1, true);
+ }
+
+ /// array subscript - If an element already exists with this key, return it.
+ /// Otherwise, automatically construct a new value from Key, insert it,
+ /// and return the newly inserted element.
+ ValueT &operator[](const KeyT &Key) {
+ return *insert(ValueT(Key)).first;
+ }
+
+ ValueT pop_back_val() {
+ // Sparse does not need to be cleared, see find().
+ return Dense.pop_back_val();
+ }
+
+ /// erase - Erases an existing element identified by a valid iterator.
+ ///
+ /// This invalidates all iterators, but erase() returns an iterator pointing
+ /// to the next element. This makes it possible to erase selected elements
+ /// while iterating over the set:
+ ///
+ /// for (SparseSet::iterator I = Set.begin(); I != Set.end();)
+ /// if (test(*I))
+ /// I = Set.erase(I);
+ /// else
+ /// ++I;
+ ///
+ /// Note that end() changes when elements are erased, unlike std::list.
+ ///
+ iterator erase(iterator I) {
+ assert(unsigned(I - begin()) < size() && "Invalid iterator");
+ if (I != end() - 1) {
+ *I = Dense.back();
+ unsigned BackIdx = ValIndexOf(Dense.back());
+ assert(BackIdx < Universe && "Invalid key in set. Did object mutate?");
+ Sparse[BackIdx] = I - begin();
+ }
+ // This depends on SmallVector::pop_back() not invalidating iterators.
+ // std::vector::pop_back() doesn't give that guarantee.
+ Dense.pop_back();
+ return I;
+ }
+
+ /// erase - Erases an element identified by Key, if it exists.
+ ///
+ /// @param Key The key identifying the element to erase.
+ /// @returns True when an element was erased, false if no element was found.
+ ///
+ bool erase(const KeyT &Key) {
+ iterator I = find(Key);
+ if (I == end())
+ return false;
+ erase(I);
+ return true;
+ }
+};
+
+} // end namespace llvm
+
+#endif // LLVM_ADT_SPARSESET_H
+
+#ifdef __GNUC__
+#pragma GCC diagnostic pop
+#endif
diff --git a/contrib/libs/llvm14/include/llvm/ADT/Statistic.h b/contrib/libs/llvm14/include/llvm/ADT/Statistic.h
new file mode 100644
index 0000000000..a10666757a
--- /dev/null
+++ b/contrib/libs/llvm14/include/llvm/ADT/Statistic.h
@@ -0,0 +1,233 @@
+#pragma once
+
+#ifdef __GNUC__
+#pragma GCC diagnostic push
+#pragma GCC diagnostic ignored "-Wunused-parameter"
+#endif
+
+//===-- llvm/ADT/Statistic.h - Easy way to expose stats ---------*- C++ -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+///
+/// \file
+/// This file defines the 'Statistic' class, which is designed to be an easy way
+/// to expose various metrics from passes. These statistics are printed at the
+/// end of a run (from llvm_shutdown), when the -stats command line option is
+/// passed on the command line.
+///
+/// This is useful for reporting information like the number of instructions
+/// simplified, optimized or removed by various transformations, like this:
+///
+/// static Statistic NumInstsKilled = {"gcse", "NumInstsKilled",
+///                                    "Number of instructions killed"};
+///
+/// Later, in the code: ++NumInstsKilled;
+///
+/// NOTE: Statistics *must* be declared as global variables.
+///
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_ADT_STATISTIC_H
+#define LLVM_ADT_STATISTIC_H
+
+#include "llvm/Config/llvm-config.h"
+#include "llvm/Support/Compiler.h"
+#include <atomic>
+#include <memory>
+#include <vector>
+
+// Determine whether statistics should be enabled. We must do it here rather
+// than in CMake because multi-config generators cannot determine this at
+// configure time.
+#if !defined(NDEBUG) || LLVM_FORCE_ENABLE_STATS
+#define LLVM_ENABLE_STATS 1
+#else
+#define LLVM_ENABLE_STATS 0
+#endif
+
+namespace llvm {
+
+class raw_ostream;
+class raw_fd_ostream;
+class StringRef;
+
+class TrackingStatistic {
+public:
+ const char *const DebugType;
+ const char *const Name;
+ const char *const Desc;
+
+ std::atomic<unsigned> Value;
+ std::atomic<bool> Initialized;
+
+ constexpr TrackingStatistic(const char *DebugType, const char *Name,
+ const char *Desc)
+ : DebugType(DebugType), Name(Name), Desc(Desc), Value(0),
+ Initialized(false) {}
+
+ const char *getDebugType() const { return DebugType; }
+ const char *getName() const { return Name; }
+ const char *getDesc() const { return Desc; }
+
+ unsigned getValue() const { return Value.load(std::memory_order_relaxed); }
+
+ // Allow use of this class as the value itself.
+ operator unsigned() const { return getValue(); }
+
+ const TrackingStatistic &operator=(unsigned Val) {
+ Value.store(Val, std::memory_order_relaxed);
+ return init();
+ }
+
+ const TrackingStatistic &operator++() {
+ Value.fetch_add(1, std::memory_order_relaxed);
+ return init();
+ }
+
+ unsigned operator++(int) {
+ init();
+ return Value.fetch_add(1, std::memory_order_relaxed);
+ }
+
+ const TrackingStatistic &operator--() {
+ Value.fetch_sub(1, std::memory_order_relaxed);
+ return init();
+ }
+
+ unsigned operator--(int) {
+ init();
+ return Value.fetch_sub(1, std::memory_order_relaxed);
+ }
+
+ const TrackingStatistic &operator+=(unsigned V) {
+ if (V == 0)
+ return *this;
+ Value.fetch_add(V, std::memory_order_relaxed);
+ return init();
+ }
+
+ const TrackingStatistic &operator-=(unsigned V) {
+ if (V == 0)
+ return *this;
+ Value.fetch_sub(V, std::memory_order_relaxed);
+ return init();
+ }
+
+ void updateMax(unsigned V) {
+ unsigned PrevMax = Value.load(std::memory_order_relaxed);
+ // Keep trying to update max until we succeed or another thread produces
+ // a bigger max than us.
+ while (V > PrevMax && !Value.compare_exchange_weak(
+ PrevMax, V, std::memory_order_relaxed)) {
+ }
+ init();
+ }
+
+protected:
+ TrackingStatistic &init() {
+ if (!Initialized.load(std::memory_order_acquire))
+ RegisterStatistic();
+ return *this;
+ }
+
+ void RegisterStatistic();
+};
+
+class NoopStatistic {
+public:
+ NoopStatistic(const char * /*DebugType*/, const char * /*Name*/,
+ const char * /*Desc*/) {}
+
+ unsigned getValue() const { return 0; }
+
+ // Allow use of this class as the value itself.
+ operator unsigned() const { return 0; }
+
+ const NoopStatistic &operator=(unsigned Val) { return *this; }
+
+ const NoopStatistic &operator++() { return *this; }
+
+ unsigned operator++(int) { return 0; }
+
+ const NoopStatistic &operator--() { return *this; }
+
+ unsigned operator--(int) { return 0; }
+
+ const NoopStatistic &operator+=(const unsigned &V) { return *this; }
+
+ const NoopStatistic &operator-=(const unsigned &V) { return *this; }
+
+ void updateMax(unsigned V) {}
+};
+
+#if LLVM_ENABLE_STATS
+using Statistic = TrackingStatistic;
+#else
+using Statistic = NoopStatistic;
+#endif
+
+// STATISTIC - A macro to make definition of statistics really simple. This
+// automatically passes the DEBUG_TYPE of the file into the statistic.
+#define STATISTIC(VARNAME, DESC) \
+ static llvm::Statistic VARNAME = {DEBUG_TYPE, #VARNAME, DESC}
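+
+// Example usage (illustrative; "widgets" and N are stand-ins, and DEBUG_TYPE
+// must be defined before using STATISTIC):
+//
+//   #define DEBUG_TYPE "widgets"
+//   STATISTIC(NumWidgets, "Number of widgets processed");
+//   ...
+//   ++NumWidgets;            // Relaxed atomic update; registers lazily.
+//   NumWidgets.updateMax(N); // Tracks a running maximum.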
+
+// ALWAYS_ENABLED_STATISTIC - A macro to define a statistic like STATISTIC but
+// it is enabled even if LLVM_ENABLE_STATS is off.
+#define ALWAYS_ENABLED_STATISTIC(VARNAME, DESC) \
+ static llvm::TrackingStatistic VARNAME = {DEBUG_TYPE, #VARNAME, DESC}
+
+/// Enable the collection and printing of statistics.
+void EnableStatistics(bool DoPrintOnExit = true);
+
+/// Check if statistics are enabled.
+bool AreStatisticsEnabled();
+
+/// Return a file stream to print our output on.
+std::unique_ptr<raw_fd_ostream> CreateInfoOutputFile();
+
+/// Print statistics to the file returned by CreateInfoOutputFile().
+void PrintStatistics();
+
+/// Print statistics to the given output stream.
+void PrintStatistics(raw_ostream &OS);
+
+/// Print statistics in JSON format. This does include all global timers (\see
+/// Timer, TimerGroup). Note that the timers are cleared after printing and will
+/// not be printed in human readable form or in a second call of
+/// PrintStatisticsJSON().
+void PrintStatisticsJSON(raw_ostream &OS);
+
+/// Get the statistics. This can be used to look up the value of
+/// statistics without needing to parse JSON.
+///
+/// This function does not prevent statistics being updated by other threads
+/// during its execution. It will return the value at the point that it is
+/// read. However, it will prevent new statistics from registering until it
+/// completes.
+const std::vector<std::pair<StringRef, unsigned>> GetStatistics();
+
+/// Reset the statistics. This can be used to zero and de-register the
+/// statistics in order to measure a compilation.
+///
+/// When this function begins to call destructors prior to returning, all
+/// statistics will be zero and unregistered. However, that might not remain the
+/// case by the time this function finishes returning. Whether updates from
+/// other threads are lost or merely deferred until the function returns is
+/// timing sensitive.
+///
+/// Callers who intend to use this to measure statistics for a single
+/// compilation should ensure that no compilations are in progress at the point
+/// this function is called and that only one compilation executes until calling
+/// GetStatistics().
+void ResetStatistics();
+
+} // end namespace llvm
+
+#endif // LLVM_ADT_STATISTIC_H
+
+#ifdef __GNUC__
+#pragma GCC diagnostic pop
+#endif
diff --git a/contrib/libs/llvm14/include/llvm/ADT/StringExtras.h b/contrib/libs/llvm14/include/llvm/ADT/StringExtras.h
new file mode 100644
index 0000000000..8de07bf2d2
--- /dev/null
+++ b/contrib/libs/llvm14/include/llvm/ADT/StringExtras.h
@@ -0,0 +1,612 @@
+#pragma once
+
+#ifdef __GNUC__
+#pragma GCC diagnostic push
+#pragma GCC diagnostic ignored "-Wunused-parameter"
+#endif
+
+//===- llvm/ADT/StringExtras.h - Useful string functions --------*- C++ -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+///
+/// \file
+/// This file contains some functions that are useful when dealing with strings.
+///
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_ADT_STRINGEXTRAS_H
+#define LLVM_ADT_STRINGEXTRAS_H
+
+#include "llvm/ADT/APSInt.h"
+#include "llvm/ADT/ArrayRef.h"
+#include "llvm/ADT/SmallString.h"
+#include "llvm/ADT/StringRef.h"
+#include "llvm/ADT/Twine.h"
+#include <cassert>
+#include <cstddef>
+#include <cstdint>
+#include <cstdlib>
+#include <cstring>
+#include <iterator>
+#include <string>
+#include <utility>
+
+namespace llvm {
+
+class raw_ostream;
+
+/// hexdigit - Return the hexadecimal character for the
+/// given number \p X (which should be less than 16).
+inline char hexdigit(unsigned X, bool LowerCase = false) {
+ assert(X < 16);
+ static const char LUT[] = "0123456789ABCDEF";
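+  // OR-ing in 0x20 sets the ASCII lowercase bit: 'A'..'F' become 'a'..'f',
+  // while '0'..'9' already have that bit set and are unchanged.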
+ const uint8_t Offset = LowerCase ? 32 : 0;
+ return LUT[X] | Offset;
+}
+
+/// Given an array of C-style strings terminated by a null pointer, construct
+/// a vector of StringRefs representing the same strings, excluding the
+/// terminating null pointer.
+inline std::vector<StringRef> toStringRefArray(const char *const *Strings) {
+ std::vector<StringRef> Result;
+ while (*Strings)
+ Result.push_back(*Strings++);
+ return Result;
+}
+
+/// Construct a string ref from a boolean.
+inline StringRef toStringRef(bool B) { return StringRef(B ? "true" : "false"); }
+
+/// Construct a string ref from an array ref of unsigned chars.
+inline StringRef toStringRef(ArrayRef<uint8_t> Input) {
+ return StringRef(reinterpret_cast<const char *>(Input.begin()), Input.size());
+}
+
+/// Construct a string ref from an array ref of unsigned chars.
+inline ArrayRef<uint8_t> arrayRefFromStringRef(StringRef Input) {
+ return {Input.bytes_begin(), Input.bytes_end()};
+}
+
+/// Interpret the given character \p C as a hexadecimal digit and return its
+/// value.
+///
+/// If \p C is not a valid hex digit, -1U is returned.
+inline unsigned hexDigitValue(char C) {
+ /* clang-format off */
+ static const int16_t LUT[256] = {
+ -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1,
+ -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1,
+ -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1,
+ 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, -1, -1, -1, -1, -1, -1, // '0'..'9'
+ -1, 10, 11, 12, 13, 14, 15, -1, -1, -1, -1, -1, -1, -1, -1, -1, // 'A'..'F'
+ -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1,
+ -1, 10, 11, 12, 13, 14, 15, -1, -1, -1, -1, -1, -1, -1, -1, -1, // 'a'..'f'
+ -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1,
+ -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1,
+ -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1,
+ -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1,
+ -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1,
+ -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1,
+ -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1,
+ -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1,
+ -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1,
+ };
+ /* clang-format on */
+ return LUT[static_cast<unsigned char>(C)];
+}
+
+/// Checks if character \p C is one of the 10 decimal digits.
+inline bool isDigit(char C) { return C >= '0' && C <= '9'; }
+
+/// Checks if character \p C is a hexadecimal numeric character.
+inline bool isHexDigit(char C) { return hexDigitValue(C) != ~0U; }
+
+/// Checks if character \p C is a valid letter as classified by "C" locale.
+inline bool isAlpha(char C) {
+ return ('a' <= C && C <= 'z') || ('A' <= C && C <= 'Z');
+}
+
+/// Checks whether character \p C is either a decimal digit or an uppercase or
+/// lowercase letter as classified by "C" locale.
+inline bool isAlnum(char C) { return isAlpha(C) || isDigit(C); }
+
+/// Checks whether character \p C is valid ASCII (high bit is zero).
+inline bool isASCII(char C) { return static_cast<unsigned char>(C) <= 127; }
+
+/// Checks whether all characters in S are ASCII.
+inline bool isASCII(llvm::StringRef S) {
+ for (char C : S)
+ if (LLVM_UNLIKELY(!isASCII(C)))
+ return false;
+ return true;
+}
+
+/// Checks whether character \p C is printable.
+///
+/// Locale-independent version of the C standard library isprint; the standard
+/// function's results may differ between platforms.
+inline bool isPrint(char C) {
+ unsigned char UC = static_cast<unsigned char>(C);
+ return (0x20 <= UC) && (UC <= 0x7E);
+}
+
+/// Checks whether character \p C is whitespace in the "C" locale.
+///
+/// Locale-independent version of the C standard library isspace.
+inline bool isSpace(char C) {
+ return C == ' ' || C == '\f' || C == '\n' || C == '\r' || C == '\t' ||
+ C == '\v';
+}
+
+/// Returns the corresponding lowercase character if \p x is uppercase.
+inline char toLower(char x) {
+ if (x >= 'A' && x <= 'Z')
+ return x - 'A' + 'a';
+ return x;
+}
+
+/// Returns the corresponding uppercase character if \p x is lowercase.
+inline char toUpper(char x) {
+ if (x >= 'a' && x <= 'z')
+ return x - 'a' + 'A';
+ return x;
+}
+
+inline std::string utohexstr(uint64_t X, bool LowerCase = false,
+ unsigned Width = 0) {
+ char Buffer[17];
+ char *BufPtr = std::end(Buffer);
+
+ if (X == 0) *--BufPtr = '0';
+
+ for (unsigned i = 0; Width ? (i < Width) : X; ++i) {
+ unsigned char Mod = static_cast<unsigned char>(X) & 15;
+ *--BufPtr = hexdigit(Mod, LowerCase);
+ X >>= 4;
+ }
+
+ return std::string(BufPtr, std::end(Buffer));
+}
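+
+// For example (following the code above): utohexstr(0xDEAD) == "DEAD", and
+// utohexstr(0xbeef, /*LowerCase=*/true, /*Width=*/8) == "0000beef".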
+
+/// Convert buffer \p Input to its hexadecimal representation.
+/// The returned string is double the size of \p Input.
+inline void toHex(ArrayRef<uint8_t> Input, bool LowerCase,
+ SmallVectorImpl<char> &Output) {
+ const size_t Length = Input.size();
+ Output.resize_for_overwrite(Length * 2);
+
+ for (size_t i = 0; i < Length; i++) {
+ const uint8_t c = Input[i];
+ Output[i * 2 ] = hexdigit(c >> 4, LowerCase);
+ Output[i * 2 + 1] = hexdigit(c & 15, LowerCase);
+ }
+}
+
+inline std::string toHex(ArrayRef<uint8_t> Input, bool LowerCase = false) {
+ SmallString<16> Output;
+ toHex(Input, LowerCase, Output);
+ return std::string(Output);
+}
+
+inline std::string toHex(StringRef Input, bool LowerCase = false) {
+ return toHex(arrayRefFromStringRef(Input), LowerCase);
+}
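+
+// For example: toHex("AB") == "4142" ('A' is 0x41, 'B' is 0x42); fromHex()
+// below performs the inverse conversion.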
+
+/// Store the binary representation of the two provided values, \p MSB and
+/// \p LSB, that make up the nibbles of a hexadecimal digit. If \p MSB or \p LSB
+/// do not correspond to proper nibbles of a hexadecimal digit, this method
+/// returns false. Otherwise, returns true.
+inline bool tryGetHexFromNibbles(char MSB, char LSB, uint8_t &Hex) {
+ unsigned U1 = hexDigitValue(MSB);
+ unsigned U2 = hexDigitValue(LSB);
+ if (U1 == ~0U || U2 == ~0U)
+ return false;
+
+ Hex = static_cast<uint8_t>((U1 << 4) | U2);
+ return true;
+}
+
+/// Return the byte whose high and low nibbles are the hexadecimal digits
+/// \p MSB and \p LSB. Asserts that both are valid hexadecimal digits.
+inline uint8_t hexFromNibbles(char MSB, char LSB) {
+ uint8_t Hex = 0;
+ bool GotHex = tryGetHexFromNibbles(MSB, LSB, Hex);
+ (void)GotHex;
+ assert(GotHex && "MSB and/or LSB do not correspond to hex digits");
+ return Hex;
+}
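+
+// Illustrative usage:
+//   hexFromNibbles('A', 'B') == 0xAB
+//   uint8_t Byte;
+//   tryGetHexFromNibbles('G', '0', Byte) == false  // 'G' is not a hex digit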
+
+/// Convert hexadecimal string \p Input to its binary representation and store
+/// the result in \p Output. Returns true if the binary representation could be
+/// converted from the hexadecimal string. Returns false if \p Input contains
+/// non-hexadecimal digits. The output string is half the size of \p Input.
+inline bool tryGetFromHex(StringRef Input, std::string &Output) {
+ if (Input.empty())
+ return true;
+
+ // If the input string is not properly aligned on 2 nibbles we pad out the
+ // front with a 0 prefix; e.g. `ABC` -> `0ABC`.
+ Output.resize((Input.size() + 1) / 2);
+ char *OutputPtr = const_cast<char *>(Output.data());
+ if (Input.size() % 2 == 1) {
+ uint8_t Hex = 0;
+ if (!tryGetHexFromNibbles('0', Input.front(), Hex))
+ return false;
+ *OutputPtr++ = Hex;
+ Input = Input.drop_front();
+ }
+
+ // Convert the nibble pairs (e.g. `9C`) into bytes (0x9C).
+ // With the padding above we know the input is aligned and the output expects
+ // exactly half as many bytes as nibbles in the input.
+ size_t InputSize = Input.size();
+ assert(InputSize % 2 == 0);
+ const char *InputPtr = Input.data();
+ for (size_t OutputIndex = 0; OutputIndex < InputSize / 2; ++OutputIndex) {
+ uint8_t Hex = 0;
+ if (!tryGetHexFromNibbles(InputPtr[OutputIndex * 2 + 0], // MSB
+ InputPtr[OutputIndex * 2 + 1], // LSB
+ Hex))
+ return false;
+ OutputPtr[OutputIndex] = Hex;
+ }
+ return true;
+}
+
+/// Convert hexadecimal string \p Input to its binary representation.
+/// The return string is half the size of \p Input.
+inline std::string fromHex(StringRef Input) {
+ std::string Hex;
+ bool GotHex = tryGetFromHex(Input, Hex);
+ (void)GotHex;
+ assert(GotHex && "Input contains non hex digits");
+ return Hex;
+}
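+
+// Illustrative usage; an odd-length input is zero-padded at the front:
+//   fromHex("2F41") == "\x2f\x41"  // "/A"
+//   fromHex("F41")  == "\x0f\x41"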
+
+/// Convert the string \p S to an integer of the specified type using
+/// the radix \p Base. If \p Base is 0, auto-detects the radix.
+/// Returns true if the number was successfully converted, false otherwise.
+template <typename N> bool to_integer(StringRef S, N &Num, unsigned Base = 0) {
+ return !S.getAsInteger(Base, Num);
+}
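+
+// Illustrative usage; Base 0 autosenses 0x/0b/0 prefixes:
+//   int V;
+//   to_integer("42", V, 10);  // true, V == 42
+//   to_integer("0x2a", V);    // true, V == 42
+//   to_integer("4z", V, 10);  // false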
+
+namespace detail {
+template <typename N>
+inline bool to_float(const Twine &T, N &Num, N (*StrTo)(const char *, char **)) {
+ SmallString<32> Storage;
+ StringRef S = T.toNullTerminatedStringRef(Storage);
+ char *End;
+ N Temp = StrTo(S.data(), &End);
+ if (*End != '\0')
+ return false;
+ Num = Temp;
+ return true;
+}
+} // end namespace detail
+
+inline bool to_float(const Twine &T, float &Num) {
+ return detail::to_float(T, Num, strtof);
+}
+
+inline bool to_float(const Twine &T, double &Num) {
+ return detail::to_float(T, Num, strtod);
+}
+
+inline bool to_float(const Twine &T, long double &Num) {
+ return detail::to_float(T, Num, strtold);
+}
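+
+// Illustrative usage; parsing fails if any characters remain unconsumed:
+//   double D;
+//   to_float("2.5", D);   // true, D == 2.5
+//   to_float("2.5x", D);  // false, 'x' is left over after strtod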
+
+inline std::string utostr(uint64_t X, bool isNeg = false) {
+ char Buffer[21];
+ char *BufPtr = std::end(Buffer);
+
+ if (X == 0) *--BufPtr = '0'; // Handle special case...
+
+ while (X) {
+ *--BufPtr = '0' + char(X % 10);
+ X /= 10;
+ }
+
+ if (isNeg) *--BufPtr = '-'; // Add negative sign...
+ return std::string(BufPtr, std::end(Buffer));
+}
+
+inline std::string itostr(int64_t X) {
+ if (X < 0)
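+    // Negate in unsigned arithmetic as (1 + ~X); a plain -X would overflow
+    // for INT64_MIN.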
+ return utostr(static_cast<uint64_t>(1) + ~static_cast<uint64_t>(X), true);
+ else
+ return utostr(static_cast<uint64_t>(X));
+}
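+
+// Illustrative usage:
+//   utostr(42)        == "42"
+//   itostr(-42)       == "-42"
+//   itostr(INT64_MIN) == "-9223372036854775808"  // no signed overflow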
+
+inline std::string toString(const APInt &I, unsigned Radix, bool Signed,
+ bool formatAsCLiteral = false) {
+ SmallString<40> S;
+ I.toString(S, Radix, Signed, formatAsCLiteral);
+ return std::string(S.str());
+}
+
+inline std::string toString(const APSInt &I, unsigned Radix) {
+ return toString(I, Radix, I.isSigned());
+}
+
+/// StrInStrNoCase - Portable version of strcasestr. Locates the first
+/// occurrence of string 's2' in string 's1', ignoring case. Returns
+/// the offset of s2 in s1, or npos if s2 cannot be found.
+StringRef::size_type StrInStrNoCase(StringRef s1, StringRef s2);
+
+/// getToken - This function extracts one token from source, ignoring any
+/// leading characters that appear in the Delimiters string, and ending the
+/// token at any of the characters that appear in the Delimiters string. If
+/// there are no tokens in the source string, an empty string is returned.
+/// The function returns a pair containing the extracted token and the
+/// remaining tail string.
+std::pair<StringRef, StringRef> getToken(StringRef Source,
+ StringRef Delimiters = " \t\n\v\f\r");
+
+/// SplitString - Split up the specified string according to the specified
+/// delimiters, appending the result fragments to the output list.
+void SplitString(StringRef Source,
+ SmallVectorImpl<StringRef> &OutFragments,
+ StringRef Delimiters = " \t\n\v\f\r");
+
+/// Returns the English suffix for an ordinal integer (-st, -nd, -rd, -th).
+inline StringRef getOrdinalSuffix(unsigned Val) {
+ // It is critically important that we do this perfectly for
+ // user-written sequences with over 100 elements.
+ switch (Val % 100) {
+ case 11:
+ case 12:
+ case 13:
+ return "th";
+ default:
+ switch (Val % 10) {
+ case 1: return "st";
+ case 2: return "nd";
+ case 3: return "rd";
+ default: return "th";
+ }
+ }
+}
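+
+// Illustrative usage:
+//   getOrdinalSuffix(1)  == "st"    getOrdinalSuffix(11)  == "th"
+//   getOrdinalSuffix(2)  == "nd"    getOrdinalSuffix(112) == "th"
+//   getOrdinalSuffix(23) == "rd"    getOrdinalSuffix(122) == "nd"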
+
+/// Print each character of the specified string, escaping it if it is not
+/// printable or if it is an escape char.
+void printEscapedString(StringRef Name, raw_ostream &Out);
+
+/// Print each character of the specified string, escaping HTML special
+/// characters.
+void printHTMLEscaped(StringRef String, raw_ostream &Out);
+
+/// printLowerCase - Print each character as lowercase if it is uppercase.
+void printLowerCase(StringRef String, raw_ostream &Out);
+
+/// Converts a string from camel-case to snake-case by replacing all uppercase
+/// letters with '_' followed by the letter in lowercase, except if the
+/// uppercase letter is the first character of the string.
+std::string convertToSnakeFromCamelCase(StringRef input);
+
+/// Converts a string from snake-case to camel-case by replacing all occurrences
+/// of '_' followed by a lowercase letter with the letter in uppercase.
+/// Optionally allow capitalization of the first letter (if it is a lowercase
+/// letter)
+std::string convertToCamelFromSnakeCase(StringRef input,
+ bool capitalizeFirst = false);
+
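+// Expected behavior per the doc comments above; the definitions are out of
+// line, so these examples are illustrative rather than normative:
+//   convertToSnakeFromCamelCase("StringMap")        == "string_map"
+//   convertToCamelFromSnakeCase("string_map")       == "stringMap"
+//   convertToCamelFromSnakeCase("string_map", true) == "StringMap"
+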
+namespace detail {
+
+template <typename IteratorT>
+inline std::string join_impl(IteratorT Begin, IteratorT End,
+ StringRef Separator, std::input_iterator_tag) {
+ std::string S;
+ if (Begin == End)
+ return S;
+
+ S += (*Begin);
+ while (++Begin != End) {
+ S += Separator;
+ S += (*Begin);
+ }
+ return S;
+}
+
+template <typename IteratorT>
+inline std::string join_impl(IteratorT Begin, IteratorT End,
+ StringRef Separator, std::forward_iterator_tag) {
+ std::string S;
+ if (Begin == End)
+ return S;
+
+ size_t Len = (std::distance(Begin, End) - 1) * Separator.size();
+ for (IteratorT I = Begin; I != End; ++I)
+ Len += (*I).size();
+ S.reserve(Len);
+ size_t PrevCapacity = S.capacity();
+ (void)PrevCapacity;
+ S += (*Begin);
+ while (++Begin != End) {
+ S += Separator;
+ S += (*Begin);
+ }
+ assert(PrevCapacity == S.capacity() && "String grew during building");
+ return S;
+}
+
+template <typename Sep>
+inline void join_items_impl(std::string &Result, Sep Separator) {}
+
+template <typename Sep, typename Arg>
+inline void join_items_impl(std::string &Result, Sep Separator,
+ const Arg &Item) {
+ Result += Item;
+}
+
+template <typename Sep, typename Arg1, typename... Args>
+inline void join_items_impl(std::string &Result, Sep Separator, const Arg1 &A1,
+ Args &&... Items) {
+ Result += A1;
+ Result += Separator;
+ join_items_impl(Result, Separator, std::forward<Args>(Items)...);
+}
+
+inline size_t join_one_item_size(char) { return 1; }
+inline size_t join_one_item_size(const char *S) { return S ? ::strlen(S) : 0; }
+
+template <typename T> inline size_t join_one_item_size(const T &Str) {
+ return Str.size();
+}
+
+inline size_t join_items_size() { return 0; }
+
+template <typename A1> inline size_t join_items_size(const A1 &A) {
+ return join_one_item_size(A);
+}
+template <typename A1, typename... Args>
+inline size_t join_items_size(const A1 &A, Args &&... Items) {
+ return join_one_item_size(A) + join_items_size(std::forward<Args>(Items)...);
+}
+
+} // end namespace detail
+
+/// Joins the strings in the range [Begin, End), adding Separator between
+/// the elements.
+template <typename IteratorT>
+inline std::string join(IteratorT Begin, IteratorT End, StringRef Separator) {
+ using tag = typename std::iterator_traits<IteratorT>::iterator_category;
+ return detail::join_impl(Begin, End, Separator, tag());
+}
+
+/// Joins the strings in the range [R.begin(), R.end()), adding Separator
+/// between the elements.
+template <typename Range>
+inline std::string join(Range &&R, StringRef Separator) {
+ return join(R.begin(), R.end(), Separator);
+}
+
+/// Joins the strings in the parameter pack \p Items, adding \p Separator
+/// between the elements. All arguments must be implicitly convertible to
+/// std::string, or there should be an overload of std::string::operator+=()
+/// that accepts the argument explicitly.
+template <typename Sep, typename... Args>
+inline std::string join_items(Sep Separator, Args &&... Items) {
+ std::string Result;
+ if (sizeof...(Items) == 0)
+ return Result;
+
+ size_t NS = detail::join_one_item_size(Separator);
+ size_t NI = detail::join_items_size(std::forward<Args>(Items)...);
+ Result.reserve(NI + (sizeof...(Items) - 1) * NS + 1);
+ detail::join_items_impl(Result, Separator, std::forward<Args>(Items)...);
+ return Result;
+}
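+
+// Illustrative usage:
+//   std::vector<std::string> V = {"a", "b", "c"};
+//   join(V, ", ")                 == "a, b, c"
+//   join_items('/', "usr", "lib") == "usr/lib"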
+
+/// A helper class to return the specified delimiter string after the first
+/// invocation of operator StringRef(). Used to generate a comma-separated
+/// list from a loop like so:
+///
+/// \code
+/// ListSeparator LS;
+/// for (auto &I : C)
+/// OS << LS << I.getName();
+/// \endcode
+class ListSeparator {
+ bool First = true;
+ StringRef Separator;
+
+public:
+ ListSeparator(StringRef Separator = ", ") : Separator(Separator) {}
+ operator StringRef() {
+ if (First) {
+ First = false;
+ return {};
+ }
+ return Separator;
+ }
+};
+
+/// A forward iterator over partitions of string over a separator.
+class SplittingIterator
+ : public iterator_facade_base<SplittingIterator, std::forward_iterator_tag,
+ StringRef> {
+ char SeparatorStorage;
+ StringRef Current;
+ StringRef Next;
+ StringRef Separator;
+
+public:
+ SplittingIterator(StringRef Str, StringRef Separator)
+ : Next(Str), Separator(Separator) {
+ ++*this;
+ }
+
+ SplittingIterator(StringRef Str, char Separator)
+ : SeparatorStorage(Separator), Next(Str),
+ Separator(&SeparatorStorage, 1) {
+ ++*this;
+ }
+
+ SplittingIterator(const SplittingIterator &R)
+ : SeparatorStorage(R.SeparatorStorage), Current(R.Current), Next(R.Next),
+ Separator(R.Separator) {
+ if (R.Separator.data() == &R.SeparatorStorage)
+ Separator = StringRef(&SeparatorStorage, 1);
+ }
+
+ SplittingIterator &operator=(const SplittingIterator &R) {
+ if (this == &R)
+ return *this;
+
+ SeparatorStorage = R.SeparatorStorage;
+ Current = R.Current;
+ Next = R.Next;
+ Separator = R.Separator;
+ if (R.Separator.data() == &R.SeparatorStorage)
+ Separator = StringRef(&SeparatorStorage, 1);
+ return *this;
+ }
+
+ bool operator==(const SplittingIterator &R) const {
+ assert(Separator == R.Separator);
+ return Current.data() == R.Current.data();
+ }
+
+ const StringRef &operator*() const { return Current; }
+
+ StringRef &operator*() { return Current; }
+
+ SplittingIterator &operator++() {
+ std::tie(Current, Next) = Next.split(Separator);
+ return *this;
+ }
+};
+
+/// Split the specified string over a separator and return a range-compatible
+/// iterable over its partitions. Used to permit conveniently iterating
+/// over separated strings like so:
+///
+/// \code
+/// for (StringRef x : llvm::split("foo,bar,baz", ","))
+/// ...;
+/// \endcode
+///
+/// Note that the passed string must remain valid throughout the lifetime
+/// of the iterators.
+inline iterator_range<SplittingIterator> split(StringRef Str,
+                                               StringRef Separator) {
+ return {SplittingIterator(Str, Separator),
+ SplittingIterator(StringRef(), Separator)};
+}
+
+inline iterator_range<SplittingIterator> split(StringRef Str, char Separator) {
+ return {SplittingIterator(Str, Separator),
+ SplittingIterator(StringRef(), Separator)};
+}
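+
+// Illustrative usage; empty fragments between adjacent separators are kept:
+//   for (StringRef Part : llvm::split("a,,b", ','))
+//     ...;  // visits "a", "", "b"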
+
+} // end namespace llvm
+
+#endif // LLVM_ADT_STRINGEXTRAS_H
+
+#ifdef __GNUC__
+#pragma GCC diagnostic pop
+#endif
diff --git a/contrib/libs/llvm14/include/llvm/ADT/StringMap.h b/contrib/libs/llvm14/include/llvm/ADT/StringMap.h
new file mode 100644
index 0000000000..52ed099d5d
--- /dev/null
+++ b/contrib/libs/llvm14/include/llvm/ADT/StringMap.h
@@ -0,0 +1,499 @@
+#pragma once
+
+#ifdef __GNUC__
+#pragma GCC diagnostic push
+#pragma GCC diagnostic ignored "-Wunused-parameter"
+#endif
+
+//===- StringMap.h - String Hash table map interface ------------*- C++ -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+///
+/// \file
+/// This file defines the StringMap class.
+///
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_ADT_STRINGMAP_H
+#define LLVM_ADT_STRINGMAP_H
+
+#include "llvm/ADT/StringMapEntry.h"
+#include "llvm/ADT/iterator.h"
+#include "llvm/Support/AllocatorBase.h"
+#include "llvm/Support/PointerLikeTypeTraits.h"
+#include <initializer_list>
+#include <iterator>
+
+namespace llvm {
+
+template <typename ValueTy> class StringMapConstIterator;
+template <typename ValueTy> class StringMapIterator;
+template <typename ValueTy> class StringMapKeyIterator;
+
+/// StringMapImpl - This is the base class of StringMap that is shared among
+/// all of its instantiations.
+class StringMapImpl {
+protected:
+  // Array of NumBuckets pointers to entries; null pointers are holes.
+  // TheTable[NumBuckets] contains a sentinel value for easy iteration. It is
+  // followed by an array of the actual hash values as unsigned integers.
+ StringMapEntryBase **TheTable = nullptr;
+ unsigned NumBuckets = 0;
+ unsigned NumItems = 0;
+ unsigned NumTombstones = 0;
+ unsigned ItemSize;
+
+protected:
+ explicit StringMapImpl(unsigned itemSize) : ItemSize(itemSize) {}
+ StringMapImpl(StringMapImpl &&RHS)
+ : TheTable(RHS.TheTable), NumBuckets(RHS.NumBuckets),
+ NumItems(RHS.NumItems), NumTombstones(RHS.NumTombstones),
+ ItemSize(RHS.ItemSize) {
+ RHS.TheTable = nullptr;
+ RHS.NumBuckets = 0;
+ RHS.NumItems = 0;
+ RHS.NumTombstones = 0;
+ }
+
+ StringMapImpl(unsigned InitSize, unsigned ItemSize);
+ unsigned RehashTable(unsigned BucketNo = 0);
+
+ /// LookupBucketFor - Look up the bucket that the specified string should end
+ /// up in. If it already exists as a key in the map, the Item pointer for the
+ /// specified bucket will be non-null. Otherwise, it will be null. In either
+ /// case, the FullHashValue field of the bucket will be set to the hash value
+ /// of the string.
+ unsigned LookupBucketFor(StringRef Key);
+
+ /// FindKey - Look up the bucket that contains the specified key. If it exists
+ /// in the map, return the bucket number of the key. Otherwise return -1.
+ /// This does not modify the map.
+ int FindKey(StringRef Key) const;
+
+ /// RemoveKey - Remove the specified StringMapEntry from the table, but do not
+ /// delete it. This aborts if the value isn't in the table.
+ void RemoveKey(StringMapEntryBase *V);
+
+ /// RemoveKey - Remove the StringMapEntry for the specified key from the
+ /// table, returning it. If the key is not in the table, this returns null.
+ StringMapEntryBase *RemoveKey(StringRef Key);
+
+ /// Allocate the table with the specified number of buckets and otherwise
+ /// setup the map as empty.
+ void init(unsigned Size);
+
+public:
+ static constexpr uintptr_t TombstoneIntVal =
+ static_cast<uintptr_t>(-1)
+ << PointerLikeTypeTraits<StringMapEntryBase *>::NumLowBitsAvailable;
+
+ static StringMapEntryBase *getTombstoneVal() {
+ return reinterpret_cast<StringMapEntryBase *>(TombstoneIntVal);
+ }
+
+ unsigned getNumBuckets() const { return NumBuckets; }
+ unsigned getNumItems() const { return NumItems; }
+
+ bool empty() const { return NumItems == 0; }
+ unsigned size() const { return NumItems; }
+
+ void swap(StringMapImpl &Other) {
+ std::swap(TheTable, Other.TheTable);
+ std::swap(NumBuckets, Other.NumBuckets);
+ std::swap(NumItems, Other.NumItems);
+ std::swap(NumTombstones, Other.NumTombstones);
+ }
+};
+
+/// StringMap - This is an unconventional map that is specialized for handling
+/// keys that are "strings", which are basically ranges of bytes. This does some
+/// funky memory allocation and hashing things to make it extremely efficient,
+/// storing the string data *after* the value in the map.
+template <typename ValueTy, typename AllocatorTy = MallocAllocator>
+class StringMap : public StringMapImpl {
+ AllocatorTy Allocator;
+
+public:
+ using MapEntryTy = StringMapEntry<ValueTy>;
+
+ StringMap() : StringMapImpl(static_cast<unsigned>(sizeof(MapEntryTy))) {}
+
+ explicit StringMap(unsigned InitialSize)
+ : StringMapImpl(InitialSize, static_cast<unsigned>(sizeof(MapEntryTy))) {}
+
+ explicit StringMap(AllocatorTy A)
+ : StringMapImpl(static_cast<unsigned>(sizeof(MapEntryTy))), Allocator(A) {
+ }
+
+ StringMap(unsigned InitialSize, AllocatorTy A)
+ : StringMapImpl(InitialSize, static_cast<unsigned>(sizeof(MapEntryTy))),
+ Allocator(A) {}
+
+ StringMap(std::initializer_list<std::pair<StringRef, ValueTy>> List)
+ : StringMapImpl(List.size(), static_cast<unsigned>(sizeof(MapEntryTy))) {
+ insert(List);
+ }
+
+ StringMap(StringMap &&RHS)
+ : StringMapImpl(std::move(RHS)), Allocator(std::move(RHS.Allocator)) {}
+
+ StringMap(const StringMap &RHS)
+ : StringMapImpl(static_cast<unsigned>(sizeof(MapEntryTy))),
+ Allocator(RHS.Allocator) {
+ if (RHS.empty())
+ return;
+
+ // Allocate TheTable of the same size as RHS's TheTable, and set the
+ // sentinel appropriately (and NumBuckets).
+ init(RHS.NumBuckets);
+ unsigned *HashTable = (unsigned *)(TheTable + NumBuckets + 1),
+ *RHSHashTable = (unsigned *)(RHS.TheTable + NumBuckets + 1);
+
+ NumItems = RHS.NumItems;
+ NumTombstones = RHS.NumTombstones;
+ for (unsigned I = 0, E = NumBuckets; I != E; ++I) {
+ StringMapEntryBase *Bucket = RHS.TheTable[I];
+ if (!Bucket || Bucket == getTombstoneVal()) {
+ TheTable[I] = Bucket;
+ continue;
+ }
+
+ TheTable[I] = MapEntryTy::Create(
+ static_cast<MapEntryTy *>(Bucket)->getKey(), Allocator,
+ static_cast<MapEntryTy *>(Bucket)->getValue());
+ HashTable[I] = RHSHashTable[I];
+ }
+
+ // Note that here we've copied everything from the RHS into this object,
+ // tombstones included. We could, instead, have re-probed for each key to
+ // instantiate this new object without any tombstone buckets. The
+ // assumption here is that items are rarely deleted from most StringMaps,
+ // and so tombstones are rare, so the cost of re-probing for all inputs is
+ // not worthwhile.
+ }
+
+ StringMap &operator=(StringMap RHS) {
+ StringMapImpl::swap(RHS);
+ std::swap(Allocator, RHS.Allocator);
+ return *this;
+ }
+
+ ~StringMap() {
+ // Delete all the elements in the map, but don't reset the elements
+ // to default values. This is a copy of clear(), but avoids unnecessary
+ // work not required in the destructor.
+ if (!empty()) {
+ for (unsigned I = 0, E = NumBuckets; I != E; ++I) {
+ StringMapEntryBase *Bucket = TheTable[I];
+ if (Bucket && Bucket != getTombstoneVal()) {
+ static_cast<MapEntryTy *>(Bucket)->Destroy(Allocator);
+ }
+ }
+ }
+ free(TheTable);
+ }
+
+ AllocatorTy &getAllocator() { return Allocator; }
+ const AllocatorTy &getAllocator() const { return Allocator; }
+
+ using key_type = const char *;
+ using mapped_type = ValueTy;
+ using value_type = StringMapEntry<ValueTy>;
+ using size_type = size_t;
+
+ using const_iterator = StringMapConstIterator<ValueTy>;
+ using iterator = StringMapIterator<ValueTy>;
+
+ iterator begin() { return iterator(TheTable, NumBuckets == 0); }
+ iterator end() { return iterator(TheTable + NumBuckets, true); }
+ const_iterator begin() const {
+ return const_iterator(TheTable, NumBuckets == 0);
+ }
+ const_iterator end() const {
+ return const_iterator(TheTable + NumBuckets, true);
+ }
+
+ iterator_range<StringMapKeyIterator<ValueTy>> keys() const {
+ return make_range(StringMapKeyIterator<ValueTy>(begin()),
+ StringMapKeyIterator<ValueTy>(end()));
+ }
+
+ iterator find(StringRef Key) {
+ int Bucket = FindKey(Key);
+ if (Bucket == -1)
+ return end();
+ return iterator(TheTable + Bucket, true);
+ }
+
+ const_iterator find(StringRef Key) const {
+ int Bucket = FindKey(Key);
+ if (Bucket == -1)
+ return end();
+ return const_iterator(TheTable + Bucket, true);
+ }
+
+ /// lookup - Return the entry for the specified key, or a default
+ /// constructed value if no such entry exists.
+ ValueTy lookup(StringRef Key) const {
+ const_iterator it = find(Key);
+ if (it != end())
+ return it->second;
+ return ValueTy();
+ }
+
+ /// Lookup the ValueTy for the \p Key, or create a default constructed value
+ /// if the key is not in the map.
+ ValueTy &operator[](StringRef Key) { return try_emplace(Key).first->second; }
+
+ /// count - Return 1 if the element is in the map, 0 otherwise.
+ size_type count(StringRef Key) const { return find(Key) == end() ? 0 : 1; }
+
+ template <typename InputTy>
+ size_type count(const StringMapEntry<InputTy> &MapEntry) const {
+ return count(MapEntry.getKey());
+ }
+
+ /// equal - check whether both of the containers are equal.
+ bool operator==(const StringMap &RHS) const {
+ if (size() != RHS.size())
+ return false;
+
+ for (const auto &KeyValue : *this) {
+ auto FindInRHS = RHS.find(KeyValue.getKey());
+
+ if (FindInRHS == RHS.end())
+ return false;
+
+ if (!(KeyValue.getValue() == FindInRHS->getValue()))
+ return false;
+ }
+
+ return true;
+ }
+
+ bool operator!=(const StringMap &RHS) const { return !(*this == RHS); }
+
+ /// insert - Insert the specified key/value pair into the map. If the key
+ /// already exists in the map, return false and ignore the request, otherwise
+ /// insert it and return true.
+ bool insert(MapEntryTy *KeyValue) {
+ unsigned BucketNo = LookupBucketFor(KeyValue->getKey());
+ StringMapEntryBase *&Bucket = TheTable[BucketNo];
+ if (Bucket && Bucket != getTombstoneVal())
+ return false; // Already exists in map.
+
+ if (Bucket == getTombstoneVal())
+ --NumTombstones;
+ Bucket = KeyValue;
+ ++NumItems;
+ assert(NumItems + NumTombstones <= NumBuckets);
+
+ RehashTable();
+ return true;
+ }
+
+ /// insert - Inserts the specified key/value pair into the map if the key
+ /// isn't already in the map. The bool component of the returned pair is true
+ /// if and only if the insertion takes place, and the iterator component of
+ /// the pair points to the element with key equivalent to the key of the pair.
+ std::pair<iterator, bool> insert(std::pair<StringRef, ValueTy> KV) {
+ return try_emplace(KV.first, std::move(KV.second));
+ }
+
+ /// Inserts elements from range [first, last). If multiple elements in the
+ /// range have keys that compare equivalent, it is unspecified which element
+  /// is inserted.
+ template <typename InputIt> void insert(InputIt First, InputIt Last) {
+ for (InputIt It = First; It != Last; ++It)
+ insert(*It);
+ }
+
+  /// Inserts elements from the initializer list \p List. If multiple elements
+  /// in the list have keys that compare equivalent, it is unspecified which
+  /// element is inserted.
+ void insert(std::initializer_list<std::pair<StringRef, ValueTy>> List) {
+ insert(List.begin(), List.end());
+ }
+
+ /// Inserts an element or assigns to the current element if the key already
+ /// exists. The return type is the same as try_emplace.
+ template <typename V>
+ std::pair<iterator, bool> insert_or_assign(StringRef Key, V &&Val) {
+ auto Ret = try_emplace(Key, std::forward<V>(Val));
+ if (!Ret.second)
+ Ret.first->second = std::forward<V>(Val);
+ return Ret;
+ }
+
+ /// Emplace a new element for the specified key into the map if the key isn't
+ /// already in the map. The bool component of the returned pair is true
+ /// if and only if the insertion takes place, and the iterator component of
+ /// the pair points to the element with key equivalent to the key of the pair.
+ template <typename... ArgsTy>
+ std::pair<iterator, bool> try_emplace(StringRef Key, ArgsTy &&... Args) {
+ unsigned BucketNo = LookupBucketFor(Key);
+ StringMapEntryBase *&Bucket = TheTable[BucketNo];
+ if (Bucket && Bucket != getTombstoneVal())
+ return std::make_pair(iterator(TheTable + BucketNo, false),
+ false); // Already exists in map.
+
+ if (Bucket == getTombstoneVal())
+ --NumTombstones;
+ Bucket = MapEntryTy::Create(Key, Allocator, std::forward<ArgsTy>(Args)...);
+ ++NumItems;
+ assert(NumItems + NumTombstones <= NumBuckets);
+
+ BucketNo = RehashTable(BucketNo);
+ return std::make_pair(iterator(TheTable + BucketNo, false), true);
+ }
+
+  /// clear - Empties out the StringMap.
+ void clear() {
+ if (empty())
+ return;
+
+ // Zap all values, resetting the keys back to non-present (not tombstone),
+ // which is safe because we're removing all elements.
+ for (unsigned I = 0, E = NumBuckets; I != E; ++I) {
+ StringMapEntryBase *&Bucket = TheTable[I];
+ if (Bucket && Bucket != getTombstoneVal()) {
+ static_cast<MapEntryTy *>(Bucket)->Destroy(Allocator);
+ }
+ Bucket = nullptr;
+ }
+
+ NumItems = 0;
+ NumTombstones = 0;
+ }
+
+ /// remove - Remove the specified key/value pair from the map, but do not
+ /// erase it. This aborts if the key is not in the map.
+ void remove(MapEntryTy *KeyValue) { RemoveKey(KeyValue); }
+
+ void erase(iterator I) {
+ MapEntryTy &V = *I;
+ remove(&V);
+ V.Destroy(Allocator);
+ }
+
+ bool erase(StringRef Key) {
+ iterator I = find(Key);
+ if (I == end())
+ return false;
+ erase(I);
+ return true;
+ }
+};
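+
+// A minimal usage sketch of the container defined above:
+//   StringMap<int> Counts;
+//   Counts["foo"] += 1;            // default-constructs the value, then ++
+//   Counts.insert({"bar", 2});
+//   Counts.lookup("missing");      // returns 0 without inserting
+//   for (StringRef K : Counts.keys())
+//     ...;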
+
+template <typename DerivedTy, typename ValueTy>
+class StringMapIterBase
+ : public iterator_facade_base<DerivedTy, std::forward_iterator_tag,
+ ValueTy> {
+protected:
+ StringMapEntryBase **Ptr = nullptr;
+
+public:
+ StringMapIterBase() = default;
+
+ explicit StringMapIterBase(StringMapEntryBase **Bucket,
+ bool NoAdvance = false)
+ : Ptr(Bucket) {
+ if (!NoAdvance)
+ AdvancePastEmptyBuckets();
+ }
+
+ DerivedTy &operator=(const DerivedTy &Other) {
+ Ptr = Other.Ptr;
+ return static_cast<DerivedTy &>(*this);
+ }
+
+ friend bool operator==(const DerivedTy &LHS, const DerivedTy &RHS) {
+ return LHS.Ptr == RHS.Ptr;
+ }
+
+ DerivedTy &operator++() { // Preincrement
+ ++Ptr;
+ AdvancePastEmptyBuckets();
+ return static_cast<DerivedTy &>(*this);
+ }
+
+ DerivedTy operator++(int) { // Post-increment
+ DerivedTy Tmp(Ptr);
+ ++*this;
+ return Tmp;
+ }
+
+private:
+ void AdvancePastEmptyBuckets() {
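+    // The table ends in a sentinel (TheTable[NumBuckets]) that is neither
+    // null nor a tombstone, so this loop always terminates.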
+ while (*Ptr == nullptr || *Ptr == StringMapImpl::getTombstoneVal())
+ ++Ptr;
+ }
+};
+
+template <typename ValueTy>
+class StringMapConstIterator
+ : public StringMapIterBase<StringMapConstIterator<ValueTy>,
+ const StringMapEntry<ValueTy>> {
+ using base = StringMapIterBase<StringMapConstIterator<ValueTy>,
+ const StringMapEntry<ValueTy>>;
+
+public:
+ StringMapConstIterator() = default;
+ explicit StringMapConstIterator(StringMapEntryBase **Bucket,
+ bool NoAdvance = false)
+ : base(Bucket, NoAdvance) {}
+
+ const StringMapEntry<ValueTy> &operator*() const {
+ return *static_cast<const StringMapEntry<ValueTy> *>(*this->Ptr);
+ }
+};
+
+template <typename ValueTy>
+class StringMapIterator : public StringMapIterBase<StringMapIterator<ValueTy>,
+ StringMapEntry<ValueTy>> {
+ using base =
+ StringMapIterBase<StringMapIterator<ValueTy>, StringMapEntry<ValueTy>>;
+
+public:
+ StringMapIterator() = default;
+ explicit StringMapIterator(StringMapEntryBase **Bucket,
+ bool NoAdvance = false)
+ : base(Bucket, NoAdvance) {}
+
+ StringMapEntry<ValueTy> &operator*() const {
+ return *static_cast<StringMapEntry<ValueTy> *>(*this->Ptr);
+ }
+
+ operator StringMapConstIterator<ValueTy>() const {
+ return StringMapConstIterator<ValueTy>(this->Ptr, true);
+ }
+};
+
+template <typename ValueTy>
+class StringMapKeyIterator
+ : public iterator_adaptor_base<StringMapKeyIterator<ValueTy>,
+ StringMapConstIterator<ValueTy>,
+ std::forward_iterator_tag, StringRef> {
+ using base = iterator_adaptor_base<StringMapKeyIterator<ValueTy>,
+ StringMapConstIterator<ValueTy>,
+ std::forward_iterator_tag, StringRef>;
+
+public:
+ StringMapKeyIterator() = default;
+ explicit StringMapKeyIterator(StringMapConstIterator<ValueTy> Iter)
+ : base(std::move(Iter)) {}
+
+ StringRef operator*() const { return this->wrapped()->getKey(); }
+};
+
+} // end namespace llvm
+
+#endif // LLVM_ADT_STRINGMAP_H
+
+#ifdef __GNUC__
+#pragma GCC diagnostic pop
+#endif
diff --git a/contrib/libs/llvm14/include/llvm/ADT/StringMapEntry.h b/contrib/libs/llvm14/include/llvm/ADT/StringMapEntry.h
new file mode 100644
index 0000000000..78e3874702
--- /dev/null
+++ b/contrib/libs/llvm14/include/llvm/ADT/StringMapEntry.h
@@ -0,0 +1,161 @@
+#pragma once
+
+#ifdef __GNUC__
+#pragma GCC diagnostic push
+#pragma GCC diagnostic ignored "-Wunused-parameter"
+#endif
+
+//===- StringMapEntry.h - String Hash table map interface -------*- C++ -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+///
+/// \file
+/// This file defines the StringMapEntry class - it is intended to be a low
+/// dependency implementation detail of StringMap that is more suitable for
+/// inclusion in public headers than StringMap.h itself is.
+///
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_ADT_STRINGMAPENTRY_H
+#define LLVM_ADT_STRINGMAPENTRY_H
+
+#include "llvm/ADT/None.h"
+#include "llvm/ADT/StringRef.h"
+#include "llvm/ADT/STLFunctionalExtras.h"
+
+namespace llvm {
+
+/// StringMapEntryBase - Shared base class of StringMapEntry instances.
+class StringMapEntryBase {
+ size_t keyLength;
+
+public:
+ explicit StringMapEntryBase(size_t keyLength) : keyLength(keyLength) {}
+
+ size_t getKeyLength() const { return keyLength; }
+
+protected:
+ /// Helper to tail-allocate \p Key. It'd be nice to generalize this so it
+ /// could be reused elsewhere, maybe even taking an llvm::function_ref to
+ /// type-erase the allocator and put it in a source file.
+ template <typename AllocatorTy>
+ static void *allocateWithKey(size_t EntrySize, size_t EntryAlign,
+ StringRef Key, AllocatorTy &Allocator);
+};
+
+// Define out-of-line to dissuade inlining.
+template <typename AllocatorTy>
+void *StringMapEntryBase::allocateWithKey(size_t EntrySize, size_t EntryAlign,
+ StringRef Key,
+ AllocatorTy &Allocator) {
+ size_t KeyLength = Key.size();
+
+ // Allocate a new item with space for the string at the end and a null
+ // terminator.
+ size_t AllocSize = EntrySize + KeyLength + 1;
+ void *Allocation = Allocator.Allocate(AllocSize, EntryAlign);
+ assert(Allocation && "Unhandled out-of-memory");
+
+ // Copy the string information.
+ char *Buffer = reinterpret_cast<char *>(Allocation) + EntrySize;
+ if (KeyLength > 0)
+ ::memcpy(Buffer, Key.data(), KeyLength);
+ Buffer[KeyLength] = 0; // Null terminate for convenience of clients.
+ return Allocation;
+}
+
+/// StringMapEntryStorage - Holds the value in a StringMapEntry.
+///
+/// Factored out into a separate base class to make it easier to specialize.
+/// This is primarily intended to support StringSet, which doesn't need a value
+/// stored at all.
+template <typename ValueTy>
+class StringMapEntryStorage : public StringMapEntryBase {
+public:
+ ValueTy second;
+
+ explicit StringMapEntryStorage(size_t keyLength)
+ : StringMapEntryBase(keyLength), second() {}
+ template <typename... InitTy>
+ StringMapEntryStorage(size_t keyLength, InitTy &&... initVals)
+ : StringMapEntryBase(keyLength),
+ second(std::forward<InitTy>(initVals)...) {}
+ StringMapEntryStorage(StringMapEntryStorage &e) = delete;
+
+ const ValueTy &getValue() const { return second; }
+ ValueTy &getValue() { return second; }
+
+ void setValue(const ValueTy &V) { second = V; }
+};
+
+template <> class StringMapEntryStorage<NoneType> : public StringMapEntryBase {
+public:
+ explicit StringMapEntryStorage(size_t keyLength, NoneType = None)
+ : StringMapEntryBase(keyLength) {}
+ StringMapEntryStorage(StringMapEntryStorage &entry) = delete;
+
+ NoneType getValue() const { return None; }
+};
+
+/// StringMapEntry - This is used to represent one value that is inserted into
+/// a StringMap. It contains the Value itself and the key: the string length
+/// and data.
+template <typename ValueTy>
+class StringMapEntry final : public StringMapEntryStorage<ValueTy> {
+public:
+ using StringMapEntryStorage<ValueTy>::StringMapEntryStorage;
+
+ StringRef getKey() const {
+ return StringRef(getKeyData(), this->getKeyLength());
+ }
+
+ /// getKeyData - Return the start of the string data that is the key for this
+ /// value. The string data is always stored immediately after the
+ /// StringMapEntry object.
+ const char *getKeyData() const {
+ return reinterpret_cast<const char *>(this + 1);
+ }
+
+ StringRef first() const {
+ return StringRef(getKeyData(), this->getKeyLength());
+ }
+
+  /// Create a StringMapEntry for the specified key and construct the value
+  /// using \p initVals.
+ template <typename AllocatorTy, typename... InitTy>
+ static StringMapEntry *Create(StringRef key, AllocatorTy &allocator,
+ InitTy &&... initVals) {
+ return new (StringMapEntryBase::allocateWithKey(
+ sizeof(StringMapEntry), alignof(StringMapEntry), key, allocator))
+ StringMapEntry(key.size(), std::forward<InitTy>(initVals)...);
+ }
+
+ /// GetStringMapEntryFromKeyData - Given key data that is known to be embedded
+ /// into a StringMapEntry, return the StringMapEntry itself.
+ static StringMapEntry &GetStringMapEntryFromKeyData(const char *keyData) {
+ char *ptr = const_cast<char *>(keyData) - sizeof(StringMapEntry<ValueTy>);
+ return *reinterpret_cast<StringMapEntry *>(ptr);
+ }
+
+ /// Destroy - Destroy this StringMapEntry, releasing memory back to the
+ /// specified allocator.
+ template <typename AllocatorTy> void Destroy(AllocatorTy &allocator) {
+ // Free memory referenced by the item.
+ size_t AllocSize = sizeof(StringMapEntry) + this->getKeyLength() + 1;
+ this->~StringMapEntry();
+ allocator.Deallocate(static_cast<void *>(this), AllocSize,
+ alignof(StringMapEntry));
+ }
+};
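+
+// Illustrative lifecycle, mirroring how StringMap uses this entry type
+// (MallocAllocator is declared in llvm/Support/AllocatorBase.h):
+//   MallocAllocator A;
+//   auto *E = StringMapEntry<int>::Create("key", A, 42);
+//   assert(E->getKey() == "key" && E->getValue() == 42);
+//   E->Destroy(A);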
+
+} // end namespace llvm
+
+#endif // LLVM_ADT_STRINGMAPENTRY_H
+
+#ifdef __GNUC__
+#pragma GCC diagnostic pop
+#endif
diff --git a/contrib/libs/llvm14/include/llvm/ADT/StringRef.h b/contrib/libs/llvm14/include/llvm/ADT/StringRef.h
new file mode 100644
index 0000000000..87d136fec2
--- /dev/null
+++ b/contrib/libs/llvm14/include/llvm/ADT/StringRef.h
@@ -0,0 +1,1006 @@
+#pragma once
+
+#ifdef __GNUC__
+#pragma GCC diagnostic push
+#pragma GCC diagnostic ignored "-Wunused-parameter"
+#endif
+
+//===- StringRef.h - Constant String Reference Wrapper ----------*- C++ -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_ADT_STRINGREF_H
+#define LLVM_ADT_STRINGREF_H
+
+#include "llvm/ADT/DenseMapInfo.h"
+#include "llvm/ADT/STLFunctionalExtras.h"
+#include "llvm/ADT/iterator_range.h"
+#include "llvm/Support/Compiler.h"
+#include <algorithm>
+#include <cassert>
+#include <cstddef>
+#include <cstring>
+#include <limits>
+#include <string>
+#if __cplusplus > 201402L
+#include <string_view>
+#endif
+#include <type_traits>
+#include <utility>
+
+// Declare the __builtin_strlen intrinsic for MSVC so it can be used in
+// constexpr context.
+#if defined(_MSC_VER)
+extern "C" size_t __builtin_strlen(const char *);
+#endif
+
+namespace llvm {
+
+ class APInt;
+ class hash_code;
+ template <typename T> class SmallVectorImpl;
+ class StringRef;
+
+ /// Helper functions for StringRef::getAsInteger.
+ bool getAsUnsignedInteger(StringRef Str, unsigned Radix,
+ unsigned long long &Result);
+
+ bool getAsSignedInteger(StringRef Str, unsigned Radix, long long &Result);
+
+ bool consumeUnsignedInteger(StringRef &Str, unsigned Radix,
+ unsigned long long &Result);
+ bool consumeSignedInteger(StringRef &Str, unsigned Radix, long long &Result);
+
+ /// StringRef - Represent a constant reference to a string, i.e. a character
+ /// array and a length, which need not be null terminated.
+ ///
+ /// This class does not own the string data, it is expected to be used in
+ /// situations where the character data resides in some other buffer, whose
+ /// lifetime extends past that of the StringRef. For this reason, it is not in
+ /// general safe to store a StringRef.
+ class LLVM_GSL_POINTER StringRef {
+ public:
+ static constexpr size_t npos = ~size_t(0);
+
+ using iterator = const char *;
+ using const_iterator = const char *;
+ using size_type = size_t;
+
+ private:
+ /// The start of the string, in an external buffer.
+ const char *Data = nullptr;
+
+ /// The length of the string.
+ size_t Length = 0;
+
+    // Work around undefined behavior in memcmp when passed null pointers by
+    // providing a specialized version.
+    static int compareMemory(const char *Lhs, const char *Rhs, size_t Length) {
+      if (Length == 0)
+        return 0;
+      return ::memcmp(Lhs, Rhs, Length);
+ }
+
+ // Constexpr version of std::strlen.
+ static constexpr size_t strLen(const char *Str) {
+#if __cplusplus > 201402L
+ return std::char_traits<char>::length(Str);
+#elif __has_builtin(__builtin_strlen) || defined(__GNUC__) || \
+ (defined(_MSC_VER) && _MSC_VER >= 1916)
+ return __builtin_strlen(Str);
+#else
+ const char *Begin = Str;
+ while (*Str != '\0')
+ ++Str;
+ return Str - Begin;
+#endif
+ }
+
+ public:
+ /// @name Constructors
+ /// @{
+
+ /// Construct an empty string ref.
+ /*implicit*/ StringRef() = default;
+
+ /// Disable conversion from nullptr. This prevents things like
+ /// if (S == nullptr)
+ StringRef(std::nullptr_t) = delete;
+
+ /// Construct a string ref from a cstring.
+ /*implicit*/ constexpr StringRef(const char *Str)
+ : Data(Str), Length(Str ? strLen(Str) : 0) {}
+
+ /// Construct a string ref from a pointer and length.
+ /*implicit*/ constexpr StringRef(const char *data, size_t length)
+ : Data(data), Length(length) {}
+
+ /// Construct a string ref from an std::string.
+ /*implicit*/ StringRef(const std::string &Str)
+ : Data(Str.data()), Length(Str.length()) {}
+
+#if __cplusplus > 201402L
+ /// Construct a string ref from an std::string_view.
+ /*implicit*/ constexpr StringRef(std::string_view Str)
+ : Data(Str.data()), Length(Str.size()) {}
+#endif
+
+ /// @}
+ /// @name Iterators
+ /// @{
+
+ iterator begin() const { return Data; }
+
+ iterator end() const { return Data + Length; }
+
+ const unsigned char *bytes_begin() const {
+ return reinterpret_cast<const unsigned char *>(begin());
+ }
+ const unsigned char *bytes_end() const {
+ return reinterpret_cast<const unsigned char *>(end());
+ }
+ iterator_range<const unsigned char *> bytes() const {
+ return make_range(bytes_begin(), bytes_end());
+ }
+
+ /// @}
+ /// @name String Operations
+ /// @{
+
+ /// data - Get a pointer to the start of the string (which may not be null
+ /// terminated).
+ LLVM_NODISCARD
+ const char *data() const { return Data; }
+
+ /// empty - Check if the string is empty.
+ LLVM_NODISCARD
+ constexpr bool empty() const { return Length == 0; }
+
+ /// size - Get the string size.
+ LLVM_NODISCARD
+ constexpr size_t size() const { return Length; }
+
+ /// front - Get the first character in the string.
+ LLVM_NODISCARD
+ char front() const {
+ assert(!empty());
+ return Data[0];
+ }
+
+ /// back - Get the last character in the string.
+ LLVM_NODISCARD
+ char back() const {
+ assert(!empty());
+ return Data[Length-1];
+ }
+
+    /// copy - Allocate a copy in Allocator \p A and return a StringRef to it.
+ template <typename Allocator>
+ LLVM_NODISCARD StringRef copy(Allocator &A) const {
+ // Don't request a length 0 copy from the allocator.
+ if (empty())
+ return StringRef();
+ char *S = A.template Allocate<char>(Length);
+ std::copy(begin(), end(), S);
+ return StringRef(S, Length);
+ }
+
+    /// equals - Check for string equality; this is more efficient than
+    /// compare() when the relative ordering of unequal strings isn't needed.
+ LLVM_NODISCARD
+ bool equals(StringRef RHS) const {
+ return (Length == RHS.Length &&
+ compareMemory(Data, RHS.Data, RHS.Length) == 0);
+ }
+
+ /// Check for string equality, ignoring case.
+ LLVM_NODISCARD
+ bool equals_insensitive(StringRef RHS) const {
+ return Length == RHS.Length && compare_insensitive(RHS) == 0;
+ }
+
+ /// compare - Compare two strings; the result is -1, 0, or 1 if this string
+ /// is lexicographically less than, equal to, or greater than the \p RHS.
+ LLVM_NODISCARD
+ int compare(StringRef RHS) const {
+ // Check the prefix for a mismatch.
+ if (int Res = compareMemory(Data, RHS.Data, std::min(Length, RHS.Length)))
+ return Res < 0 ? -1 : 1;
+
+ // Otherwise the prefixes match, so we only need to check the lengths.
+ if (Length == RHS.Length)
+ return 0;
+ return Length < RHS.Length ? -1 : 1;
+ }
+
+ /// Compare two strings, ignoring case.
+ LLVM_NODISCARD
+ int compare_insensitive(StringRef RHS) const;
+
+ /// compare_numeric - Compare two strings, treating sequences of digits as
+ /// numbers.
+ LLVM_NODISCARD
+ int compare_numeric(StringRef RHS) const;
+
+ /// Determine the edit distance between this string and another
+ /// string.
+ ///
+ /// \param Other the string to compare this string against.
+ ///
+ /// \param AllowReplacements whether to allow character
+ /// replacements (change one character into another) as a single
+ /// operation, rather than as two operations (an insertion and a
+ /// removal).
+ ///
+ /// \param MaxEditDistance If non-zero, the maximum edit distance that
+ /// this routine is allowed to compute. If the edit distance will exceed
+ /// that maximum, returns \c MaxEditDistance+1.
+ ///
+ /// \returns the minimum number of character insertions, removals,
+ /// or (if \p AllowReplacements is \c true) replacements needed to
+ /// transform one of the given strings into the other. If zero,
+ /// the strings are identical.
+ LLVM_NODISCARD
+ unsigned edit_distance(StringRef Other, bool AllowReplacements = true,
+ unsigned MaxEditDistance = 0) const;
+
+ /// str - Get the contents as an std::string.
+ LLVM_NODISCARD
+ std::string str() const {
+ if (!Data) return std::string();
+ return std::string(Data, Length);
+ }
+
+ /// @}
+ /// @name Operator Overloads
+ /// @{
+
+ LLVM_NODISCARD
+ char operator[](size_t Index) const {
+ assert(Index < Length && "Invalid index!");
+ return Data[Index];
+ }
+
+ /// Disallow accidental assignment from a temporary std::string.
+ ///
+ /// The declaration here is extra complicated so that `stringRef = {}`
+ /// and `stringRef = "abc"` continue to select the move assignment operator.
+ template <typename T>
+ std::enable_if_t<std::is_same<T, std::string>::value, StringRef> &
+ operator=(T &&Str) = delete;
+
+ /// @}
+ /// @name Type Conversions
+ /// @{
+
+ explicit operator std::string() const { return str(); }
+
+#if __cplusplus > 201402L
+ operator std::string_view() const {
+ return std::string_view(data(), size());
+ }
+#endif
+
+ /// @}
+ /// @name String Predicates
+ /// @{
+
+ /// Check if this string starts with the given \p Prefix.
+ LLVM_NODISCARD
+ bool startswith(StringRef Prefix) const {
+ return Length >= Prefix.Length &&
+ compareMemory(Data, Prefix.Data, Prefix.Length) == 0;
+ }
+
+ /// Check if this string starts with the given \p Prefix, ignoring case.
+ LLVM_NODISCARD
+ bool startswith_insensitive(StringRef Prefix) const;
+
+ /// Check if this string ends with the given \p Suffix.
+ LLVM_NODISCARD
+ bool endswith(StringRef Suffix) const {
+ return Length >= Suffix.Length &&
+ compareMemory(end() - Suffix.Length, Suffix.Data, Suffix.Length) == 0;
+ }
+
+ /// Check if this string ends with the given \p Suffix, ignoring case.
+ LLVM_NODISCARD
+ bool endswith_insensitive(StringRef Suffix) const;
+
+ /// @}
+ /// @name String Searching
+ /// @{
+
+ /// Search for the first character \p C in the string.
+ ///
+ /// \returns The index of the first occurrence of \p C, or npos if not
+ /// found.
+ LLVM_NODISCARD
+ size_t find(char C, size_t From = 0) const {
+ size_t FindBegin = std::min(From, Length);
+ if (FindBegin < Length) { // Avoid calling memchr with nullptr.
+ // Just forward to memchr, which is faster than a hand-rolled loop.
+ if (const void *P = ::memchr(Data + FindBegin, C, Length - FindBegin))
+ return static_cast<const char *>(P) - Data;
+ }
+ return npos;
+ }
+
+ /// Search for the first character \p C in the string, ignoring case.
+ ///
+ /// \returns The index of the first occurrence of \p C, or npos if not
+ /// found.
+ LLVM_NODISCARD
+ size_t find_insensitive(char C, size_t From = 0) const;
+
+ /// Search for the first character satisfying the predicate \p F
+ ///
+ /// \returns The index of the first character satisfying \p F starting from
+ /// \p From, or npos if not found.
+ LLVM_NODISCARD
+ size_t find_if(function_ref<bool(char)> F, size_t From = 0) const {
+ StringRef S = drop_front(From);
+ while (!S.empty()) {
+ if (F(S.front()))
+ return size() - S.size();
+ S = S.drop_front();
+ }
+ return npos;
+ }
+
+ /// Search for the first character not satisfying the predicate \p F
+ ///
+ /// \returns The index of the first character not satisfying \p F starting
+ /// from \p From, or npos if not found.
+ LLVM_NODISCARD
+ size_t find_if_not(function_ref<bool(char)> F, size_t From = 0) const {
+ return find_if([F](char c) { return !F(c); }, From);
+ }
+
+ /// Search for the first string \p Str in the string.
+ ///
+ /// \returns The index of the first occurrence of \p Str, or npos if not
+ /// found.
+ LLVM_NODISCARD
+ size_t find(StringRef Str, size_t From = 0) const;
+
+ /// Search for the first string \p Str in the string, ignoring case.
+ ///
+ /// \returns The index of the first occurrence of \p Str, or npos if not
+ /// found.
+ LLVM_NODISCARD
+ size_t find_insensitive(StringRef Str, size_t From = 0) const;
+
+ /// Search for the last character \p C in the string.
+ ///
+ /// \returns The index of the last occurrence of \p C, or npos if not
+ /// found.
+ LLVM_NODISCARD
+ size_t rfind(char C, size_t From = npos) const {
+ From = std::min(From, Length);
+ size_t i = From;
+ while (i != 0) {
+ --i;
+ if (Data[i] == C)
+ return i;
+ }
+ return npos;
+ }
+
+ /// Search for the last character \p C in the string, ignoring case.
+ ///
+ /// \returns The index of the last occurrence of \p C, or npos if not
+ /// found.
+ LLVM_NODISCARD
+ size_t rfind_insensitive(char C, size_t From = npos) const;
+
+ /// Search for the last string \p Str in the string.
+ ///
+ /// \returns The index of the last occurrence of \p Str, or npos if not
+ /// found.
+ LLVM_NODISCARD
+ size_t rfind(StringRef Str) const;
+
+ /// Search for the last string \p Str in the string, ignoring case.
+ ///
+ /// \returns The index of the last occurrence of \p Str, or npos if not
+ /// found.
+ LLVM_NODISCARD
+ size_t rfind_insensitive(StringRef Str) const;
+
+ /// Find the first character in the string that is \p C, or npos if not
+ /// found. Same as find.
+ LLVM_NODISCARD
+ size_t find_first_of(char C, size_t From = 0) const {
+ return find(C, From);
+ }
+
+ /// Find the first character in the string that is in \p Chars, or npos if
+ /// not found.
+ ///
+ /// Complexity: O(size() + Chars.size())
+ LLVM_NODISCARD
+ size_t find_first_of(StringRef Chars, size_t From = 0) const;
+
+ /// Find the first character in the string that is not \p C or npos if not
+ /// found.
+ LLVM_NODISCARD
+ size_t find_first_not_of(char C, size_t From = 0) const;
+
+ /// Find the first character in the string that is not in the string
+ /// \p Chars, or npos if not found.
+ ///
+ /// Complexity: O(size() + Chars.size())
+ LLVM_NODISCARD
+ size_t find_first_not_of(StringRef Chars, size_t From = 0) const;
+
+ /// Find the last character in the string that is \p C, or npos if not
+ /// found.
+ LLVM_NODISCARD
+ size_t find_last_of(char C, size_t From = npos) const {
+ return rfind(C, From);
+ }
+
+    /// Find the last character in the string that is in \p Chars, or npos if
+ /// found.
+ ///
+ /// Complexity: O(size() + Chars.size())
+ LLVM_NODISCARD
+ size_t find_last_of(StringRef Chars, size_t From = npos) const;
+
+ /// Find the last character in the string that is not \p C, or npos if not
+ /// found.
+ LLVM_NODISCARD
+ size_t find_last_not_of(char C, size_t From = npos) const;
+
+ /// Find the last character in the string that is not in \p Chars, or
+ /// npos if not found.
+ ///
+ /// Complexity: O(size() + Chars.size())
+ LLVM_NODISCARD
+ size_t find_last_not_of(StringRef Chars, size_t From = npos) const;
+
+ /// Return true if the given string is a substring of *this, and false
+ /// otherwise.
+ LLVM_NODISCARD
+ bool contains(StringRef Other) const { return find(Other) != npos; }
+
+ /// Return true if the given character is contained in *this, and false
+ /// otherwise.
+ LLVM_NODISCARD
+ bool contains(char C) const { return find_first_of(C) != npos; }
+
+ /// Return true if the given string is a substring of *this, and false
+ /// otherwise.
+ LLVM_NODISCARD
+ bool contains_insensitive(StringRef Other) const {
+ return find_insensitive(Other) != npos;
+ }
+
+ /// Return true if the given character is contained in *this, and false
+ /// otherwise.
+ LLVM_NODISCARD
+ bool contains_insensitive(char C) const {
+ return find_insensitive(C) != npos;
+ }
+
+ /// @}
+ /// @name Helpful Algorithms
+ /// @{
+
+ /// Return the number of occurrences of \p C in the string.
+ LLVM_NODISCARD
+ size_t count(char C) const {
+ size_t Count = 0;
+ for (size_t i = 0, e = Length; i != e; ++i)
+ if (Data[i] == C)
+ ++Count;
+ return Count;
+ }
+
+ /// Return the number of non-overlapped occurrences of \p Str in
+ /// the string.
+ size_t count(StringRef Str) const;
+
+ /// Parse the current string as an integer of the specified radix. If
+ /// \p Radix is specified as zero, this does radix autosensing using
+ /// extended C rules: 0 is octal, 0x is hex, 0b is binary.
+ ///
+ /// If the string is invalid or if only a subset of the string is valid,
+ /// this returns true to signify the error. The string is considered
+ /// erroneous if empty or if it overflows T.
+ template <typename T>
+ std::enable_if_t<std::numeric_limits<T>::is_signed, bool>
+ getAsInteger(unsigned Radix, T &Result) const {
+ long long LLVal;
+ if (getAsSignedInteger(*this, Radix, LLVal) ||
+ static_cast<T>(LLVal) != LLVal)
+ return true;
+ Result = LLVal;
+ return false;
+ }
+
+ template <typename T>
+ std::enable_if_t<!std::numeric_limits<T>::is_signed, bool>
+ getAsInteger(unsigned Radix, T &Result) const {
+ unsigned long long ULLVal;
+ // The additional cast to unsigned long long is required to avoid the
+ // Visual C++ warning C4805: '!=' : unsafe mix of type 'bool' and type
+ // 'unsigned __int64' when instantiating getAsInteger with T = bool.
+ if (getAsUnsignedInteger(*this, Radix, ULLVal) ||
+ static_cast<unsigned long long>(static_cast<T>(ULLVal)) != ULLVal)
+ return true;
+ Result = ULLVal;
+ return false;
+ }
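+
+    // Illustrative usage; note the inverted convention: returning true
+    // signals failure.
+    //   int V;
+    //   StringRef("1f").getAsInteger(16, V);   // false (success), V == 31
+    //   StringRef("0x1f").getAsInteger(0, V);  // false (success), V == 31
+    //   StringRef("1f").getAsInteger(10, V);   // true (error)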
+
+ /// Parse the current string as an integer of the specified radix. If
+ /// \p Radix is specified as zero, this does radix autosensing using
+ /// extended C rules: 0 is octal, 0x is hex, 0b is binary.
+ ///
+ /// If the string does not begin with a number of the specified radix,
+ /// this returns true to signify the error. The string is considered
+ /// erroneous if empty or if it overflows T.
+ /// The portion of the string representing the discovered numeric value
+ /// is removed from the beginning of the string.
+ template <typename T>
+ std::enable_if_t<std::numeric_limits<T>::is_signed, bool>
+ consumeInteger(unsigned Radix, T &Result) {
+ long long LLVal;
+ if (consumeSignedInteger(*this, Radix, LLVal) ||
+ static_cast<long long>(static_cast<T>(LLVal)) != LLVal)
+ return true;
+ Result = LLVal;
+ return false;
+ }
+
+ template <typename T>
+ std::enable_if_t<!std::numeric_limits<T>::is_signed, bool>
+ consumeInteger(unsigned Radix, T &Result) {
+ unsigned long long ULLVal;
+ if (consumeUnsignedInteger(*this, Radix, ULLVal) ||
+ static_cast<unsigned long long>(static_cast<T>(ULLVal)) != ULLVal)
+ return true;
+ Result = ULLVal;
+ return false;
+ }
+
+ /// Parse the current string as an integer of the specified \p Radix, or of
+ /// an autosensed radix if the \p Radix given is 0. The current value in
+ /// \p Result is discarded, and the storage is changed to be wide enough to
+ /// store the parsed integer.
+ ///
+ /// \returns true if the string does not solely consist of a valid
+ /// non-empty number in the appropriate base.
+ ///
+ /// APInt::fromString is superficially similar but assumes the
+ /// string is well-formed in the given radix.
+ bool getAsInteger(unsigned Radix, APInt &Result) const;
+
+ /// Parse the current string as an IEEE double-precision floating
+ /// point value. The string must be a well-formed double.
+ ///
+ /// If \p AllowInexact is false, the function will fail if the string
+ /// cannot be represented exactly. Otherwise, the function only fails
+ /// in case of an overflow or underflow, or an invalid floating point
+ /// representation.
+ bool getAsDouble(double &Result, bool AllowInexact = true) const;
+
+ /// @}
+ /// @name String Operations
+ /// @{
+
+    /// Convert the given ASCII string to lowercase.
+ LLVM_NODISCARD
+ std::string lower() const;
+
+ /// Convert the given ASCII string to uppercase.
+ LLVM_NODISCARD
+ std::string upper() const;
+
+ /// @}
+ /// @name Substring Operations
+ /// @{
+
+ /// Return a reference to the substring from [Start, Start + N).
+ ///
+ /// \param Start The index of the starting character in the substring; if
+ /// the index is npos or greater than the length of the string then the
+ /// empty substring will be returned.
+ ///
+    /// \param N The number of characters to include in the substring. If N
+ /// exceeds the number of characters remaining in the string, the string
+ /// suffix (starting with \p Start) will be returned.
+ LLVM_NODISCARD
+ StringRef substr(size_t Start, size_t N = npos) const {
+ Start = std::min(Start, Length);
+ return StringRef(Data + Start, std::min(N, Length - Start));
+ }
+
+ /// Return a StringRef equal to 'this' but with only the first \p N
+ /// elements remaining. If \p N is greater than the length of the
+ /// string, the entire string is returned.
+ LLVM_NODISCARD
+ StringRef take_front(size_t N = 1) const {
+ if (N >= size())
+ return *this;
+ return drop_back(size() - N);
+ }
+
+ /// Return a StringRef equal to 'this' but with only the last \p N
+ /// elements remaining. If \p N is greater than the length of the
+ /// string, the entire string is returned.
+ LLVM_NODISCARD
+ StringRef take_back(size_t N = 1) const {
+ if (N >= size())
+ return *this;
+ return drop_front(size() - N);
+ }
+
+ /// Return the longest prefix of 'this' such that every character
+ /// in the prefix satisfies the given predicate.
+ LLVM_NODISCARD
+ StringRef take_while(function_ref<bool(char)> F) const {
+ return substr(0, find_if_not(F));
+ }
+
+ /// Return the longest prefix of 'this' such that no character in
+ /// the prefix satisfies the given predicate.
+ LLVM_NODISCARD
+ StringRef take_until(function_ref<bool(char)> F) const {
+ return substr(0, find_if(F));
+ }
+
+ /// Return a StringRef equal to 'this' but with the first \p N elements
+ /// dropped.
+ LLVM_NODISCARD
+ StringRef drop_front(size_t N = 1) const {
+ assert(size() >= N && "Dropping more elements than exist");
+ return substr(N);
+ }
+
+ /// Return a StringRef equal to 'this' but with the last \p N elements
+ /// dropped.
+ LLVM_NODISCARD
+ StringRef drop_back(size_t N = 1) const {
+ assert(size() >= N && "Dropping more elements than exist");
+ return substr(0, size()-N);
+ }
+
+ /// Return a StringRef equal to 'this', but with all characters satisfying
+ /// the given predicate dropped from the beginning of the string.
+ LLVM_NODISCARD
+ StringRef drop_while(function_ref<bool(char)> F) const {
+ return substr(find_if_not(F));
+ }
+
+ /// Return a StringRef equal to 'this', but with all characters not
+ /// satisfying the given predicate dropped from the beginning of the string.
+ LLVM_NODISCARD
+ StringRef drop_until(function_ref<bool(char)> F) const {
+ return substr(find_if(F));
+ }
+
+ /// Returns true if this StringRef has the given prefix and removes that
+ /// prefix.
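+ ///
+ /// For illustration:
+ /// \code
+ ///   StringRef S("llvm::StringRef");
+ ///   S.consume_front("llvm::"); // returns true; S == "StringRef"
+ ///   S.consume_front("clang");  // returns false; S is unchanged
+ /// \endcode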
+ bool consume_front(StringRef Prefix) {
+ if (!startswith(Prefix))
+ return false;
+
+ *this = drop_front(Prefix.size());
+ return true;
+ }
+
+ /// Returns true if this StringRef has the given prefix, ignoring case,
+ /// and removes that prefix.
+ bool consume_front_insensitive(StringRef Prefix) {
+ if (!startswith_insensitive(Prefix))
+ return false;
+
+ *this = drop_front(Prefix.size());
+ return true;
+ }
+
+ /// Returns true if this StringRef has the given suffix and removes that
+ /// suffix.
+ bool consume_back(StringRef Suffix) {
+ if (!endswith(Suffix))
+ return false;
+
+ *this = drop_back(Suffix.size());
+ return true;
+ }
+
+ /// Returns true if this StringRef has the given suffix, ignoring case,
+ /// and removes that suffix.
+ bool consume_back_insensitive(StringRef Suffix) {
+ if (!endswith_insensitive(Suffix))
+ return false;
+
+ *this = drop_back(Suffix.size());
+ return true;
+ }
+
+ /// Return a reference to the substring from [Start, End).
+ ///
+ /// \param Start The index of the starting character in the substring; if
+ /// the index is npos or greater than the length of the string then the
+ /// empty substring will be returned.
+ ///
+ /// \param End The index following the last character to include in the
+ /// substring. If this is npos or exceeds the number of characters
+ /// remaining in the string, the string suffix (starting with \p Start)
+ /// will be returned. If this is less than \p Start, an empty string will
+ /// be returned.
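+ ///
+ /// For illustration:
+ /// \code
+ ///   StringRef("hello").slice(1, 4);    // "ell"
+ ///   StringRef("hello").slice(2, npos); // "llo"
+ /// \endcode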
+ LLVM_NODISCARD
+ StringRef slice(size_t Start, size_t End) const {
+ Start = std::min(Start, Length);
+ End = std::min(std::max(Start, End), Length);
+ return StringRef(Data + Start, End - Start);
+ }
+
+ /// Split into two substrings around the first occurrence of a separator
+ /// character.
+ ///
+ /// If \p Separator is in the string, then the result is a pair (LHS, RHS)
+ /// such that (*this == LHS + Separator + RHS) is true and RHS is
+ /// maximal. If \p Separator is not in the string, then the result is a
+ /// pair (LHS, RHS) where (*this == LHS) and (RHS == "").
+ ///
+ /// \param Separator The character to split on.
+ /// \returns The split substrings.
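+ ///
+ /// For illustration:
+ /// \code
+ ///   StringRef("a,b,c").split(','); // ("a", "b,c")
+ /// \endcode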
+ LLVM_NODISCARD
+ std::pair<StringRef, StringRef> split(char Separator) const {
+ return split(StringRef(&Separator, 1));
+ }
+
+ /// Split into two substrings around the first occurrence of a separator
+ /// string.
+ ///
+ /// If \p Separator is in the string, then the result is a pair (LHS, RHS)
+ /// such that (*this == LHS + Separator + RHS) is true and RHS is
+ /// maximal. If \p Separator is not in the string, then the result is a
+ /// pair (LHS, RHS) where (*this == LHS) and (RHS == "").
+ ///
+ /// \param Separator - The string to split on.
+ /// \return - The split substrings.
+ LLVM_NODISCARD
+ std::pair<StringRef, StringRef> split(StringRef Separator) const {
+ size_t Idx = find(Separator);
+ if (Idx == npos)
+ return std::make_pair(*this, StringRef());
+ return std::make_pair(slice(0, Idx), slice(Idx + Separator.size(), npos));
+ }
+
+ /// Split into two substrings around the last occurrence of a separator
+ /// string.
+ ///
+ /// If \p Separator is in the string, then the result is a pair (LHS, RHS)
+ /// such that (*this == LHS + Separator + RHS) is true and RHS is
+ /// minimal. If \p Separator is not in the string, then the result is a
+ /// pair (LHS, RHS) where (*this == LHS) and (RHS == "").
+ ///
+ /// \param Separator - The string to split on.
+ /// \return - The split substrings.
+ LLVM_NODISCARD
+ std::pair<StringRef, StringRef> rsplit(StringRef Separator) const {
+ size_t Idx = rfind(Separator);
+ if (Idx == npos)
+ return std::make_pair(*this, StringRef());
+ return std::make_pair(slice(0, Idx), slice(Idx + Separator.size(), npos));
+ }
+
+ /// Split into substrings around the occurrences of a separator string.
+ ///
+ /// Each substring is stored in \p A. If \p MaxSplit is >= 0, at most
+ /// \p MaxSplit splits are done and consequently <= \p MaxSplit + 1
+ /// elements are added to A.
+ /// If \p KeepEmpty is false, empty strings are not added to \p A. They
+ /// still count when considering \p MaxSplit.
+ /// A useful invariant is that
+ /// Separator.join(A) == *this if MaxSplit == -1 and KeepEmpty == true.
+ ///
+ /// \param A - Where to put the substrings.
+ /// \param Separator - The string to split on.
+ /// \param MaxSplit - The maximum number of times the string is split.
+ /// \param KeepEmpty - True if empty substrings should be added.
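+ ///
+ /// For illustration, a usage sketch with expected contents in comments:
+ /// \code
+ ///   SmallVector<StringRef, 4> Parts;
+ ///   StringRef("a,,b").split(Parts, ",");            // {"a", "", "b"}
+ ///   Parts.clear();
+ ///   StringRef("a,,b").split(Parts, ",", -1, false); // {"a", "b"}
+ /// \endcode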
+ void split(SmallVectorImpl<StringRef> &A,
+ StringRef Separator, int MaxSplit = -1,
+ bool KeepEmpty = true) const;
+
+ /// Split into substrings around the occurrences of a separator character.
+ ///
+ /// Each substring is stored in \p A. If \p MaxSplit is >= 0, at most
+ /// \p MaxSplit splits are done and consequently <= \p MaxSplit + 1
+ /// elements are added to A.
+ /// If \p KeepEmpty is false, empty strings are not added to \p A. They
+ /// still count when considering \p MaxSplit.
+ /// A useful invariant is that
+ /// Separator.join(A) == *this if MaxSplit == -1 and KeepEmpty == true.
+ ///
+ /// \param A - Where to put the substrings.
+ /// \param Separator - The string to split on.
+ /// \param MaxSplit - The maximum number of times the string is split.
+ /// \param KeepEmpty - True if empty substrings should be added.
+ void split(SmallVectorImpl<StringRef> &A, char Separator, int MaxSplit = -1,
+ bool KeepEmpty = true) const;
+
+ /// Split into two substrings around the last occurrence of a separator
+ /// character.
+ ///
+ /// If \p Separator is in the string, then the result is a pair (LHS, RHS)
+ /// such that (*this == LHS + Separator + RHS) is true and RHS is
+ /// minimal. If \p Separator is not in the string, then the result is a
+ /// pair (LHS, RHS) where (*this == LHS) and (RHS == "").
+ ///
+ /// \param Separator - The character to split on.
+ /// \return - The split substrings.
+ LLVM_NODISCARD
+ std::pair<StringRef, StringRef> rsplit(char Separator) const {
+ return rsplit(StringRef(&Separator, 1));
+ }
+
+ /// Return string with consecutive \p Char characters starting from the
+ /// left removed.
+ LLVM_NODISCARD
+ StringRef ltrim(char Char) const {
+ return drop_front(std::min(Length, find_first_not_of(Char)));
+ }
+
+ /// Return string with consecutive characters in \p Chars starting from
+ /// the left removed.
+ LLVM_NODISCARD
+ StringRef ltrim(StringRef Chars = " \t\n\v\f\r") const {
+ return drop_front(std::min(Length, find_first_not_of(Chars)));
+ }
+
+ /// Return string with consecutive \p Char characters starting from the
+ /// right removed.
+ LLVM_NODISCARD
+ StringRef rtrim(char Char) const {
+ return drop_back(Length - std::min(Length, find_last_not_of(Char) + 1));
+ }
+
+ /// Return string with consecutive characters in \p Chars starting from
+ /// the right removed.
+ LLVM_NODISCARD
+ StringRef rtrim(StringRef Chars = " \t\n\v\f\r") const {
+ return drop_back(Length - std::min(Length, find_last_not_of(Chars) + 1));
+ }
+
+ /// Return string with consecutive \p Char characters starting from the
+ /// left and right removed.
+ LLVM_NODISCARD
+ StringRef trim(char Char) const {
+ return ltrim(Char).rtrim(Char);
+ }
+
+ /// Return string with consecutive characters in \p Chars starting from
+ /// the left and right removed.
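+ ///
+ /// For illustration (covering both the char and the char-set overloads):
+ /// \code
+ ///   StringRef("  hello  ").trim();   // "hello"
+ ///   StringRef("xxhexllo").trim('x'); // "hexllo"
+ /// \endcode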
+ LLVM_NODISCARD
+ StringRef trim(StringRef Chars = " \t\n\v\f\r") const {
+ return ltrim(Chars).rtrim(Chars);
+ }
+
+ /// Detect the line ending style of the string.
+ ///
+ /// If the string contains a line ending, return the line ending character
+ /// sequence that is detected. Otherwise return '\n' for unix line endings.
+ ///
+ /// \return - The line ending character sequence.
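+ ///
+ /// For illustration, expected results shown in comments:
+ /// \code
+ ///   StringRef("a\r\nb").detectEOL(); // "\r\n"
+ ///   StringRef("a\nb").detectEOL();   // "\n"
+ /// \endcode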
+ LLVM_NODISCARD
+ StringRef detectEOL() const {
+ size_t Pos = find('\r');
+ if (Pos == npos) {
+ // If there is no carriage return, assume unix
+ return "\n";
+ }
+ if (Pos + 1 < Length && Data[Pos + 1] == '\n')
+ return "\r\n"; // Windows
+ if (Pos > 0 && Data[Pos - 1] == '\n')
+ return "\n\r"; // You monster!
+ return "\r"; // Classic Mac
+ }
+
+ /// @}
+ };
+
+ /// A wrapper around a string literal that serves as a proxy for constructing
+ /// global tables of StringRefs with the length computed at compile time.
+ /// In order to avoid the invocation of a global constructor, StringLiteral
+ /// should *only* be used in a constexpr context, as such:
+ ///
+ /// constexpr StringLiteral S("test");
+ ///
+ class StringLiteral : public StringRef {
+ private:
+ constexpr StringLiteral(const char *Str, size_t N) : StringRef(Str, N) {
+ }
+
+ public:
+ template <size_t N>
+ constexpr StringLiteral(const char (&Str)[N])
+#if defined(__clang__) && __has_attribute(enable_if)
+#pragma clang diagnostic push
+#pragma clang diagnostic ignored "-Wgcc-compat"
+ __attribute((enable_if(__builtin_strlen(Str) == N - 1,
+ "invalid string literal")))
+#pragma clang diagnostic pop
+#endif
+ : StringRef(Str, N - 1) {
+ }
+
+ // Explicit construction for strings like "foo\0bar".
+ template <size_t N>
+ static constexpr StringLiteral withInnerNUL(const char (&Str)[N]) {
+ return StringLiteral(Str, N - 1);
+ }
+ };
+
+ /// @name StringRef Comparison Operators
+ /// @{
+
+ inline bool operator==(StringRef LHS, StringRef RHS) {
+ return LHS.equals(RHS);
+ }
+
+ inline bool operator!=(StringRef LHS, StringRef RHS) { return !(LHS == RHS); }
+
+ inline bool operator<(StringRef LHS, StringRef RHS) {
+ return LHS.compare(RHS) == -1;
+ }
+
+ inline bool operator<=(StringRef LHS, StringRef RHS) {
+ return LHS.compare(RHS) != 1;
+ }
+
+ inline bool operator>(StringRef LHS, StringRef RHS) {
+ return LHS.compare(RHS) == 1;
+ }
+
+ inline bool operator>=(StringRef LHS, StringRef RHS) {
+ return LHS.compare(RHS) != -1;
+ }
+
+ inline std::string &operator+=(std::string &buffer, StringRef string) {
+ return buffer.append(string.data(), string.size());
+ }
+
+ /// @}
+
+ /// Compute a hash_code for a StringRef.
+ LLVM_NODISCARD
+ hash_code hash_value(StringRef S);
+
+ // Provide DenseMapInfo for StringRefs.
+ template <> struct DenseMapInfo<StringRef, void> {
+ static inline StringRef getEmptyKey() {
+ return StringRef(
+ reinterpret_cast<const char *>(~static_cast<uintptr_t>(0)), 0);
+ }
+
+ static inline StringRef getTombstoneKey() {
+ return StringRef(
+ reinterpret_cast<const char *>(~static_cast<uintptr_t>(1)), 0);
+ }
+
+ static unsigned getHashValue(StringRef Val);
+
+ static bool isEqual(StringRef LHS, StringRef RHS) {
+ if (RHS.data() == getEmptyKey().data())
+ return LHS.data() == getEmptyKey().data();
+ if (RHS.data() == getTombstoneKey().data())
+ return LHS.data() == getTombstoneKey().data();
+ return LHS == RHS;
+ }
+ };
+
+} // end namespace llvm
+
+#endif // LLVM_ADT_STRINGREF_H
+
+#ifdef __GNUC__
+#pragma GCC diagnostic pop
+#endif
diff --git a/contrib/libs/llvm14/include/llvm/ADT/StringSet.h b/contrib/libs/llvm14/include/llvm/ADT/StringSet.h
new file mode 100644
index 0000000000..c895fcf0f9
--- /dev/null
+++ b/contrib/libs/llvm14/include/llvm/ADT/StringSet.h
@@ -0,0 +1,67 @@
+#pragma once
+
+#ifdef __GNUC__
+#pragma GCC diagnostic push
+#pragma GCC diagnostic ignored "-Wunused-parameter"
+#endif
+
+//===- StringSet.h - An efficient set built on StringMap --------*- C++ -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+///
+/// \file
+/// StringSet - A set-like wrapper for the StringMap.
+///
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_ADT_STRINGSET_H
+#define LLVM_ADT_STRINGSET_H
+
+#include "llvm/ADT/StringMap.h"
+
+namespace llvm {
+
+/// StringSet - A wrapper for StringMap that provides set-like functionality.
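+///
+/// For illustration, a brief usage sketch with expected results in comments:
+/// \code
+///   StringSet<> Set;
+///   Set.insert("foo");        // inserted; Set now contains "foo"
+///   Set.insert("foo").second; // false: already present
+///   Set.contains("foo");      // true
+/// \endcode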
+template <class AllocatorTy = MallocAllocator>
+class StringSet : public StringMap<NoneType, AllocatorTy> {
+ using Base = StringMap<NoneType, AllocatorTy>;
+
+public:
+ StringSet() = default;
+ StringSet(std::initializer_list<StringRef> initializer) {
+ for (StringRef str : initializer)
+ insert(str);
+ }
+ explicit StringSet(AllocatorTy a) : Base(a) {}
+
+ std::pair<typename Base::iterator, bool> insert(StringRef key) {
+ return Base::try_emplace(key);
+ }
+
+ template <typename InputIt>
+ void insert(const InputIt &begin, const InputIt &end) {
+ for (auto it = begin; it != end; ++it)
+ insert(*it);
+ }
+
+ template <typename ValueTy>
+ std::pair<typename Base::iterator, bool>
+ insert(const StringMapEntry<ValueTy> &mapEntry) {
+ return insert(mapEntry.getKey());
+ }
+
+ /// Check if the set contains the given \c key.
+ bool contains(StringRef key) const { return Base::FindKey(key) != -1; }
+};
+
+} // end namespace llvm
+
+#endif // LLVM_ADT_STRINGSET_H
+
+#ifdef __GNUC__
+#pragma GCC diagnostic pop
+#endif
diff --git a/contrib/libs/llvm14/include/llvm/ADT/StringSwitch.h b/contrib/libs/llvm14/include/llvm/ADT/StringSwitch.h
new file mode 100644
index 0000000000..a5dbb7fe58
--- /dev/null
+++ b/contrib/libs/llvm14/include/llvm/ADT/StringSwitch.h
@@ -0,0 +1,209 @@
+#pragma once
+
+#ifdef __GNUC__
+#pragma GCC diagnostic push
+#pragma GCC diagnostic ignored "-Wunused-parameter"
+#endif
+
+//===--- StringSwitch.h - Switch-on-literal-string Construct --------------===/
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//===----------------------------------------------------------------------===/
+///
+/// \file
+/// This file implements the StringSwitch template, which mimics a switch()
+/// statement whose cases are string literals.
+///
+//===----------------------------------------------------------------------===/
+#ifndef LLVM_ADT_STRINGSWITCH_H
+#define LLVM_ADT_STRINGSWITCH_H
+
+#include "llvm/ADT/Optional.h"
+#include "llvm/ADT/StringRef.h"
+#include "llvm/Support/Compiler.h"
+#include <cassert>
+#include <cstring>
+
+namespace llvm {
+
+/// A switch()-like statement whose cases are string literals.
+///
+/// The StringSwitch class is a simple form of a switch() statement that
+/// determines whether the given string matches one of the given string
+/// literals. The template type parameter \p T is the type of the value that
+/// will be returned from the string-switch expression. For example,
+/// the following code switches on the name of a color in \c argv[i]:
+///
+/// \code
+/// Color color = StringSwitch<Color>(argv[i])
+/// .Case("red", Red)
+/// .Case("orange", Orange)
+/// .Case("yellow", Yellow)
+/// .Case("green", Green)
+/// .Case("blue", Blue)
+/// .Case("indigo", Indigo)
+/// .Cases("violet", "purple", Violet)
+/// .Default(UnknownColor);
+/// \endcode
+template<typename T, typename R = T>
+class StringSwitch {
+ /// The string we are matching.
+ const StringRef Str;
+
+ /// The pointer to the result of this switch statement, once known,
+ /// null before that.
+ Optional<T> Result;
+
+public:
+ explicit StringSwitch(StringRef S)
+ : Str(S), Result() { }
+
+ // StringSwitch is not copyable.
+ StringSwitch(const StringSwitch &) = delete;
+
+ // StringSwitch is not assignable due to 'Str' being 'const'.
+ void operator=(const StringSwitch &) = delete;
+ void operator=(StringSwitch &&other) = delete;
+
+ StringSwitch(StringSwitch &&other)
+ : Str(other.Str), Result(std::move(other.Result)) { }
+
+ ~StringSwitch() = default;
+
+ // Case-sensitive case matchers
+ StringSwitch &Case(StringLiteral S, T Value) {
+ if (!Result && Str == S) {
+ Result = std::move(Value);
+ }
+ return *this;
+ }
+
+ StringSwitch& EndsWith(StringLiteral S, T Value) {
+ if (!Result && Str.endswith(S)) {
+ Result = std::move(Value);
+ }
+ return *this;
+ }
+
+ StringSwitch& StartsWith(StringLiteral S, T Value) {
+ if (!Result && Str.startswith(S)) {
+ Result = std::move(Value);
+ }
+ return *this;
+ }
+
+ StringSwitch &Cases(StringLiteral S0, StringLiteral S1, T Value) {
+ return Case(S0, Value).Case(S1, Value);
+ }
+
+ StringSwitch &Cases(StringLiteral S0, StringLiteral S1, StringLiteral S2,
+ T Value) {
+ return Case(S0, Value).Cases(S1, S2, Value);
+ }
+
+ StringSwitch &Cases(StringLiteral S0, StringLiteral S1, StringLiteral S2,
+ StringLiteral S3, T Value) {
+ return Case(S0, Value).Cases(S1, S2, S3, Value);
+ }
+
+ StringSwitch &Cases(StringLiteral S0, StringLiteral S1, StringLiteral S2,
+ StringLiteral S3, StringLiteral S4, T Value) {
+ return Case(S0, Value).Cases(S1, S2, S3, S4, Value);
+ }
+
+ StringSwitch &Cases(StringLiteral S0, StringLiteral S1, StringLiteral S2,
+ StringLiteral S3, StringLiteral S4, StringLiteral S5,
+ T Value) {
+ return Case(S0, Value).Cases(S1, S2, S3, S4, S5, Value);
+ }
+
+ StringSwitch &Cases(StringLiteral S0, StringLiteral S1, StringLiteral S2,
+ StringLiteral S3, StringLiteral S4, StringLiteral S5,
+ StringLiteral S6, T Value) {
+ return Case(S0, Value).Cases(S1, S2, S3, S4, S5, S6, Value);
+ }
+
+ StringSwitch &Cases(StringLiteral S0, StringLiteral S1, StringLiteral S2,
+ StringLiteral S3, StringLiteral S4, StringLiteral S5,
+ StringLiteral S6, StringLiteral S7, T Value) {
+ return Case(S0, Value).Cases(S1, S2, S3, S4, S5, S6, S7, Value);
+ }
+
+ StringSwitch &Cases(StringLiteral S0, StringLiteral S1, StringLiteral S2,
+ StringLiteral S3, StringLiteral S4, StringLiteral S5,
+ StringLiteral S6, StringLiteral S7, StringLiteral S8,
+ T Value) {
+ return Case(S0, Value).Cases(S1, S2, S3, S4, S5, S6, S7, S8, Value);
+ }
+
+ StringSwitch &Cases(StringLiteral S0, StringLiteral S1, StringLiteral S2,
+ StringLiteral S3, StringLiteral S4, StringLiteral S5,
+ StringLiteral S6, StringLiteral S7, StringLiteral S8,
+ StringLiteral S9, T Value) {
+ return Case(S0, Value).Cases(S1, S2, S3, S4, S5, S6, S7, S8, S9, Value);
+ }
+
+ // Case-insensitive case matchers.
+ StringSwitch &CaseLower(StringLiteral S, T Value) {
+ if (!Result && Str.equals_insensitive(S))
+ Result = std::move(Value);
+
+ return *this;
+ }
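+ // For illustration: CaseLower matches regardless of the input's case, e.g.
+ // StringSwitch<int>("RED").CaseLower("red", 1).Default(0) yields 1.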
+
+ StringSwitch &EndsWithLower(StringLiteral S, T Value) {
+ if (!Result && Str.endswith_insensitive(S))
+ Result = std::move(Value);
+
+ return *this;
+ }
+
+ StringSwitch &StartsWithLower(StringLiteral S, T Value) {
+ if (!Result && Str.startswith_insensitive(S))
+ Result = std::move(Value);
+
+ return *this;
+ }
+
+ StringSwitch &CasesLower(StringLiteral S0, StringLiteral S1, T Value) {
+ return CaseLower(S0, Value).CaseLower(S1, Value);
+ }
+
+ StringSwitch &CasesLower(StringLiteral S0, StringLiteral S1, StringLiteral S2,
+ T Value) {
+ return CaseLower(S0, Value).CasesLower(S1, S2, Value);
+ }
+
+ StringSwitch &CasesLower(StringLiteral S0, StringLiteral S1, StringLiteral S2,
+ StringLiteral S3, T Value) {
+ return CaseLower(S0, Value).CasesLower(S1, S2, S3, Value);
+ }
+
+ StringSwitch &CasesLower(StringLiteral S0, StringLiteral S1, StringLiteral S2,
+ StringLiteral S3, StringLiteral S4, T Value) {
+ return CaseLower(S0, Value).CasesLower(S1, S2, S3, S4, Value);
+ }
+
+ LLVM_NODISCARD
+ R Default(T Value) {
+ if (Result)
+ return std::move(*Result);
+ return Value;
+ }
+
+ LLVM_NODISCARD
+ operator R() {
+ assert(Result && "Fell off the end of a string-switch");
+ return std::move(*Result);
+ }
+};
+
+} // end namespace llvm
+
+#endif // LLVM_ADT_STRINGSWITCH_H
+
+#ifdef __GNUC__
+#pragma GCC diagnostic pop
+#endif
diff --git a/contrib/libs/llvm14/include/llvm/ADT/TinyPtrVector.h b/contrib/libs/llvm14/include/llvm/ADT/TinyPtrVector.h
new file mode 100644
index 0000000000..f84519e77f
--- /dev/null
+++ b/contrib/libs/llvm14/include/llvm/ADT/TinyPtrVector.h
@@ -0,0 +1,369 @@
+#pragma once
+
+#ifdef __GNUC__
+#pragma GCC diagnostic push
+#pragma GCC diagnostic ignored "-Wunused-parameter"
+#endif
+
+//===- llvm/ADT/TinyPtrVector.h - 'Normally tiny' vectors -------*- C++ -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_ADT_TINYPTRVECTOR_H
+#define LLVM_ADT_TINYPTRVECTOR_H
+
+#include "llvm/ADT/ArrayRef.h"
+#include "llvm/ADT/None.h"
+#include "llvm/ADT/PointerUnion.h"
+#include "llvm/ADT/SmallVector.h"
+#include <cassert>
+#include <cstddef>
+#include <iterator>
+#include <type_traits>
+
+namespace llvm {
+
+/// TinyPtrVector - This class is specialized for cases where there are
+/// normally 0 or 1 element in a vector, but is general enough to go beyond that
+/// when required.
+///
+/// NOTE: This container doesn't allow you to store a null pointer into it.
+///
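+/// For illustration, a brief usage sketch (P and Q stand for some non-null
+/// 'int *' values):
+/// \code
+///   TinyPtrVector<int *> V;
+///   V.push_back(P); // single element: stored inline in the pointer union
+///   V.push_back(Q); // second element: spills to a heap-allocated SmallVector
+///   ArrayRef<int *> AR = V; // implicit conversion
+/// \endcode
+///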
+template <typename EltTy>
+class TinyPtrVector {
+public:
+ using VecTy = SmallVector<EltTy, 4>;
+ using value_type = typename VecTy::value_type;
+ // EltTy must be the first pointer type so that is<EltTy> is true for the
+ // default-constructed PtrUnion. This allows an empty TinyPtrVector to
+ // naturally vend a begin/end iterator of type EltTy* without an additional
+ // check for the empty state.
+ using PtrUnion = PointerUnion<EltTy, VecTy *>;
+
+private:
+ PtrUnion Val;
+
+public:
+ TinyPtrVector() = default;
+
+ ~TinyPtrVector() {
+ if (VecTy *V = Val.template dyn_cast<VecTy*>())
+ delete V;
+ }
+
+ TinyPtrVector(const TinyPtrVector &RHS) : Val(RHS.Val) {
+ if (VecTy *V = Val.template dyn_cast<VecTy*>())
+ Val = new VecTy(*V);
+ }
+
+ TinyPtrVector &operator=(const TinyPtrVector &RHS) {
+ if (this == &RHS)
+ return *this;
+ if (RHS.empty()) {
+ this->clear();
+ return *this;
+ }
+
+ // Try to squeeze into the single slot. If it won't fit, allocate a copied
+ // vector.
+ if (Val.template is<EltTy>()) {
+ if (RHS.size() == 1)
+ Val = RHS.front();
+ else
+ Val = new VecTy(*RHS.Val.template get<VecTy*>());
+ return *this;
+ }
+
+ // If we have a full vector allocated, try to re-use it.
+ if (RHS.Val.template is<EltTy>()) {
+ Val.template get<VecTy*>()->clear();
+ Val.template get<VecTy*>()->push_back(RHS.front());
+ } else {
+ *Val.template get<VecTy*>() = *RHS.Val.template get<VecTy*>();
+ }
+ return *this;
+ }
+
+ TinyPtrVector(TinyPtrVector &&RHS) : Val(RHS.Val) {
+ RHS.Val = (EltTy)nullptr;
+ }
+
+ TinyPtrVector &operator=(TinyPtrVector &&RHS) {
+ if (this == &RHS)
+ return *this;
+ if (RHS.empty()) {
+ this->clear();
+ return *this;
+ }
+
+ // If this vector has been allocated on the heap, re-use it if cheap. If it
+ // would require more copying, just delete it and we'll steal the other
+ // side.
+ if (VecTy *V = Val.template dyn_cast<VecTy*>()) {
+ if (RHS.Val.template is<EltTy>()) {
+ V->clear();
+ V->push_back(RHS.front());
+ RHS.Val = EltTy();
+ return *this;
+ }
+ delete V;
+ }
+
+ Val = RHS.Val;
+ RHS.Val = EltTy();
+ return *this;
+ }
+
+ TinyPtrVector(std::initializer_list<EltTy> IL)
+ : Val(IL.size() == 0
+ ? PtrUnion()
+ : IL.size() == 1 ? PtrUnion(*IL.begin())
+ : PtrUnion(new VecTy(IL.begin(), IL.end()))) {}
+
+ /// Constructor from an ArrayRef.
+ ///
+ /// This also is a constructor for individual array elements due to the single
+ /// element constructor for ArrayRef.
+ explicit TinyPtrVector(ArrayRef<EltTy> Elts)
+ : Val(Elts.empty()
+ ? PtrUnion()
+ : Elts.size() == 1
+ ? PtrUnion(Elts[0])
+ : PtrUnion(new VecTy(Elts.begin(), Elts.end()))) {}
+
+ TinyPtrVector(size_t Count, EltTy Value)
+ : Val(Count == 0 ? PtrUnion()
+ : Count == 1 ? PtrUnion(Value)
+ : PtrUnion(new VecTy(Count, Value))) {}
+
+ // implicit conversion operator to ArrayRef.
+ operator ArrayRef<EltTy>() const {
+ if (Val.isNull())
+ return None;
+ if (Val.template is<EltTy>())
+ return *Val.getAddrOfPtr1();
+ return *Val.template get<VecTy*>();
+ }
+
+ // implicit conversion operator to MutableArrayRef.
+ operator MutableArrayRef<EltTy>() {
+ if (Val.isNull())
+ return None;
+ if (Val.template is<EltTy>())
+ return *Val.getAddrOfPtr1();
+ return *Val.template get<VecTy*>();
+ }
+
+ // Implicit conversion to ArrayRef<U> if EltTy* implicitly converts to U*.
+ template <
+ typename U,
+ std::enable_if_t<std::is_convertible<ArrayRef<EltTy>, ArrayRef<U>>::value,
+ bool> = false>
+ operator ArrayRef<U>() const {
+ return operator ArrayRef<EltTy>();
+ }
+
+ bool empty() const {
+ // This vector can be empty if it contains no element, or if it
+ // contains a pointer to an empty vector.
+ if (Val.isNull()) return true;
+ if (VecTy *Vec = Val.template dyn_cast<VecTy*>())
+ return Vec->empty();
+ return false;
+ }
+
+ unsigned size() const {
+ if (empty())
+ return 0;
+ if (Val.template is<EltTy>())
+ return 1;
+ return Val.template get<VecTy*>()->size();
+ }
+
+ using iterator = EltTy *;
+ using const_iterator = const EltTy *;
+ using reverse_iterator = std::reverse_iterator<iterator>;
+ using const_reverse_iterator = std::reverse_iterator<const_iterator>;
+
+ iterator begin() {
+ if (Val.template is<EltTy>())
+ return Val.getAddrOfPtr1();
+
+ return Val.template get<VecTy *>()->begin();
+ }
+
+ iterator end() {
+ if (Val.template is<EltTy>())
+ return begin() + (Val.isNull() ? 0 : 1);
+
+ return Val.template get<VecTy *>()->end();
+ }
+
+ const_iterator begin() const {
+ return (const_iterator)const_cast<TinyPtrVector*>(this)->begin();
+ }
+
+ const_iterator end() const {
+ return (const_iterator)const_cast<TinyPtrVector*>(this)->end();
+ }
+
+ reverse_iterator rbegin() { return reverse_iterator(end()); }
+ reverse_iterator rend() { return reverse_iterator(begin()); }
+
+ const_reverse_iterator rbegin() const {
+ return const_reverse_iterator(end());
+ }
+
+ const_reverse_iterator rend() const {
+ return const_reverse_iterator(begin());
+ }
+
+ EltTy operator[](unsigned i) const {
+ assert(!Val.isNull() && "can't index into an empty vector");
+ if (Val.template is<EltTy>()) {
+ assert(i == 0 && "tinyvector index out of range");
+ return Val.template get<EltTy>();
+ }
+
+ assert(i < Val.template get<VecTy*>()->size() &&
+ "tinyvector index out of range");
+ return (*Val.template get<VecTy*>())[i];
+ }
+
+ EltTy front() const {
+ assert(!empty() && "vector empty");
+ if (Val.template is<EltTy>())
+ return Val.template get<EltTy>();
+ return Val.template get<VecTy*>()->front();
+ }
+
+ EltTy back() const {
+ assert(!empty() && "vector empty");
+ if (Val.template is<EltTy>())
+ return Val.template get<EltTy>();
+ return Val.template get<VecTy*>()->back();
+ }
+
+ void push_back(EltTy NewVal) {
+ // If we have nothing, add something.
+ if (Val.isNull()) {
+ Val = NewVal;
+ assert(!Val.isNull() && "Can't add a null value");
+ return;
+ }
+
+ // If we have a single value, convert to a vector.
+ if (Val.template is<EltTy>()) {
+ EltTy V = Val.template get<EltTy>();
+ Val = new VecTy();
+ Val.template get<VecTy*>()->push_back(V);
+ }
+
+ // Add the new value, we know we have a vector.
+ Val.template get<VecTy*>()->push_back(NewVal);
+ }
+
+ void pop_back() {
+ // If we have a single value, convert to empty.
+ if (Val.template is<EltTy>())
+ Val = (EltTy)nullptr;
+ else if (VecTy *Vec = Val.template get<VecTy*>())
+ Vec->pop_back();
+ }
+
+ void clear() {
+ // If we have a single value, convert to empty.
+ if (Val.template is<EltTy>()) {
+ Val = EltTy();
+ } else if (VecTy *Vec = Val.template dyn_cast<VecTy*>()) {
+ // If we have a vector form, just clear it.
+ Vec->clear();
+ }
+ // Otherwise, we're already empty.
+ }
+
+ iterator erase(iterator I) {
+ assert(I >= begin() && "Iterator to erase is out of bounds.");
+ assert(I < end() && "Erasing at past-the-end iterator.");
+
+ // If we have a single value, convert to empty.
+ if (Val.template is<EltTy>()) {
+ if (I == begin())
+ Val = EltTy();
+ } else if (VecTy *Vec = Val.template dyn_cast<VecTy*>()) {
+ // multiple items in a vector; just do the erase, there is no
+ // benefit to collapsing back to a pointer
+ return Vec->erase(I);
+ }
+ return end();
+ }
+
+ iterator erase(iterator S, iterator E) {
+ assert(S >= begin() && "Range to erase is out of bounds.");
+ assert(S <= E && "Trying to erase invalid range.");
+ assert(E <= end() && "Trying to erase past the end.");
+
+ if (Val.template is<EltTy>()) {
+ if (S == begin() && S != E)
+ Val = EltTy();
+ } else if (VecTy *Vec = Val.template dyn_cast<VecTy*>()) {
+ return Vec->erase(S, E);
+ }
+ return end();
+ }
+
+ iterator insert(iterator I, const EltTy &Elt) {
+ assert(I >= this->begin() && "Insertion iterator is out of bounds.");
+ assert(I <= this->end() && "Inserting past the end of the vector.");
+ if (I == end()) {
+ push_back(Elt);
+ return std::prev(end());
+ }
+ assert(!Val.isNull() && "Null value with non-end insert iterator.");
+ if (Val.template is<EltTy>()) {
+ EltTy V = Val.template get<EltTy>();
+ assert(I == begin());
+ Val = Elt;
+ push_back(V);
+ return begin();
+ }
+
+ return Val.template get<VecTy*>()->insert(I, Elt);
+ }
+
+ template<typename ItTy>
+ iterator insert(iterator I, ItTy From, ItTy To) {
+ assert(I >= this->begin() && "Insertion iterator is out of bounds.");
+ assert(I <= this->end() && "Inserting past the end of the vector.");
+ if (From == To)
+ return I;
+
+ // If we have a single value, convert to a vector.
+ ptrdiff_t Offset = I - begin();
+ if (Val.isNull()) {
+ if (std::next(From) == To) {
+ Val = *From;
+ return begin();
+ }
+
+ Val = new VecTy();
+ } else if (Val.template is<EltTy>()) {
+ EltTy V = Val.template get<EltTy>();
+ Val = new VecTy();
+ Val.template get<VecTy*>()->push_back(V);
+ }
+ return Val.template get<VecTy*>()->insert(begin() + Offset, From, To);
+ }
+};
+
+} // end namespace llvm
+
+#endif // LLVM_ADT_TINYPTRVECTOR_H
+
+#ifdef __GNUC__
+#pragma GCC diagnostic pop
+#endif
diff --git a/contrib/libs/llvm14/include/llvm/ADT/Triple.h b/contrib/libs/llvm14/include/llvm/ADT/Triple.h
new file mode 100644
index 0000000000..85660e978b
--- /dev/null
+++ b/contrib/libs/llvm14/include/llvm/ADT/Triple.h
@@ -0,0 +1,1016 @@
+#pragma once
+
+#ifdef __GNUC__
+#pragma GCC diagnostic push
+#pragma GCC diagnostic ignored "-Wunused-parameter"
+#endif
+
+//===-- llvm/ADT/Triple.h - Target triple helper class ----------*- C++ -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_ADT_TRIPLE_H
+#define LLVM_ADT_TRIPLE_H
+
+#include "llvm/ADT/Twine.h"
+#include "llvm/Support/VersionTuple.h"
+
+// Some system headers or GCC predefined macros conflict with identifiers in
+// this file. Undefine them here.
+#undef NetBSD
+#undef mips
+#undef sparc
+
+namespace llvm {
+
+/// Triple - Helper class for working with autoconf configuration names. For
+/// historical reasons, we also call these 'triples' (they used to contain
+/// exactly three fields).
+///
+/// Configuration names are strings in the canonical form:
+/// ARCHITECTURE-VENDOR-OPERATING_SYSTEM
+/// or
+/// ARCHITECTURE-VENDOR-OPERATING_SYSTEM-ENVIRONMENT
+///
+/// This class is used for clients which want to support arbitrary
+/// configuration names, but also want to implement certain special
+/// behavior for particular configurations. This class isolates the mapping
+/// from the components of the configuration name to well known IDs.
+///
+/// At its core the Triple class is designed to be a wrapper for a triple
+/// string; the constructor does not change or normalize the triple string.
+/// Clients that need to handle the non-canonical triples that users often
+/// specify should use the normalize method.
+///
+/// See autoconf/config.guess for a glimpse into what configuration names
+/// look like in practice.
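+///
+/// For illustration, a brief usage sketch with expected results in comments:
+/// \code
+///   Triple T("x86_64-pc-linux-gnu");
+///   T.getArch() == Triple::x86_64;     // true
+///   T.isOSLinux();                     // true
+///   T.getEnvironment() == Triple::GNU; // true
+/// \endcode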
+class Triple {
+public:
+ enum ArchType {
+ UnknownArch,
+
+ arm, // ARM (little endian): arm, armv.*, xscale
+ armeb, // ARM (big endian): armeb
+ aarch64, // AArch64 (little endian): aarch64
+ aarch64_be, // AArch64 (big endian): aarch64_be
+ aarch64_32, // AArch64 (little endian) ILP32: aarch64_32
+ arc, // ARC: Synopsys ARC
+ avr, // AVR: Atmel AVR microcontroller
+ bpfel, // eBPF or extended BPF or 64-bit BPF (little endian)
+ bpfeb, // eBPF or extended BPF or 64-bit BPF (big endian)
+ csky, // CSKY: csky
+ hexagon, // Hexagon: hexagon
+ m68k, // M68k: Motorola 680x0 family
+ mips, // MIPS: mips, mipsallegrex, mipsr6
+ mipsel, // MIPSEL: mipsel, mipsallegrexe, mipsr6el
+ mips64, // MIPS64: mips64, mips64r6, mipsn32, mipsn32r6
+ mips64el, // MIPS64EL: mips64el, mips64r6el, mipsn32el, mipsn32r6el
+ msp430, // MSP430: msp430
+ ppc, // PPC: powerpc
+ ppcle, // PPCLE: powerpc (little endian)
+ ppc64, // PPC64: powerpc64, ppu
+ ppc64le, // PPC64LE: powerpc64le
+ r600, // R600: AMD GPUs HD2XXX - HD6XXX
+ amdgcn, // AMDGCN: AMD GCN GPUs
+ riscv32, // RISC-V (32-bit): riscv32
+ riscv64, // RISC-V (64-bit): riscv64
+ sparc, // Sparc: sparc
+ sparcv9, // Sparcv9: Sparcv9
+ sparcel, // Sparc: (endianness = little). NB: 'Sparcle' is a CPU variant
+ systemz, // SystemZ: s390x
+ tce, // TCE (http://tce.cs.tut.fi/): tce
+ tcele, // TCE little endian (http://tce.cs.tut.fi/): tcele
+ thumb, // Thumb (little endian): thumb, thumbv.*
+ thumbeb, // Thumb (big endian): thumbeb
+ x86, // X86: i[3-9]86
+ x86_64, // X86-64: amd64, x86_64
+ xcore, // XCore: xcore
+ nvptx, // NVPTX: 32-bit
+ nvptx64, // NVPTX: 64-bit
+ le32, // le32: generic little-endian 32-bit CPU (PNaCl)
+ le64, // le64: generic little-endian 64-bit CPU (PNaCl)
+ amdil, // AMDIL
+ amdil64, // AMDIL with 64-bit pointers
+ hsail, // AMD HSAIL
+ hsail64, // AMD HSAIL with 64-bit pointers
+ spir, // SPIR: standard portable IR for OpenCL 32-bit version
+ spir64, // SPIR: standard portable IR for OpenCL 64-bit version
+ spirv32, // SPIR-V with 32-bit pointers
+ spirv64, // SPIR-V with 64-bit pointers
+ kalimba, // Kalimba: generic kalimba
+ shave, // SHAVE: Movidius vector VLIW processors
+ lanai, // Lanai: Lanai 32-bit
+ wasm32, // WebAssembly with 32-bit pointers
+ wasm64, // WebAssembly with 64-bit pointers
+ renderscript32, // 32-bit RenderScript
+ renderscript64, // 64-bit RenderScript
+ ve, // NEC SX-Aurora Vector Engine
+ LastArchType = ve
+ };
+ enum SubArchType {
+ NoSubArch,
+
+ ARMSubArch_v9_3a,
+ ARMSubArch_v9_2a,
+ ARMSubArch_v9_1a,
+ ARMSubArch_v9,
+ ARMSubArch_v8_8a,
+ ARMSubArch_v8_7a,
+ ARMSubArch_v8_6a,
+ ARMSubArch_v8_5a,
+ ARMSubArch_v8_4a,
+ ARMSubArch_v8_3a,
+ ARMSubArch_v8_2a,
+ ARMSubArch_v8_1a,
+ ARMSubArch_v8,
+ ARMSubArch_v8r,
+ ARMSubArch_v8m_baseline,
+ ARMSubArch_v8m_mainline,
+ ARMSubArch_v8_1m_mainline,
+ ARMSubArch_v7,
+ ARMSubArch_v7em,
+ ARMSubArch_v7m,
+ ARMSubArch_v7s,
+ ARMSubArch_v7k,
+ ARMSubArch_v7ve,
+ ARMSubArch_v6,
+ ARMSubArch_v6m,
+ ARMSubArch_v6k,
+ ARMSubArch_v6t2,
+ ARMSubArch_v5,
+ ARMSubArch_v5te,
+ ARMSubArch_v4t,
+
+ AArch64SubArch_arm64e,
+
+ KalimbaSubArch_v3,
+ KalimbaSubArch_v4,
+ KalimbaSubArch_v5,
+
+ MipsSubArch_r6,
+
+ PPCSubArch_spe
+ };
+ enum VendorType {
+ UnknownVendor,
+
+ Apple,
+ PC,
+ SCEI,
+ Freescale,
+ IBM,
+ ImaginationTechnologies,
+ MipsTechnologies,
+ NVIDIA,
+ CSR,
+ Myriad,
+ AMD,
+ Mesa,
+ SUSE,
+ OpenEmbedded,
+ LastVendorType = OpenEmbedded
+ };
+ enum OSType {
+ UnknownOS,
+
+ Ananas,
+ CloudABI,
+ Darwin,
+ DragonFly,
+ FreeBSD,
+ Fuchsia,
+ IOS,
+ KFreeBSD,
+ Linux,
+ Lv2, // PS3
+ MacOSX,
+ NetBSD,
+ OpenBSD,
+ Solaris,
+ Win32,
+ ZOS,
+ Haiku,
+ Minix,
+ RTEMS,
+ NaCl, // Native Client
+ AIX,
+ CUDA, // NVIDIA CUDA
+ NVCL, // NVIDIA OpenCL
+ AMDHSA, // AMD HSA Runtime
+ PS4,
+ ELFIAMCU,
+ TvOS, // Apple tvOS
+ WatchOS, // Apple watchOS
+ Mesa3D,
+ Contiki,
+ AMDPAL, // AMD PAL Runtime
+ HermitCore, // HermitCore Unikernel/Multikernel
+ Hurd, // GNU/Hurd
+ WASI, // Experimental WebAssembly OS
+ Emscripten,
+ LastOSType = Emscripten
+ };
+ enum EnvironmentType {
+ UnknownEnvironment,
+
+ GNU,
+ GNUABIN32,
+ GNUABI64,
+ GNUEABI,
+ GNUEABIHF,
+ GNUX32,
+ GNUILP32,
+ CODE16,
+ EABI,
+ EABIHF,
+ Android,
+ Musl,
+ MuslEABI,
+ MuslEABIHF,
+ MuslX32,
+
+ MSVC,
+ Itanium,
+ Cygnus,
+ CoreCLR,
+ Simulator, // Simulator variants of other systems, e.g., Apple's iOS
+ MacABI, // Mac Catalyst variant of Apple's iOS deployment target.
+ LastEnvironmentType = MacABI
+ };
+ enum ObjectFormatType {
+ UnknownObjectFormat,
+
+ COFF,
+ ELF,
+ GOFF,
+ MachO,
+ Wasm,
+ XCOFF,
+ };
+
+private:
+ std::string Data;
+
+ /// The parsed arch type.
+ ArchType Arch;
+
+ /// The parsed subarchitecture type.
+ SubArchType SubArch;
+
+ /// The parsed vendor type.
+ VendorType Vendor;
+
+ /// The parsed OS type.
+ OSType OS;
+
+ /// The parsed Environment type.
+ EnvironmentType Environment;
+
+ /// The object format type.
+ ObjectFormatType ObjectFormat;
+
+public:
+ /// @name Constructors
+ /// @{
+
+ /// Default constructor is the same as an empty string and leaves all
+ /// triple fields unknown.
+ Triple() : Arch(), SubArch(), Vendor(), OS(), Environment(), ObjectFormat() {}
+
+ explicit Triple(const Twine &Str);
+ Triple(const Twine &ArchStr, const Twine &VendorStr, const Twine &OSStr);
+ Triple(const Twine &ArchStr, const Twine &VendorStr, const Twine &OSStr,
+ const Twine &EnvironmentStr);
+
+ bool operator==(const Triple &Other) const {
+ return Arch == Other.Arch && SubArch == Other.SubArch &&
+ Vendor == Other.Vendor && OS == Other.OS &&
+ Environment == Other.Environment &&
+ ObjectFormat == Other.ObjectFormat;
+ }
+
+ bool operator!=(const Triple &Other) const {
+ return !(*this == Other);
+ }
+
+ /// @}
+ /// @name Normalization
+ /// @{
+
+ /// Turn an arbitrary machine specification into the canonical triple form (or
+ /// something sensible that the Triple class understands if nothing better can
+ /// reasonably be done). In particular, it handles the common case in which
+ /// otherwise valid components are in the wrong order.
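+ ///
+ /// For illustration (the output shown reflects the canonical
+ /// ARCH-VENDOR-OS-ENV ordering, with "unknown" filled in for a missing
+ /// component):
+ /// \code
+ ///   Triple::normalize("x86_64-linux-gnu"); // "x86_64-unknown-linux-gnu"
+ /// \endcode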
+ static std::string normalize(StringRef Str);
+
+ /// Return the normalized form of this triple's string.
+ std::string normalize() const { return normalize(Data); }
+
+ /// @}
+ /// @name Typed Component Access
+ /// @{
+
+ /// Get the parsed architecture type of this triple.
+ ArchType getArch() const { return Arch; }
+
+ /// Get the parsed subarchitecture type for this triple.
+ SubArchType getSubArch() const { return SubArch; }
+
+ /// Get the parsed vendor type of this triple.
+ VendorType getVendor() const { return Vendor; }
+
+ /// Get the parsed operating system type of this triple.
+ OSType getOS() const { return OS; }
+
+ /// Does this triple have the optional environment (fourth) component?
+ bool hasEnvironment() const {
+ return getEnvironmentName() != "";
+ }
+
+ /// Get the parsed environment type of this triple.
+ EnvironmentType getEnvironment() const { return Environment; }
+
+ /// Parse the version number from the environment component of the
+ /// triple, if present.
+ ///
+ /// For example, an environment "gnueabi1.2.3" would return (1, 2, 3).
+ VersionTuple getEnvironmentVersion() const;
+
+ /// Get the object format for this triple.
+ ObjectFormatType getObjectFormat() const { return ObjectFormat; }
+
+ /// Parse the version number from the OS name component of the triple, if
+ /// present.
+ ///
+ /// For example, "fooos1.2.3" would return (1, 2, 3).
+ VersionTuple getOSVersion() const;
+
+ /// Return just the major version number; this is specialized because it is a
+ /// common query.
+ unsigned getOSMajorVersion() const { return getOSVersion().getMajor(); }
+
+ /// Parse the version number as with getOSVersion and then translate generic
+ /// "darwin" versions to the corresponding OS X versions. This may also be
+ /// called with IOS triples but the OS X version number is just set to a
+ /// constant 10.4.0 in that case. Returns true if successful.
+ bool getMacOSXVersion(VersionTuple &Version) const;
+
+ /// Parse the version number as with getOSVersion. This should only be called
+ /// with IOS or generic triples.
+ VersionTuple getiOSVersion() const;
+
+ /// Parse the version number as with getOSVersion. This should only be called
+ /// with WatchOS or generic triples.
+ VersionTuple getWatchOSVersion() const;
+
+ /// @}
+ /// @name Direct Component Access
+ /// @{
+
+ const std::string &str() const { return Data; }
+
+ const std::string &getTriple() const { return Data; }
+
+ /// Get the architecture (first) component of the triple.
+ StringRef getArchName() const;
+
+ /// Get the architecture name based on Kind and SubArch.
+ StringRef getArchName(ArchType Kind, SubArchType SubArch = NoSubArch) const;
+
+ /// Get the vendor (second) component of the triple.
+ StringRef getVendorName() const;
+
+ /// Get the operating system (third) component of the triple.
+ StringRef getOSName() const;
+
+ /// Get the optional environment (fourth) component of the triple, or "" if
+ /// empty.
+ StringRef getEnvironmentName() const;
+
+ /// Get the operating system and optional environment components as a single
+ /// string (separated by a '-' if the environment component is present).
+ StringRef getOSAndEnvironmentName() const;
+
+ /// @}
+ /// @name Convenience Predicates
+ /// @{
+
+ /// Test whether the architecture is 64-bit
+ ///
+ /// Note that this tests for 64-bit pointer width, and nothing else. Note
+ /// that we intentionally expose only three predicates, 64-bit, 32-bit, and
+ /// 16-bit. The inner details of pointer width for particular architectures
+ /// are not summed up in the triple, and so only a coarse-grained predicate
+ /// system is provided.
+ bool isArch64Bit() const;
+
+ /// Test whether the architecture is 32-bit
+ ///
+ /// Note that this tests for 32-bit pointer width, and nothing else.
+ bool isArch32Bit() const;
+
+ /// Test whether the architecture is 16-bit
+ ///
+ /// Note that this tests for 16-bit pointer width, and nothing else.
+ bool isArch16Bit() const;
+
+ /// Helper function for doing comparisons against version numbers included in
+ /// the target triple.
+ bool isOSVersionLT(unsigned Major, unsigned Minor = 0,
+ unsigned Micro = 0) const {
+ if (Minor == 0) {
+ return getOSVersion() < VersionTuple(Major);
+ }
+ if (Micro == 0) {
+ return getOSVersion() < VersionTuple(Major, Minor);
+ }
+ return getOSVersion() < VersionTuple(Major, Minor, Micro);
+ }
+
+ bool isOSVersionLT(const Triple &Other) const {
+ return getOSVersion() < Other.getOSVersion();
+ }
+
+ /// Comparison function for checking OS X version compatibility, which handles
+ /// the skewed version numbering schemes used by the "darwin" triples.
+ bool isMacOSXVersionLT(unsigned Major, unsigned Minor = 0,
+ unsigned Micro = 0) const;
+
+ /// Is this a Mac OS X triple. For legacy reasons, we support both "darwin"
+ /// and "osx" as OS X triples.
+ bool isMacOSX() const {
+ return getOS() == Triple::Darwin || getOS() == Triple::MacOSX;
+ }
+
+ /// Is this an iOS triple.
+ /// Note: This identifies tvOS as a variant of iOS. If that ever
+ /// changes, i.e., if the two operating systems diverge or their version
+ /// numbers get out of sync, that will need to be changed.
+ /// watchOS has completely different version numbers so it is not included.
+ bool isiOS() const {
+ return getOS() == Triple::IOS || isTvOS();
+ }
+
+ /// Is this an Apple tvOS triple.
+ bool isTvOS() const {
+ return getOS() == Triple::TvOS;
+ }
+
+ /// Is this an Apple watchOS triple.
+ bool isWatchOS() const {
+ return getOS() == Triple::WatchOS;
+ }
+
+ bool isWatchABI() const {
+ return getSubArch() == Triple::ARMSubArch_v7k;
+ }
+
+ bool isOSzOS() const { return getOS() == Triple::ZOS; }
+
+ /// Is this a "Darwin" OS (macOS, iOS, tvOS or watchOS).
+ bool isOSDarwin() const {
+ return isMacOSX() || isiOS() || isWatchOS();
+ }
+
+ bool isSimulatorEnvironment() const {
+ return getEnvironment() == Triple::Simulator;
+ }
+
+ bool isMacCatalystEnvironment() const {
+ return getEnvironment() == Triple::MacABI;
+ }
+
+ /// Returns true for targets that run on a macOS machine.
+ bool isTargetMachineMac() const {
+ return isMacOSX() || (isOSDarwin() && (isSimulatorEnvironment() ||
+ isMacCatalystEnvironment()));
+ }
+
+ bool isOSNetBSD() const {
+ return getOS() == Triple::NetBSD;
+ }
+
+ bool isOSOpenBSD() const {
+ return getOS() == Triple::OpenBSD;
+ }
+
+ bool isOSFreeBSD() const {
+ return getOS() == Triple::FreeBSD;
+ }
+
+ bool isOSFuchsia() const {
+ return getOS() == Triple::Fuchsia;
+ }
+
+ bool isOSDragonFly() const { return getOS() == Triple::DragonFly; }
+
+ bool isOSSolaris() const {
+ return getOS() == Triple::Solaris;
+ }
+
+ bool isOSIAMCU() const {
+ return getOS() == Triple::ELFIAMCU;
+ }
+
+ bool isOSUnknown() const { return getOS() == Triple::UnknownOS; }
+
+ bool isGNUEnvironment() const {
+ EnvironmentType Env = getEnvironment();
+ return Env == Triple::GNU || Env == Triple::GNUABIN32 ||
+ Env == Triple::GNUABI64 || Env == Triple::GNUEABI ||
+ Env == Triple::GNUEABIHF || Env == Triple::GNUX32;
+ }
+
+ bool isOSContiki() const {
+ return getOS() == Triple::Contiki;
+ }
+
+ /// Tests whether the OS is Haiku.
+ bool isOSHaiku() const {
+ return getOS() == Triple::Haiku;
+ }
+
+ /// Tests whether the OS is Windows.
+ bool isOSWindows() const {
+ return getOS() == Triple::Win32;
+ }
+
+ /// Checks if the environment is MSVC.
+ bool isKnownWindowsMSVCEnvironment() const {
+ return isOSWindows() && getEnvironment() == Triple::MSVC;
+ }
+
+ /// Checks if the environment could be MSVC.
+ bool isWindowsMSVCEnvironment() const {
+ return isKnownWindowsMSVCEnvironment() ||
+ (isOSWindows() && getEnvironment() == Triple::UnknownEnvironment);
+ }
+
+ bool isWindowsCoreCLREnvironment() const {
+ return isOSWindows() && getEnvironment() == Triple::CoreCLR;
+ }
+
+ bool isWindowsItaniumEnvironment() const {
+ return isOSWindows() && getEnvironment() == Triple::Itanium;
+ }
+
+ bool isWindowsCygwinEnvironment() const {
+ return isOSWindows() && getEnvironment() == Triple::Cygnus;
+ }
+
+ bool isWindowsGNUEnvironment() const {
+ return isOSWindows() && getEnvironment() == Triple::GNU;
+ }
+
+ /// Tests for either Cygwin or MinGW OS
+ bool isOSCygMing() const {
+ return isWindowsCygwinEnvironment() || isWindowsGNUEnvironment();
+ }
+
+ /// Is this a "Windows" OS targeting a "MSVCRT.dll" environment.
+ bool isOSMSVCRT() const {
+ return isWindowsMSVCEnvironment() || isWindowsGNUEnvironment() ||
+ isWindowsItaniumEnvironment();
+ }
+
+ /// Tests whether the OS is NaCl (Native Client)
+ bool isOSNaCl() const {
+ return getOS() == Triple::NaCl;
+ }
+
+ /// Tests whether the OS is Linux.
+ bool isOSLinux() const {
+ return getOS() == Triple::Linux;
+ }
+
+ /// Tests whether the OS is kFreeBSD.
+ bool isOSKFreeBSD() const {
+ return getOS() == Triple::KFreeBSD;
+ }
+
+ /// Tests whether the OS is Hurd.
+ bool isOSHurd() const {
+ return getOS() == Triple::Hurd;
+ }
+
+ /// Tests whether the OS is WASI.
+ bool isOSWASI() const {
+ return getOS() == Triple::WASI;
+ }
+
+ /// Tests whether the OS is Emscripten.
+ bool isOSEmscripten() const {
+ return getOS() == Triple::Emscripten;
+ }
+
+ /// Tests whether the OS uses glibc.
+ bool isOSGlibc() const {
+ return (getOS() == Triple::Linux || getOS() == Triple::KFreeBSD ||
+ getOS() == Triple::Hurd) &&
+ !isAndroid();
+ }
+
+ /// Tests whether the OS is AIX.
+ bool isOSAIX() const {
+ return getOS() == Triple::AIX;
+ }
+
+ /// Tests whether the OS uses the ELF binary format.
+ bool isOSBinFormatELF() const {
+ return getObjectFormat() == Triple::ELF;
+ }
+
+ /// Tests whether the OS uses the COFF binary format.
+ bool isOSBinFormatCOFF() const {
+ return getObjectFormat() == Triple::COFF;
+ }
+
+ /// Tests whether the OS uses the GOFF binary format.
+ bool isOSBinFormatGOFF() const { return getObjectFormat() == Triple::GOFF; }
+
+ /// Tests whether the environment is MachO.
+ bool isOSBinFormatMachO() const {
+ return getObjectFormat() == Triple::MachO;
+ }
+
+ /// Tests whether the OS uses the Wasm binary format.
+ bool isOSBinFormatWasm() const {
+ return getObjectFormat() == Triple::Wasm;
+ }
+
+ /// Tests whether the OS uses the XCOFF binary format.
+ bool isOSBinFormatXCOFF() const {
+ return getObjectFormat() == Triple::XCOFF;
+ }
+
+ /// Tests whether the target is the PS4 CPU
+ bool isPS4CPU() const {
+ return getArch() == Triple::x86_64 &&
+ getVendor() == Triple::SCEI &&
+ getOS() == Triple::PS4;
+ }
+
+ /// Tests whether the target is the PS4 platform
+ bool isPS4() const {
+ return getVendor() == Triple::SCEI &&
+ getOS() == Triple::PS4;
+ }
+
+ /// Tests whether the target is Android
+ bool isAndroid() const { return getEnvironment() == Triple::Android; }
+
+ bool isAndroidVersionLT(unsigned Major) const {
+ assert(isAndroid() && "Not an Android triple!");
+
+ VersionTuple Version = getEnvironmentVersion();
+
+ // 64-bit targets did not exist before API level 21 (Lollipop).
+ if (isArch64Bit() && Version.getMajor() < 21)
+ return VersionTuple(21) < VersionTuple(Major);
+
+ return Version < VersionTuple(Major);
+ }
+
+ /// Tests whether the environment is musl-libc
+ bool isMusl() const {
+ return getEnvironment() == Triple::Musl ||
+ getEnvironment() == Triple::MuslEABI ||
+ getEnvironment() == Triple::MuslEABIHF ||
+ getEnvironment() == Triple::MuslX32;
+ }
+
+ /// Tests whether the target is SPIR (32- or 64-bit).
+ bool isSPIR() const {
+ return getArch() == Triple::spir || getArch() == Triple::spir64;
+ }
+
+ /// Tests whether the target is SPIR-V (32/64-bit).
+ bool isSPIRV() const {
+ return getArch() == Triple::spirv32 || getArch() == Triple::spirv64;
+ }
+
+ /// Tests whether the target is NVPTX (32- or 64-bit).
+ bool isNVPTX() const {
+ return getArch() == Triple::nvptx || getArch() == Triple::nvptx64;
+ }
+
+ /// Tests whether the target is AMDGCN
+ bool isAMDGCN() const { return getArch() == Triple::amdgcn; }
+
+ bool isAMDGPU() const {
+ return getArch() == Triple::r600 || getArch() == Triple::amdgcn;
+ }
+
+ /// Tests whether the target is Thumb (little and big endian).
+ bool isThumb() const {
+ return getArch() == Triple::thumb || getArch() == Triple::thumbeb;
+ }
+
+ /// Tests whether the target is ARM (little and big endian).
+ bool isARM() const {
+ return getArch() == Triple::arm || getArch() == Triple::armeb;
+ }
+
+ /// Tests whether the target supports the EHABI exception
+ /// handling standard.
+ bool isTargetEHABICompatible() const {
+ return (isARM() || isThumb()) &&
+ (getEnvironment() == Triple::EABI ||
+ getEnvironment() == Triple::GNUEABI ||
+ getEnvironment() == Triple::MuslEABI ||
+ getEnvironment() == Triple::EABIHF ||
+ getEnvironment() == Triple::GNUEABIHF ||
+ getEnvironment() == Triple::MuslEABIHF || isAndroid()) &&
+ isOSBinFormatELF();
+ }
+
+ /// Tests whether the target is T32.
+ bool isArmT32() const {
+ switch (getSubArch()) {
+ case Triple::ARMSubArch_v8m_baseline:
+ case Triple::ARMSubArch_v7s:
+ case Triple::ARMSubArch_v7k:
+ case Triple::ARMSubArch_v7ve:
+ case Triple::ARMSubArch_v6:
+ case Triple::ARMSubArch_v6m:
+ case Triple::ARMSubArch_v6k:
+ case Triple::ARMSubArch_v6t2:
+ case Triple::ARMSubArch_v5:
+ case Triple::ARMSubArch_v5te:
+ case Triple::ARMSubArch_v4t:
+ return false;
+ default:
+ return true;
+ }
+ }
+
+ /// Tests whether the target is an M-class.
+ bool isArmMClass() const {
+ switch (getSubArch()) {
+ case Triple::ARMSubArch_v6m:
+ case Triple::ARMSubArch_v7m:
+ case Triple::ARMSubArch_v7em:
+ case Triple::ARMSubArch_v8m_mainline:
+ case Triple::ARMSubArch_v8m_baseline:
+ case Triple::ARMSubArch_v8_1m_mainline:
+ return true;
+ default:
+ return false;
+ }
+ }
+
+ /// Tests whether the target is AArch64 (little and big endian).
+ bool isAArch64() const {
+ return getArch() == Triple::aarch64 || getArch() == Triple::aarch64_be ||
+ getArch() == Triple::aarch64_32;
+ }
+
+ /// Tests whether the target is AArch64 and pointers are the size specified by
+ /// \p PointerWidth.
+ bool isAArch64(int PointerWidth) const {
+ assert(PointerWidth == 64 || PointerWidth == 32);
+ if (!isAArch64())
+ return false;
+ return getArch() == Triple::aarch64_32 ||
+ getEnvironment() == Triple::GNUILP32
+ ? PointerWidth == 32
+ : PointerWidth == 64;
+ }
+
+ /// Tests whether the target is MIPS 32-bit (little and big endian).
+ bool isMIPS32() const {
+ return getArch() == Triple::mips || getArch() == Triple::mipsel;
+ }
+
+ /// Tests whether the target is MIPS 64-bit (little and big endian).
+ bool isMIPS64() const {
+ return getArch() == Triple::mips64 || getArch() == Triple::mips64el;
+ }
+
+ /// Tests whether the target is MIPS (little and big endian, 32- or 64-bit).
+ bool isMIPS() const {
+ return isMIPS32() || isMIPS64();
+ }
+
+ /// Tests whether the target is PowerPC (32- or 64-bit LE or BE).
+ bool isPPC() const {
+ return getArch() == Triple::ppc || getArch() == Triple::ppc64 ||
+ getArch() == Triple::ppcle || getArch() == Triple::ppc64le;
+ }
+
+ /// Tests whether the target is 32-bit PowerPC (little and big endian).
+ bool isPPC32() const {
+ return getArch() == Triple::ppc || getArch() == Triple::ppcle;
+ }
+
+ /// Tests whether the target is 64-bit PowerPC (little and big endian).
+ bool isPPC64() const {
+ return getArch() == Triple::ppc64 || getArch() == Triple::ppc64le;
+ }
+
+ /// Tests whether the target is RISC-V (32- and 64-bit).
+ bool isRISCV() const {
+ return getArch() == Triple::riscv32 || getArch() == Triple::riscv64;
+ }
+
+ /// Tests whether the target is SystemZ.
+ bool isSystemZ() const {
+ return getArch() == Triple::systemz;
+ }
+
+ /// Tests whether the target is x86 (32- or 64-bit).
+ bool isX86() const {
+ return getArch() == Triple::x86 || getArch() == Triple::x86_64;
+ }
+
+ /// Tests whether the target is VE
+ bool isVE() const {
+ return getArch() == Triple::ve;
+ }
+
+ /// Tests whether the target is wasm (32- and 64-bit).
+ bool isWasm() const {
+ return getArch() == Triple::wasm32 || getArch() == Triple::wasm64;
+ }
+
+ // Tests whether the target is CSKY
+ bool isCSKY() const {
+ return getArch() == Triple::csky;
+ }
+
+ /// Tests whether the target is the Apple "arm64e" AArch64 subarch.
+ bool isArm64e() const {
+ return getArch() == Triple::aarch64 &&
+ getSubArch() == Triple::AArch64SubArch_arm64e;
+ }
+
+ /// Tests whether the target is X32.
+ bool isX32() const {
+ EnvironmentType Env = getEnvironment();
+ return Env == Triple::GNUX32 || Env == Triple::MuslX32;
+ }
+
+ /// Tests whether the target supports comdat
+ bool supportsCOMDAT() const {
+ return !(isOSBinFormatMachO() || isOSBinFormatXCOFF());
+ }
+
+ /// Tests whether the target uses emulated TLS as default.
+ bool hasDefaultEmulatedTLS() const {
+ return isAndroid() || isOSOpenBSD() || isWindowsCygwinEnvironment();
+ }
+
+ /// Tests whether the target uses -data-sections as default.
+ bool hasDefaultDataSections() const {
+ return isOSBinFormatXCOFF() || isWasm();
+ }
+
+ /// Tests if the environment supports dllimport/export annotations.
+ bool hasDLLImportExport() const { return isOSWindows() || isPS4CPU(); }
+
+ /// @}
+ /// @name Mutators
+ /// @{
+
+ /// Set the architecture (first) component of the triple to a known type.
+ void setArch(ArchType Kind, SubArchType SubArch = NoSubArch);
+
+ /// Set the vendor (second) component of the triple to a known type.
+ void setVendor(VendorType Kind);
+
+ /// Set the operating system (third) component of the triple to a known type.
+ void setOS(OSType Kind);
+
+ /// Set the environment (fourth) component of the triple to a known type.
+ void setEnvironment(EnvironmentType Kind);
+
+ /// Set the object file format.
+ void setObjectFormat(ObjectFormatType Kind);
+
+ /// Set all components to the new triple \p Str.
+ void setTriple(const Twine &Str);
+
+ /// Set the architecture (first) component of the triple by name.
+ void setArchName(StringRef Str);
+
+ /// Set the vendor (second) component of the triple by name.
+ void setVendorName(StringRef Str);
+
+ /// Set the operating system (third) component of the triple by name.
+ void setOSName(StringRef Str);
+
+ /// Set the optional environment (fourth) component of the triple by name.
+ void setEnvironmentName(StringRef Str);
+
+ /// Set the operating system and optional environment components with a single
+ /// string.
+ void setOSAndEnvironmentName(StringRef Str);
+
+ /// @}
+ /// @name Helpers to build variants of a particular triple.
+ /// @{
+
+ /// Form a triple with a 32-bit variant of the current architecture.
+ ///
+ /// This can be used to move across "families" of architectures where useful.
+ ///
+ /// \returns A new triple with a 32-bit architecture or an unknown
+ /// architecture if no such variant can be found.
+ llvm::Triple get32BitArchVariant() const;
+
+ /// Form a triple with a 64-bit variant of the current architecture.
+ ///
+ /// This can be used to move across "families" of architectures where useful.
+ ///
+ /// \returns A new triple with a 64-bit architecture or an unknown
+ /// architecture if no such variant can be found.
+ llvm::Triple get64BitArchVariant() const;
+
+ /// Form a triple with a big endian variant of the current architecture.
+ ///
+ /// This can be used to move across "families" of architectures where useful.
+ ///
+ /// \returns A new triple with a big endian architecture or an unknown
+ /// architecture if no such variant can be found.
+ llvm::Triple getBigEndianArchVariant() const;
+
+ /// Form a triple with a little endian variant of the current architecture.
+ ///
+ /// This can be used to move across "families" of architectures where useful.
+ ///
+ /// \returns A new triple with a little endian architecture or an unknown
+ /// architecture if no such variant can be found.
+ llvm::Triple getLittleEndianArchVariant() const;
+
+ /// Get the (LLVM) name of the minimum ARM CPU for the arch we are targeting.
+ ///
+ /// \param Arch the architecture name (e.g., "armv7s"). If it is an empty
+ /// string then the triple's arch name is used.
+ StringRef getARMCPUForArch(StringRef Arch = StringRef()) const;
+
+ /// Tests whether the target triple is little endian.
+ ///
+ /// \returns true if the triple is little endian, false otherwise.
+ bool isLittleEndian() const;
+
+ /// Test whether target triples are compatible.
+ bool isCompatibleWith(const Triple &Other) const;
+
+ /// Merge target triples.
+ std::string merge(const Triple &Other) const;
+
+  /// Some platforms have minimum supported OS versions that vary by the
+  /// architecture specified in the triple. This function returns the minimum
+  /// supported OS version for this triple if one exists, or an invalid version
+  /// tuple if this triple doesn't have one.
+ VersionTuple getMinimumSupportedOSVersion() const;
+
+ /// @}
+ /// @name Static helpers for IDs.
+ /// @{
+
+ /// Get the canonical name for the \p Kind architecture.
+ static StringRef getArchTypeName(ArchType Kind);
+
+  /// Get the "prefix" canonical name for the \p Kind architecture. This is the
+  /// prefix used by the architecture-specific builtins, and is suitable for
+  /// passing to \see Intrinsic::getIntrinsicForGCCBuiltin().
+ ///
+ /// \return - The architecture prefix, or 0 if none is defined.
+ static StringRef getArchTypePrefix(ArchType Kind);
+
+ /// Get the canonical name for the \p Kind vendor.
+ static StringRef getVendorTypeName(VendorType Kind);
+
+ /// Get the canonical name for the \p Kind operating system.
+ static StringRef getOSTypeName(OSType Kind);
+
+ /// Get the canonical name for the \p Kind environment.
+ static StringRef getEnvironmentTypeName(EnvironmentType Kind);
+
+ /// @}
+ /// @name Static helpers for converting alternate architecture names.
+ /// @{
+
+ /// The canonical type for the given LLVM architecture name (e.g., "x86").
+ static ArchType getArchTypeForLLVMName(StringRef Str);
+
+ /// @}
+
+ /// Returns a canonicalized OS version number for the specified OS.
+ static VersionTuple getCanonicalVersionForOS(OSType OSKind,
+ const VersionTuple &Version);
+};
+
+} // End llvm namespace
+
+
+#endif
+
+#ifdef __GNUC__
+#pragma GCC diagnostic pop
+#endif
diff --git a/contrib/libs/llvm14/include/llvm/ADT/Twine.h b/contrib/libs/llvm14/include/llvm/ADT/Twine.h
new file mode 100644
index 0000000000..743b4a6d8a
--- /dev/null
+++ b/contrib/libs/llvm14/include/llvm/ADT/Twine.h
@@ -0,0 +1,577 @@
+#pragma once
+
+#ifdef __GNUC__
+#pragma GCC diagnostic push
+#pragma GCC diagnostic ignored "-Wunused-parameter"
+#endif
+
+//===- Twine.h - Fast Temporary String Concatenation ------------*- C++ -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_ADT_TWINE_H
+#define LLVM_ADT_TWINE_H
+
+#include "llvm/ADT/SmallVector.h"
+#include "llvm/ADT/StringRef.h"
+#include "llvm/Support/ErrorHandling.h"
+#include <cassert>
+#include <cstdint>
+#include <string>
+#if __cplusplus > 201402L
+#include <string_view>
+#endif
+
+namespace llvm {
+
+ class formatv_object_base;
+ class raw_ostream;
+
+ /// Twine - A lightweight data structure for efficiently representing the
+ /// concatenation of temporary values as strings.
+ ///
+  /// A Twine is a kind of rope; it represents a concatenated string using a
+  /// binary tree, where the string is the preorder of the nodes. Since the
+  /// Twine can be efficiently rendered into a buffer when its result is used,
+  /// it avoids the cost of generating temporary values for intermediate string
+  /// results -- particularly in cases when the Twine result is never
+  /// required. By explicitly tracking the type of leaf nodes, we can also avoid
+  /// the creation of temporary strings for conversion operations (such as
+  /// appending an integer to a string).
+  ///
+  /// A Twine is not intended for direct use and should not be stored; its
+  /// implementation relies on the ability to store pointers to temporary stack
+  /// objects which may be deallocated at the end of a statement. Twines should
+  /// only be accepted as const references in arguments, when an API wishes to
+  /// accept possibly-concatenated strings.
+ ///
+ /// Twines support a special 'null' value, which always concatenates to form
+ /// itself, and renders as an empty string. This can be returned from APIs to
+ /// effectively nullify any concatenations performed on the result.
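+  ///
+  /// For example (a sketch; emitNote() is a hypothetical API accepting a
+  /// const Twine &):
+  ///
+  /// \code
+  ///   emitNote("argument " + Twine(Index) + " of " + Twine(Total));
+  /// \endcode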
+ ///
+ /// \b Implementation
+ ///
+ /// Given the nature of a Twine, it is not possible for the Twine's
+ /// concatenation method to construct interior nodes; the result must be
+ /// represented inside the returned value. For this reason a Twine object
+ /// actually holds two values, the left- and right-hand sides of a
+ /// concatenation. We also have nullary Twine objects, which are effectively
+ /// sentinel values that represent empty strings.
+ ///
+ /// Thus, a Twine can effectively have zero, one, or two children. The \see
+ /// isNullary(), \see isUnary(), and \see isBinary() predicates exist for
+ /// testing the number of children.
+ ///
+ /// We maintain a number of invariants on Twine objects (FIXME: Why):
+ /// - Nullary twines are always represented with their Kind on the left-hand
+ /// side, and the Empty kind on the right-hand side.
+ /// - Unary twines are always represented with the value on the left-hand
+ /// side, and the Empty kind on the right-hand side.
+ /// - If a Twine has another Twine as a child, that child should always be
+ /// binary (otherwise it could have been folded into the parent).
+ ///
+  /// These invariants are checked by \see isValid().
+ ///
+ /// \b Efficiency Considerations
+ ///
+ /// The Twine is designed to yield efficient and small code for common
+ /// situations. For this reason, the concat() method is inlined so that
+ /// concatenations of leaf nodes can be optimized into stores directly into a
+ /// single stack allocated object.
+ ///
+ /// In practice, not all compilers can be trusted to optimize concat() fully,
+ /// so we provide two additional methods (and accompanying operator+
+ /// overloads) to guarantee that particularly important cases (cstring plus
+ /// StringRef) codegen as desired.
+ class Twine {
+ /// NodeKind - Represent the type of an argument.
+ enum NodeKind : unsigned char {
+ /// An empty string; the result of concatenating anything with it is also
+ /// empty.
+ NullKind,
+
+ /// The empty string.
+ EmptyKind,
+
+ /// A pointer to a Twine instance.
+ TwineKind,
+
+ /// A pointer to a C string instance.
+ CStringKind,
+
+ /// A pointer to an std::string instance.
+ StdStringKind,
+
+      /// A Pointer and Length representation. Used for std::string_view,
+      /// StringRef, and SmallString. Can't use a StringRef here
+      /// because it is not trivially constructible.
+ PtrAndLengthKind,
+
+ /// A pointer to a formatv_object_base instance.
+ FormatvObjectKind,
+
+ /// A char value, to render as a character.
+ CharKind,
+
+ /// An unsigned int value, to render as an unsigned decimal integer.
+ DecUIKind,
+
+ /// An int value, to render as a signed decimal integer.
+ DecIKind,
+
+ /// A pointer to an unsigned long value, to render as an unsigned decimal
+ /// integer.
+ DecULKind,
+
+ /// A pointer to a long value, to render as a signed decimal integer.
+ DecLKind,
+
+ /// A pointer to an unsigned long long value, to render as an unsigned
+ /// decimal integer.
+ DecULLKind,
+
+ /// A pointer to a long long value, to render as a signed decimal integer.
+ DecLLKind,
+
+ /// A pointer to a uint64_t value, to render as an unsigned hexadecimal
+ /// integer.
+ UHexKind
+ };
+
+ union Child
+ {
+ const Twine *twine;
+ const char *cString;
+ const std::string *stdString;
+ struct {
+ const char *ptr;
+ size_t length;
+ } ptrAndLength;
+ const formatv_object_base *formatvObject;
+ char character;
+ unsigned int decUI;
+ int decI;
+ const unsigned long *decUL;
+ const long *decL;
+ const unsigned long long *decULL;
+ const long long *decLL;
+ const uint64_t *uHex;
+ };
+
+ /// LHS - The prefix in the concatenation, which may be uninitialized for
+ /// Null or Empty kinds.
+ Child LHS;
+
+ /// RHS - The suffix in the concatenation, which may be uninitialized for
+ /// Null or Empty kinds.
+ Child RHS;
+
+ /// LHSKind - The NodeKind of the left hand side, \see getLHSKind().
+ NodeKind LHSKind = EmptyKind;
+
+ /// RHSKind - The NodeKind of the right hand side, \see getRHSKind().
+ NodeKind RHSKind = EmptyKind;
+
+ /// Construct a nullary twine; the kind must be NullKind or EmptyKind.
+ explicit Twine(NodeKind Kind) : LHSKind(Kind) {
+ assert(isNullary() && "Invalid kind!");
+ }
+
+ /// Construct a binary twine.
+ explicit Twine(const Twine &LHS, const Twine &RHS)
+ : LHSKind(TwineKind), RHSKind(TwineKind) {
+ this->LHS.twine = &LHS;
+ this->RHS.twine = &RHS;
+ assert(isValid() && "Invalid twine!");
+ }
+
+ /// Construct a twine from explicit values.
+ explicit Twine(Child LHS, NodeKind LHSKind, Child RHS, NodeKind RHSKind)
+ : LHS(LHS), RHS(RHS), LHSKind(LHSKind), RHSKind(RHSKind) {
+ assert(isValid() && "Invalid twine!");
+ }
+
+ /// Check for the null twine.
+ bool isNull() const {
+ return getLHSKind() == NullKind;
+ }
+
+ /// Check for the empty twine.
+ bool isEmpty() const {
+ return getLHSKind() == EmptyKind;
+ }
+
+ /// Check if this is a nullary twine (null or empty).
+ bool isNullary() const {
+ return isNull() || isEmpty();
+ }
+
+ /// Check if this is a unary twine.
+ bool isUnary() const {
+ return getRHSKind() == EmptyKind && !isNullary();
+ }
+
+ /// Check if this is a binary twine.
+ bool isBinary() const {
+ return getLHSKind() != NullKind && getRHSKind() != EmptyKind;
+ }
+
+ /// Check if this is a valid twine (satisfying the invariants on
+ /// order and number of arguments).
+ bool isValid() const {
+ // Nullary twines always have Empty on the RHS.
+ if (isNullary() && getRHSKind() != EmptyKind)
+ return false;
+
+ // Null should never appear on the RHS.
+ if (getRHSKind() == NullKind)
+ return false;
+
+ // The RHS cannot be non-empty if the LHS is empty.
+ if (getRHSKind() != EmptyKind && getLHSKind() == EmptyKind)
+ return false;
+
+ // A twine child should always be binary.
+ if (getLHSKind() == TwineKind &&
+ !LHS.twine->isBinary())
+ return false;
+ if (getRHSKind() == TwineKind &&
+ !RHS.twine->isBinary())
+ return false;
+
+ return true;
+ }
+
+ /// Get the NodeKind of the left-hand side.
+ NodeKind getLHSKind() const { return LHSKind; }
+
+ /// Get the NodeKind of the right-hand side.
+ NodeKind getRHSKind() const { return RHSKind; }
+
+ /// Print one child from a twine.
+ void printOneChild(raw_ostream &OS, Child Ptr, NodeKind Kind) const;
+
+ /// Print the representation of one child from a twine.
+ void printOneChildRepr(raw_ostream &OS, Child Ptr,
+ NodeKind Kind) const;
+
+ public:
+ /// @name Constructors
+ /// @{
+
+ /// Construct from an empty string.
+ /*implicit*/ Twine() {
+ assert(isValid() && "Invalid twine!");
+ }
+
+ Twine(const Twine &) = default;
+
+ /// Construct from a C string.
+ ///
+    /// We take care here to optimize "" into the empty twine -- this will be
+    /// optimized out for string constants. This allows Twine arguments to have
+    /// default "" values, without introducing unnecessary string constants.
+ /*implicit*/ Twine(const char *Str) {
+ if (Str[0] != '\0') {
+ LHS.cString = Str;
+ LHSKind = CStringKind;
+ } else
+ LHSKind = EmptyKind;
+
+ assert(isValid() && "Invalid twine!");
+ }
+ /// Delete the implicit conversion from nullptr as Twine(const char *)
+ /// cannot take nullptr.
+ /*implicit*/ Twine(std::nullptr_t) = delete;
+
+ /// Construct from an std::string.
+ /*implicit*/ Twine(const std::string &Str) : LHSKind(StdStringKind) {
+ LHS.stdString = &Str;
+ assert(isValid() && "Invalid twine!");
+ }
+
+#if __cplusplus > 201402L
+    /// Construct from an std::string_view by converting it to a pointer and
+    /// length. This handles string_views on a pure API basis, and avoids
+    /// storing one (or a pointer to one) inside a Twine, which sidesteps
+    /// problems when mixing code compiled under various C++ standards.
+ /*implicit*/ Twine(const std::string_view &Str)
+ : LHSKind(PtrAndLengthKind) {
+ LHS.ptrAndLength.ptr = Str.data();
+ LHS.ptrAndLength.length = Str.length();
+ assert(isValid() && "Invalid twine!");
+ }
+#endif
+
+ /// Construct from a StringRef.
+ /*implicit*/ Twine(const StringRef &Str) : LHSKind(PtrAndLengthKind) {
+ LHS.ptrAndLength.ptr = Str.data();
+ LHS.ptrAndLength.length = Str.size();
+ assert(isValid() && "Invalid twine!");
+ }
+
+ /// Construct from a SmallString.
+ /*implicit*/ Twine(const SmallVectorImpl<char> &Str)
+ : LHSKind(PtrAndLengthKind) {
+ LHS.ptrAndLength.ptr = Str.data();
+ LHS.ptrAndLength.length = Str.size();
+ assert(isValid() && "Invalid twine!");
+ }
+
+ /// Construct from a formatv_object_base.
+ /*implicit*/ Twine(const formatv_object_base &Fmt)
+ : LHSKind(FormatvObjectKind) {
+ LHS.formatvObject = &Fmt;
+ assert(isValid() && "Invalid twine!");
+ }
+
+ /// Construct from a char.
+ explicit Twine(char Val) : LHSKind(CharKind) {
+ LHS.character = Val;
+ }
+
+ /// Construct from a signed char.
+ explicit Twine(signed char Val) : LHSKind(CharKind) {
+ LHS.character = static_cast<char>(Val);
+ }
+
+ /// Construct from an unsigned char.
+ explicit Twine(unsigned char Val) : LHSKind(CharKind) {
+ LHS.character = static_cast<char>(Val);
+ }
+
+ /// Construct a twine to print \p Val as an unsigned decimal integer.
+ explicit Twine(unsigned Val) : LHSKind(DecUIKind) {
+ LHS.decUI = Val;
+ }
+
+ /// Construct a twine to print \p Val as a signed decimal integer.
+ explicit Twine(int Val) : LHSKind(DecIKind) {
+ LHS.decI = Val;
+ }
+
+ /// Construct a twine to print \p Val as an unsigned decimal integer.
+ explicit Twine(const unsigned long &Val) : LHSKind(DecULKind) {
+ LHS.decUL = &Val;
+ }
+
+ /// Construct a twine to print \p Val as a signed decimal integer.
+ explicit Twine(const long &Val) : LHSKind(DecLKind) {
+ LHS.decL = &Val;
+ }
+
+ /// Construct a twine to print \p Val as an unsigned decimal integer.
+ explicit Twine(const unsigned long long &Val) : LHSKind(DecULLKind) {
+ LHS.decULL = &Val;
+ }
+
+ /// Construct a twine to print \p Val as a signed decimal integer.
+ explicit Twine(const long long &Val) : LHSKind(DecLLKind) {
+ LHS.decLL = &Val;
+ }
+
+ // FIXME: Unfortunately, to make sure this is as efficient as possible we
+ // need extra binary constructors from particular types. We can't rely on
+ // the compiler to be smart enough to fold operator+()/concat() down to the
+ // right thing. Yet.
+
+ /// Construct as the concatenation of a C string and a StringRef.
+ /*implicit*/ Twine(const char *LHS, const StringRef &RHS)
+ : LHSKind(CStringKind), RHSKind(PtrAndLengthKind) {
+ this->LHS.cString = LHS;
+ this->RHS.ptrAndLength.ptr = RHS.data();
+ this->RHS.ptrAndLength.length = RHS.size();
+ assert(isValid() && "Invalid twine!");
+ }
+
+ /// Construct as the concatenation of a StringRef and a C string.
+ /*implicit*/ Twine(const StringRef &LHS, const char *RHS)
+ : LHSKind(PtrAndLengthKind), RHSKind(CStringKind) {
+ this->LHS.ptrAndLength.ptr = LHS.data();
+ this->LHS.ptrAndLength.length = LHS.size();
+ this->RHS.cString = RHS;
+ assert(isValid() && "Invalid twine!");
+ }
+
+    /// Since the intended use of twines is as temporary objects, assignments
+    /// when concatenating might cause undefined behavior or stack corruption.
+    Twine &operator=(const Twine &) = delete;
+
+ /// Create a 'null' string, which is an empty string that always
+ /// concatenates to form another empty string.
+ static Twine createNull() {
+ return Twine(NullKind);
+ }
+
+ /// @}
+ /// @name Numeric Conversions
+ /// @{
+
+    /// Construct a twine to print \p Val as an unsigned hexadecimal integer.
+ static Twine utohexstr(const uint64_t &Val) {
+ Child LHS, RHS;
+ LHS.uHex = &Val;
+ RHS.twine = nullptr;
+ return Twine(LHS, UHexKind, RHS, EmptyKind);
+ }
+
+ /// @}
+ /// @name Predicate Operations
+ /// @{
+
+ /// Check if this twine is trivially empty; a false return value does not
+ /// necessarily mean the twine is empty.
+ bool isTriviallyEmpty() const {
+ return isNullary();
+ }
+
+ /// Return true if this twine can be dynamically accessed as a single
+ /// StringRef value with getSingleStringRef().
+ bool isSingleStringRef() const {
+ if (getRHSKind() != EmptyKind) return false;
+
+ switch (getLHSKind()) {
+ case EmptyKind:
+ case CStringKind:
+ case StdStringKind:
+ case PtrAndLengthKind:
+ return true;
+ default:
+ return false;
+ }
+ }
+
+ /// @}
+ /// @name String Operations
+ /// @{
+
+ Twine concat(const Twine &Suffix) const;
+
+ /// @}
+ /// @name Output & Conversion.
+ /// @{
+
+ /// Return the twine contents as a std::string.
+ std::string str() const;
+
+ /// Append the concatenated string into the given SmallString or SmallVector.
+ void toVector(SmallVectorImpl<char> &Out) const;
+
+ /// This returns the twine as a single StringRef. This method is only valid
+ /// if isSingleStringRef() is true.
+ StringRef getSingleStringRef() const {
+      assert(isSingleStringRef() && "This cannot be had as a single stringref!");
+ switch (getLHSKind()) {
+ default: llvm_unreachable("Out of sync with isSingleStringRef");
+ case EmptyKind:
+ return StringRef();
+ case CStringKind:
+ return StringRef(LHS.cString);
+ case StdStringKind:
+ return StringRef(*LHS.stdString);
+ case PtrAndLengthKind:
+ return StringRef(LHS.ptrAndLength.ptr, LHS.ptrAndLength.length);
+ }
+ }
+
+ /// This returns the twine as a single StringRef if it can be
+ /// represented as such. Otherwise the twine is written into the given
+ /// SmallVector and a StringRef to the SmallVector's data is returned.
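+    ///
+    /// A typical use (a sketch; Counter is a hypothetical integer):
+    /// \code
+    ///   SmallVector<char, 128> Storage;
+    ///   StringRef Name = ("tmp." + Twine(Counter)).toStringRef(Storage);
+    /// \endcode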
+ StringRef toStringRef(SmallVectorImpl<char> &Out) const {
+ if (isSingleStringRef())
+ return getSingleStringRef();
+ toVector(Out);
+ return StringRef(Out.data(), Out.size());
+ }
+
+ /// This returns the twine as a single null terminated StringRef if it
+ /// can be represented as such. Otherwise the twine is written into the
+ /// given SmallVector and a StringRef to the SmallVector's data is returned.
+ ///
+ /// The returned StringRef's size does not include the null terminator.
+ StringRef toNullTerminatedStringRef(SmallVectorImpl<char> &Out) const;
+
+ /// Write the concatenated string represented by this twine to the
+ /// stream \p OS.
+ void print(raw_ostream &OS) const;
+
+ /// Dump the concatenated string represented by this twine to stderr.
+ void dump() const;
+
+ /// Write the representation of this twine to the stream \p OS.
+ void printRepr(raw_ostream &OS) const;
+
+ /// Dump the representation of this twine to stderr.
+ void dumpRepr() const;
+
+ /// @}
+ };
+
+ /// @name Twine Inline Implementations
+ /// @{
+
+ inline Twine Twine::concat(const Twine &Suffix) const {
+ // Concatenation with null is null.
+ if (isNull() || Suffix.isNull())
+ return Twine(NullKind);
+
+ // Concatenation with empty yields the other side.
+ if (isEmpty())
+ return Suffix;
+ if (Suffix.isEmpty())
+ return *this;
+
+ // Otherwise we need to create a new node, taking care to fold in unary
+ // twines.
+ Child NewLHS, NewRHS;
+ NewLHS.twine = this;
+ NewRHS.twine = &Suffix;
+ NodeKind NewLHSKind = TwineKind, NewRHSKind = TwineKind;
+ if (isUnary()) {
+ NewLHS = LHS;
+ NewLHSKind = getLHSKind();
+ }
+ if (Suffix.isUnary()) {
+ NewRHS = Suffix.LHS;
+ NewRHSKind = Suffix.getLHSKind();
+ }
+
+ return Twine(NewLHS, NewLHSKind, NewRHS, NewRHSKind);
+ }
+
+ inline Twine operator+(const Twine &LHS, const Twine &RHS) {
+ return LHS.concat(RHS);
+ }
+
+  /// Additional overload to guarantee simplified codegen; this is equivalent to
+  /// concat().
+  inline Twine operator+(const char *LHS, const StringRef &RHS) {
+ return Twine(LHS, RHS);
+ }
+
+  /// Additional overload to guarantee simplified codegen; this is equivalent to
+  /// concat().
+  inline Twine operator+(const StringRef &LHS, const char *RHS) {
+ return Twine(LHS, RHS);
+ }
+
+ inline raw_ostream &operator<<(raw_ostream &OS, const Twine &RHS) {
+ RHS.print(OS);
+ return OS;
+ }
+
+ /// @}
+
+} // end namespace llvm
+
+#endif // LLVM_ADT_TWINE_H
+
+#ifdef __GNUC__
+#pragma GCC diagnostic pop
+#endif
diff --git a/contrib/libs/llvm14/include/llvm/ADT/UniqueVector.h b/contrib/libs/llvm14/include/llvm/ADT/UniqueVector.h
new file mode 100644
index 0000000000..091169be07
--- /dev/null
+++ b/contrib/libs/llvm14/include/llvm/ADT/UniqueVector.h
@@ -0,0 +1,112 @@
+#pragma once
+
+#ifdef __GNUC__
+#pragma GCC diagnostic push
+#pragma GCC diagnostic ignored "-Wunused-parameter"
+#endif
+
+//===- llvm/ADT/UniqueVector.h ----------------------------------*- C++ -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_ADT_UNIQUEVECTOR_H
+#define LLVM_ADT_UNIQUEVECTOR_H
+
+#include <cassert>
+#include <cstddef>
+#include <map>
+#include <vector>
+
+namespace llvm {
+
+//===----------------------------------------------------------------------===//
+/// UniqueVector - This class produces a sequential ID number (base 1) for each
+/// unique entry that is added. T is the type of entries in the vector. T
+/// should have implementations of operator== and operator<. Entries can be
+/// fetched using operator[] with the entry ID.
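+///
+/// For example (a minimal sketch):
+/// \code
+///   UniqueVector<std::string> Names;
+///   unsigned FooID = Names.insert("foo"); // returns 1
+///   unsigned BarID = Names.insert("bar"); // returns 2
+///   assert(Names.insert("foo") == FooID); // duplicates return the prior ID
+///   assert(Names[BarID] == "bar");        // IDs are 1-based
+/// \endcode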
+template<class T> class UniqueVector {
+public:
+ using VectorType = typename std::vector<T>;
+ using iterator = typename VectorType::iterator;
+ using const_iterator = typename VectorType::const_iterator;
+
+private:
+ // Map - Used to handle the correspondence of entry to ID.
+ std::map<T, unsigned> Map;
+
+ // Vector - ID ordered vector of entries. Entries can be indexed by ID - 1.
+ VectorType Vector;
+
+public:
+ /// insert - Append entry to the vector if it doesn't already exist. Returns
+ /// the entry's index + 1 to be used as a unique ID.
+ unsigned insert(const T &Entry) {
+ // Check if the entry is already in the map.
+ unsigned &Val = Map[Entry];
+
+ // See if entry exists, if so return prior ID.
+ if (Val) return Val;
+
+ // Compute ID for entry.
+ Val = static_cast<unsigned>(Vector.size()) + 1;
+
+ // Insert in vector.
+ Vector.push_back(Entry);
+ return Val;
+ }
+
+ /// idFor - return the ID for an existing entry. Returns 0 if the entry is
+ /// not found.
+ unsigned idFor(const T &Entry) const {
+ // Search for entry in the map.
+ typename std::map<T, unsigned>::const_iterator MI = Map.find(Entry);
+
+ // See if entry exists, if so return ID.
+ if (MI != Map.end()) return MI->second;
+
+ // No luck.
+ return 0;
+ }
+
+ /// operator[] - Returns a reference to the entry with the specified ID.
+ const T &operator[](unsigned ID) const {
+ assert(ID-1 < size() && "ID is 0 or out of range!");
+ return Vector[ID - 1];
+ }
+
+ /// Return an iterator to the start of the vector.
+ iterator begin() { return Vector.begin(); }
+
+ /// Return an iterator to the start of the vector.
+ const_iterator begin() const { return Vector.begin(); }
+
+ /// Return an iterator to the end of the vector.
+ iterator end() { return Vector.end(); }
+
+ /// Return an iterator to the end of the vector.
+ const_iterator end() const { return Vector.end(); }
+
+ /// size - Returns the number of entries in the vector.
+ size_t size() const { return Vector.size(); }
+
+ /// empty - Returns true if the vector is empty.
+ bool empty() const { return Vector.empty(); }
+
+ /// reset - Clears all the entries.
+ void reset() {
+ Map.clear();
+    Vector.clear();
+ }
+};
+
+} // end namespace llvm
+
+#endif // LLVM_ADT_UNIQUEVECTOR_H
+
+#ifdef __GNUC__
+#pragma GCC diagnostic pop
+#endif
diff --git a/contrib/libs/llvm14/include/llvm/ADT/bit.h b/contrib/libs/llvm14/include/llvm/ADT/bit.h
new file mode 100644
index 0000000000..9831531312
--- /dev/null
+++ b/contrib/libs/llvm14/include/llvm/ADT/bit.h
@@ -0,0 +1,75 @@
+#pragma once
+
+#ifdef __GNUC__
+#pragma GCC diagnostic push
+#pragma GCC diagnostic ignored "-Wunused-parameter"
+#endif
+
+//===-- llvm/ADT/bit.h - C++20 <bit> ----------------------------*- C++ -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+///
+/// \file
+/// This file implements the C++20 <bit> header.
+///
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_ADT_BIT_H
+#define LLVM_ADT_BIT_H
+
+#include "llvm/Support/Compiler.h"
+#include <cstring>
+#include <type_traits>
+
+namespace llvm {
+
+// This implementation of bit_cast is different from the C++20 one in two ways:
+// - It isn't constexpr because that requires compiler support.
+// - It requires trivially-constructible To, to avoid UB in the implementation.
+template <
+ typename To, typename From,
+ typename = std::enable_if_t<sizeof(To) == sizeof(From)>
+#if (__has_feature(is_trivially_constructible) && defined(_LIBCPP_VERSION)) || \
+ (defined(__GNUC__) && __GNUC__ >= 5)
+ ,
+ typename = std::enable_if_t<std::is_trivially_constructible<To>::value>
+#elif __has_feature(is_trivially_constructible)
+ ,
+ typename = std::enable_if_t<__is_trivially_constructible(To)>
+#else
+ // See comment below.
+#endif
+#if (__has_feature(is_trivially_copyable) && defined(_LIBCPP_VERSION)) || \
+ (defined(__GNUC__) && __GNUC__ >= 5)
+ ,
+ typename = std::enable_if_t<std::is_trivially_copyable<To>::value>,
+ typename = std::enable_if_t<std::is_trivially_copyable<From>::value>
+#elif __has_feature(is_trivially_copyable)
+ ,
+ typename = std::enable_if_t<__is_trivially_copyable(To)>,
+ typename = std::enable_if_t<__is_trivially_copyable(From)>
+#else
+// This case is GCC 4.x. clang with libc++ or libstdc++ never gets here. Unlike
+// llvm/Support/type_traits.h's is_trivially_copyable we don't want to
+// provide a good-enough answer here: developers in that configuration will hit
+// compilation failures on the bots instead of locally. That's acceptable
+// because it's very few developers, and only until we move past C++11.
+#endif
+ >
+inline To bit_cast(const From &from) noexcept {
+ To to;
+ std::memcpy(&to, &from, sizeof(To));
+ return to;
+}
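+
+// For example (a sketch): inspect the bit pattern of a float without the
+// undefined behavior of a pointer-based type pun.
+//
+//   float F = 1.0f;
+//   uint32_t Bits = llvm::bit_cast<uint32_t>(F); // 0x3f800000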
+
+} // namespace llvm
+
+#endif
+
+#ifdef __GNUC__
+#pragma GCC diagnostic pop
+#endif
diff --git a/contrib/libs/llvm14/include/llvm/ADT/edit_distance.h b/contrib/libs/llvm14/include/llvm/ADT/edit_distance.h
new file mode 100644
index 0000000000..3edbcf6583
--- /dev/null
+++ b/contrib/libs/llvm14/include/llvm/ADT/edit_distance.h
@@ -0,0 +1,114 @@
+#pragma once
+
+#ifdef __GNUC__
+#pragma GCC diagnostic push
+#pragma GCC diagnostic ignored "-Wunused-parameter"
+#endif
+
+//===-- llvm/ADT/edit_distance.h - Array edit distance function --- C++ -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+///
+/// \file
+/// This file defines a Levenshtein distance function that works for any two
+/// sequences, with each element of each sequence being analogous to a character
+/// in a string.
+///
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_ADT_EDIT_DISTANCE_H
+#define LLVM_ADT_EDIT_DISTANCE_H
+
+#include "llvm/ADT/ArrayRef.h"
+#include <algorithm>
+#include <memory>
+
+namespace llvm {
+
+/// Determine the edit distance between two sequences.
+///
+/// \param FromArray the first sequence to compare.
+///
+/// \param ToArray the second sequence to compare.
+///
+/// \param AllowReplacements whether to allow element replacements (change one
+/// element into another) as a single operation, rather than as two operations
+/// (an insertion and a removal).
+///
+/// \param MaxEditDistance If non-zero, the maximum edit distance that this
+/// routine is allowed to compute. If the edit distance will exceed that
+/// maximum, returns \c MaxEditDistance+1.
+///
+/// \returns the minimum number of element insertions, removals, or (if
+/// \p AllowReplacements is \c true) replacements needed to transform one of
+/// the given sequences into the other. If zero, the sequences are identical.
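+///
+/// For example (a sketch):
+/// \code
+///   StringRef From = "kitten", To = "sitting";
+///   unsigned D = ComputeEditDistance(makeArrayRef(From.data(), From.size()),
+///                                    makeArrayRef(To.data(), To.size()));
+///   // D == 3: substitute 'k'->'s', substitute 'e'->'i', append 'g'.
+/// \endcode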
+template<typename T>
+unsigned ComputeEditDistance(ArrayRef<T> FromArray, ArrayRef<T> ToArray,
+ bool AllowReplacements = true,
+ unsigned MaxEditDistance = 0) {
+ // The algorithm implemented below is the "classic"
+ // dynamic-programming algorithm for computing the Levenshtein
+ // distance, which is described here:
+ //
+ // http://en.wikipedia.org/wiki/Levenshtein_distance
+ //
+ // Although the algorithm is typically described using an m x n
+ // array, only one row plus one element are used at a time, so this
+ // implementation just keeps one vector for the row. To update one entry,
+ // only the entries to the left, top, and top-left are needed. The left
+ // entry is in Row[x-1], the top entry is what's in Row[x] from the last
+ // iteration, and the top-left entry is stored in Previous.
+ typename ArrayRef<T>::size_type m = FromArray.size();
+ typename ArrayRef<T>::size_type n = ToArray.size();
+
+ const unsigned SmallBufferSize = 64;
+ unsigned SmallBuffer[SmallBufferSize];
+ std::unique_ptr<unsigned[]> Allocated;
+ unsigned *Row = SmallBuffer;
+ if (n + 1 > SmallBufferSize) {
+ Row = new unsigned[n + 1];
+ Allocated.reset(Row);
+ }
+
+ for (unsigned i = 1; i <= n; ++i)
+ Row[i] = i;
+
+ for (typename ArrayRef<T>::size_type y = 1; y <= m; ++y) {
+ Row[0] = y;
+ unsigned BestThisRow = Row[0];
+
+ unsigned Previous = y - 1;
+ for (typename ArrayRef<T>::size_type x = 1; x <= n; ++x) {
+ int OldRow = Row[x];
+ if (AllowReplacements) {
+ Row[x] = std::min(
+ Previous + (FromArray[y-1] == ToArray[x-1] ? 0u : 1u),
+ std::min(Row[x-1], Row[x])+1);
+ }
+ else {
+ if (FromArray[y-1] == ToArray[x-1]) Row[x] = Previous;
+ else Row[x] = std::min(Row[x-1], Row[x]) + 1;
+ }
+ Previous = OldRow;
+ BestThisRow = std::min(BestThisRow, Row[x]);
+ }
+
+ if (MaxEditDistance && BestThisRow > MaxEditDistance)
+ return MaxEditDistance + 1;
+ }
+
+ unsigned Result = Row[n];
+ return Result;
+}
+
+} // End llvm namespace
+
+#endif
+
+#ifdef __GNUC__
+#pragma GCC diagnostic pop
+#endif
diff --git a/contrib/libs/llvm14/include/llvm/ADT/fallible_iterator.h b/contrib/libs/llvm14/include/llvm/ADT/fallible_iterator.h
new file mode 100644
index 0000000000..b016701789
--- /dev/null
+++ b/contrib/libs/llvm14/include/llvm/ADT/fallible_iterator.h
@@ -0,0 +1,252 @@
+#pragma once
+
+#ifdef __GNUC__
+#pragma GCC diagnostic push
+#pragma GCC diagnostic ignored "-Wunused-parameter"
+#endif
+
+//===--- fallible_iterator.h - Wrapper for fallible iterators ---*- C++ -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_ADT_FALLIBLE_ITERATOR_H
+#define LLVM_ADT_FALLIBLE_ITERATOR_H
+
+#include "llvm/ADT/PointerIntPair.h"
+#include "llvm/ADT/iterator_range.h"
+#include "llvm/Support/Error.h"
+
+#include <type_traits>
+
+namespace llvm {
+
+/// A wrapper class for fallible iterators.
+///
+/// The fallible_iterator template wraps an underlying iterator-like class
+/// whose increment and decrement operations are replaced with fallible versions
+/// like:
+///
+/// @code{.cpp}
+///   Error inc();
+///   Error dec();
+/// @endcode
+///
+/// It produces an interface that is (mostly) compatible with a traditional
+/// c++ iterator, including ++ and -- operators that do not fail.
+///
+/// Instances of the wrapper are constructed with an instance of the
+/// underlying iterator and (for non-end iterators) a reference to an Error
+/// instance. If the underlying increment/decrement operations fail, the Error
+/// is returned via this reference, and the resulting iterator value is set to
+/// an end-of-range sentinel value. This enables the following loop idiom:
+///
+/// @code{.cpp}
+///   class Archive { // E.g. Potentially malformed on-disk archive
+///   public:
+///     fallible_iterator<ArchiveChildItr> children_begin(Error &Err);
+///     fallible_iterator<ArchiveChildItr> children_end();
+///     iterator_range<fallible_iterator<ArchiveChildItr>>
+///     children(Error &Err) {
+///       return make_range(children_begin(Err), children_end());
+///     }
+///     //...
+///   };
+///
+///   void walk(Archive &A) {
+///     Error Err = Error::success();
+///     for (auto &C : A.children(Err)) {
+///       // Loop body only entered when increment succeeds.
+///     }
+///     if (Err) {
+///       // handle error.
+///     }
+///   }
+/// @endcode
+///
+/// The wrapper marks the referenced Error as unchecked after each increment
+/// and/or decrement operation, and clears the unchecked flag when a non-end
+/// value is compared against end (since, by the increment invariant, not being
+/// an end value proves that there was no error, and is equivalent to checking
+/// that the Error is success). This allows early exits from the loop body
+/// without requiring redundant error checks.
+template <typename Underlying> class fallible_iterator {
+private:
+ template <typename T>
+ using enable_if_struct_deref_supported = std::enable_if<
+ !std::is_void<decltype(std::declval<T>().operator->())>::value,
+ decltype(std::declval<T>().operator->())>;
+
+public:
+ /// Construct a fallible iterator that *cannot* be used as an end-of-range
+ /// value.
+ ///
+ /// A value created by this method can be dereferenced, incremented,
+ /// decremented and compared, providing the underlying type supports it.
+ ///
+  /// The error that is passed in will be initially marked as checked, so if
+  /// the iterator is not used at all, the Error need not be checked.
+ static fallible_iterator itr(Underlying I, Error &Err) {
+ (void)!!Err;
+ return fallible_iterator(std::move(I), &Err);
+ }
+
+ /// Construct a fallible iterator that can be used as an end-of-range value.
+ ///
+ /// A value created by this method can be dereferenced (if the underlying
+ /// value points at a valid value) and compared, but not incremented or
+ /// decremented.
+ static fallible_iterator end(Underlying I) {
+ return fallible_iterator(std::move(I), nullptr);
+ }
+
+ /// Forward dereference to the underlying iterator.
+ decltype(auto) operator*() { return *I; }
+
+ /// Forward const dereference to the underlying iterator.
+ decltype(auto) operator*() const { return *I; }
+
+ /// Forward structure dereference to the underlying iterator (if the
+ /// underlying iterator supports it).
+ template <typename T = Underlying>
+ typename enable_if_struct_deref_supported<T>::type operator->() {
+ return I.operator->();
+ }
+
+ /// Forward const structure dereference to the underlying iterator (if the
+ /// underlying iterator supports it).
+ template <typename T = Underlying>
+ typename enable_if_struct_deref_supported<const T>::type operator->() const {
+ return I.operator->();
+ }
+
+ /// Increment the fallible iterator.
+ ///
+ /// If the underlying 'inc' operation fails, this will set the Error value
+ /// and update this iterator value to point to end-of-range.
+ ///
+ /// The Error value is marked as needing checking, regardless of whether the
+ /// 'inc' operation succeeds or fails.
+ fallible_iterator &operator++() {
+ assert(getErrPtr() && "Cannot increment end iterator");
+ if (auto Err = I.inc())
+ handleError(std::move(Err));
+ else
+ resetCheckedFlag();
+ return *this;
+ }
+
+ /// Decrement the fallible iterator.
+ ///
+ /// If the underlying 'dec' operation fails, this will set the Error value
+ /// and update this iterator value to point to end-of-range.
+ ///
+ /// The Error value is marked as needing checking, regardless of whether the
+ /// 'dec' operation succeeds or fails.
+ fallible_iterator &operator--() {
+ assert(getErrPtr() && "Cannot decrement end iterator");
+ if (auto Err = I.dec())
+ handleError(std::move(Err));
+ else
+ resetCheckedFlag();
+ return *this;
+ }
+
+ /// Compare fallible iterators for equality.
+ ///
+ /// Returns true if both LHS and RHS are end-of-range values, or if both are
+ /// non-end-of-range values whose underlying iterator values compare equal.
+ ///
+ /// If this is a comparison between an end-of-range iterator and a
+ /// non-end-of-range iterator, then the Error (referenced by the
+ /// non-end-of-range value) is marked as checked: Since all
+ /// increment/decrement operations result in an end-of-range value, comparing
+ /// false against end-of-range is equivalent to checking that the Error value
+ /// is success. This flag management enables early returns from loop bodies
+ /// without redundant Error checks.
+ friend bool operator==(const fallible_iterator &LHS,
+ const fallible_iterator &RHS) {
+ // If both iterators are in the end state they compare
+ // equal, regardless of whether either is valid.
+ if (LHS.isEnd() && RHS.isEnd())
+ return true;
+
+ assert(LHS.isValid() && RHS.isValid() &&
+ "Invalid iterators can only be compared against end");
+
+ bool Equal = LHS.I == RHS.I;
+
+    // If the iterators differ and this is a comparison against end, then mark
+    // the Error as checked.
+ if (!Equal) {
+ if (LHS.isEnd())
+ (void)!!*RHS.getErrPtr();
+ else
+ (void)!!*LHS.getErrPtr();
+ }
+
+ return Equal;
+ }
+
+ /// Compare fallible iterators for inequality.
+ ///
+ /// See notes for operator==.
+ friend bool operator!=(const fallible_iterator &LHS,
+ const fallible_iterator &RHS) {
+ return !(LHS == RHS);
+ }
+
+private:
+ fallible_iterator(Underlying I, Error *Err)
+ : I(std::move(I)), ErrState(Err, false) {}
+
+ Error *getErrPtr() const { return ErrState.getPointer(); }
+
+ bool isEnd() const { return getErrPtr() == nullptr; }
+
+ bool isValid() const { return !ErrState.getInt(); }
+
+ void handleError(Error Err) {
+ *getErrPtr() = std::move(Err);
+ ErrState.setPointer(nullptr);
+ ErrState.setInt(true);
+ }
+
+ void resetCheckedFlag() {
+ *getErrPtr() = Error::success();
+ }
+
+ Underlying I;
+ mutable PointerIntPair<Error *, 1> ErrState;
+};
+
+/// Convenience wrapper to make a fallible_iterator value from an instance
+/// of an underlying iterator and an Error reference.
+template <typename Underlying>
+fallible_iterator<Underlying> make_fallible_itr(Underlying I, Error &Err) {
+ return fallible_iterator<Underlying>::itr(std::move(I), Err);
+}
+
+/// Convenience wrapper to make a fallible_iterator end value from an instance
+/// of an underlying iterator.
+template <typename Underlying>
+fallible_iterator<Underlying> make_fallible_end(Underlying E) {
+ return fallible_iterator<Underlying>::end(std::move(E));
+}
+
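+/// Convenience wrapper to make an iterator_range of fallible_iterators from a
+/// pair of underlying iterators and an Error reference, suitable for use in a
+/// range-based for loop (see the walk() example above).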
+template <typename Underlying>
+iterator_range<fallible_iterator<Underlying>>
+make_fallible_range(Underlying I, Underlying E, Error &Err) {
+ return make_range(make_fallible_itr(std::move(I), Err),
+ make_fallible_end(std::move(E)));
+}
+
+} // end namespace llvm
+
+#endif // LLVM_ADT_FALLIBLE_ITERATOR_H
+
+#ifdef __GNUC__
+#pragma GCC diagnostic pop
+#endif
diff --git a/contrib/libs/llvm14/include/llvm/ADT/identity.h b/contrib/libs/llvm14/include/llvm/ADT/identity.h
new file mode 100644
index 0000000000..498374edb2
--- /dev/null
+++ b/contrib/libs/llvm14/include/llvm/ADT/identity.h
@@ -0,0 +1,45 @@
+#pragma once
+
+#ifdef __GNUC__
+#pragma GCC diagnostic push
+#pragma GCC diagnostic ignored "-Wunused-parameter"
+#endif
+
+//===- llvm/ADT/Identity.h - Provide std::identity from C++20 ---*- C++ -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// This file provides an implementation of std::identity from C++20.
+//
+// No library is required when using these functions.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_ADT_IDENTITY_H
+#define LLVM_ADT_IDENTITY_H
+
+
+namespace llvm {
+
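+/// A functor that returns its argument unchanged, mirroring C++20's
+/// std::identity; e.g. identity<int>()(42) yields 42.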
+template <class Ty> struct identity {
+ using argument_type = Ty;
+
+ Ty &operator()(Ty &self) const {
+ return self;
+ }
+ const Ty &operator()(const Ty &self) const {
+ return self;
+ }
+};
+
+} // end namespace llvm
+
+#endif // LLVM_ADT_IDENTITY_H
+
+#ifdef __GNUC__
+#pragma GCC diagnostic pop
+#endif
diff --git a/contrib/libs/llvm14/include/llvm/ADT/ilist.h b/contrib/libs/llvm14/include/llvm/ADT/ilist.h
new file mode 100644
index 0000000000..ff61d717f9
--- /dev/null
+++ b/contrib/libs/llvm14/include/llvm/ADT/ilist.h
@@ -0,0 +1,433 @@
+#pragma once
+
+#ifdef __GNUC__
+#pragma GCC diagnostic push
+#pragma GCC diagnostic ignored "-Wunused-parameter"
+#endif
+
+//==-- llvm/ADT/ilist.h - Intrusive Linked List Template ---------*- C++ -*-==//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+///
+/// \file
+/// This file defines classes to implement an intrusive doubly linked list class
+/// (i.e. each node of the list must contain next and previous fields for the
+/// list).
+///
+/// The ilist class itself should be a plug-in replacement for std::list. This
+/// list replacement does not provide a constant-time size() method, so be
+/// careful to use empty() when you really want to know if it's empty.
+///
+/// The ilist class is implemented as a circular list. The list itself contains
+/// a sentinel node, whose Next points at begin() and whose Prev points at
+/// rbegin(). The sentinel node itself serves as end() and rend().
+///
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_ADT_ILIST_H
+#define LLVM_ADT_ILIST_H
+
+#include "llvm/ADT/simple_ilist.h"
+#include <cassert>
+#include <cstddef>
+#include <iterator>
+
+namespace llvm {
+
+/// Use delete by default for iplist and ilist.
+///
+/// Specialize this to get different behaviour for ownership-related API. (If
+/// you really want ownership semantics, consider using std::list or building
+/// something like \a BumpPtrList.)
+///
+/// \see ilist_noalloc_traits
+template <typename NodeTy> struct ilist_alloc_traits {
+ static void deleteNode(NodeTy *V) { delete V; }
+};
+
+/// Custom traits to do nothing on deletion.
+///
+/// Specialize ilist_alloc_traits to inherit from this to disable the
+/// non-intrusive deletion in iplist (which implies ownership).
+///
+/// If you want purely intrusive semantics with no callbacks, consider using \a
+/// simple_ilist instead.
+///
+/// \code
+/// template <>
+/// struct ilist_alloc_traits<MyType> : ilist_noalloc_traits<MyType> {};
+/// \endcode
+template <typename NodeTy> struct ilist_noalloc_traits {
+ static void deleteNode(NodeTy *V) {}
+};
+
+/// Callbacks do nothing by default in iplist and ilist.
+///
+/// Specialize this to use callbacks when nodes change their list membership.
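+///
+/// For example (a sketch; MyNode is a hypothetical node type):
+/// \code
+/// template <> struct ilist_callback_traits<MyNode> {
+///   void addNodeToList(MyNode *N) { /* e.g. record the owning list */ }
+///   void removeNodeFromList(MyNode *N) { /* e.g. clear the back-pointer */ }
+/// };
+/// \endcode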
+template <typename NodeTy> struct ilist_callback_traits {
+ void addNodeToList(NodeTy *) {}
+ void removeNodeFromList(NodeTy *) {}
+
+ /// Callback before transferring nodes to this list. The nodes may already be
+ /// in this same list.
+ template <class Iterator>
+ void transferNodesFromList(ilist_callback_traits &OldList, Iterator /*first*/,
+ Iterator /*last*/) {
+ (void)OldList;
+ }
+};
+
+/// A fragment of the template traits for intrusive lists that provides default
+/// node-related operations.
+///
+/// TODO: Remove this layer of indirection. It's not necessary.
+template <typename NodeTy>
+struct ilist_node_traits : ilist_alloc_traits<NodeTy>,
+ ilist_callback_traits<NodeTy> {};
+
+/// Template traits for intrusive list.
+///
+/// Customize callbacks and allocation semantics.
+template <typename NodeTy>
+struct ilist_traits : public ilist_node_traits<NodeTy> {};
+
+/// Const traits should never be instantiated.
+template <typename Ty> struct ilist_traits<const Ty> {};
+
+namespace ilist_detail {
+
+template <class T> T &make();
+
+/// Type trait to check for a traits class that has a getNext member (as a
+/// canary for any of the ilist_nextprev_traits API).
+template <class TraitsT, class NodeT> struct HasGetNext {
+ typedef char Yes[1];
+ typedef char No[2];
+ template <size_t N> struct SFINAE {};
+
+ template <class U>
+ static Yes &test(U *I, decltype(I->getNext(&make<NodeT>())) * = nullptr);
+ template <class> static No &test(...);
+
+public:
+ static const bool value = sizeof(test<TraitsT>(nullptr)) == sizeof(Yes);
+};
+
+/// Type trait to check for a traits class that has a createSentinel member (as
+/// a canary for any of the ilist_sentinel_traits API).
+template <class TraitsT> struct HasCreateSentinel {
+ typedef char Yes[1];
+ typedef char No[2];
+
+ template <class U>
+ static Yes &test(U *I, decltype(I->createSentinel()) * = nullptr);
+ template <class> static No &test(...);
+
+public:
+ static const bool value = sizeof(test<TraitsT>(nullptr)) == sizeof(Yes);
+};
+
+/// Type trait to check for a traits class that has a createNode member.
+/// Allocation should be managed in a wrapper class, instead of in
+/// ilist_traits.
+template <class TraitsT, class NodeT> struct HasCreateNode {
+ typedef char Yes[1];
+ typedef char No[2];
+ template <size_t N> struct SFINAE {};
+
+ template <class U>
+ static Yes &test(U *I, decltype(I->createNode(make<NodeT>())) * = 0);
+ template <class> static No &test(...);
+
+public:
+ static const bool value = sizeof(test<TraitsT>(nullptr)) == sizeof(Yes);
+};
+
+template <class TraitsT, class NodeT> struct HasObsoleteCustomization {
+ static const bool value = HasGetNext<TraitsT, NodeT>::value ||
+ HasCreateSentinel<TraitsT>::value ||
+ HasCreateNode<TraitsT, NodeT>::value;
+};
+
+} // end namespace ilist_detail
+
+//===----------------------------------------------------------------------===//
+//
+/// A wrapper around an intrusive list with callbacks and non-intrusive
+/// ownership.
+///
+/// This wraps a purely intrusive list (like simple_ilist) with a configurable
+/// traits class. The traits can implement callbacks and customize the
+/// ownership semantics.
+///
+/// This is a subset of ilist functionality that can safely be used on nodes of
+/// polymorphic types, i.e. a heterogeneous list with a common base class that
+/// holds the next/prev pointers. The only state of the list itself is an
+/// ilist_sentinel, which holds pointers to the first and last nodes in the
+/// list.
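+///
+/// Most clients use this through the iplist/ilist aliases defined below (a
+/// sketch; MyNode is a hypothetical type deriving from ilist_node<MyNode>):
+/// \code
+/// iplist<MyNode> List;
+/// List.push_back(new MyNode()); // the list takes ownership
+/// List.clear();                 // deleteNode() is invoked for each node
+/// \endcode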
+template <class IntrusiveListT, class TraitsT>
+class iplist_impl : public TraitsT, IntrusiveListT {
+ typedef IntrusiveListT base_list_type;
+
+public:
+ typedef typename base_list_type::pointer pointer;
+ typedef typename base_list_type::const_pointer const_pointer;
+ typedef typename base_list_type::reference reference;
+ typedef typename base_list_type::const_reference const_reference;
+ typedef typename base_list_type::value_type value_type;
+ typedef typename base_list_type::size_type size_type;
+ typedef typename base_list_type::difference_type difference_type;
+ typedef typename base_list_type::iterator iterator;
+ typedef typename base_list_type::const_iterator const_iterator;
+ typedef typename base_list_type::reverse_iterator reverse_iterator;
+ typedef
+ typename base_list_type::const_reverse_iterator const_reverse_iterator;
+
+private:
+  // TODO: Drop this assertion and the transitive type traits anytime after
+  // v4.0 is branched (i.e., keep them for one release to help out-of-tree code
+  // update).
+ static_assert(
+ !ilist_detail::HasObsoleteCustomization<TraitsT, value_type>::value,
+ "ilist customization points have changed!");
+
+ static bool op_less(const_reference L, const_reference R) { return L < R; }
+ static bool op_equal(const_reference L, const_reference R) { return L == R; }
+
+public:
+ iplist_impl() = default;
+
+ iplist_impl(const iplist_impl &) = delete;
+ iplist_impl &operator=(const iplist_impl &) = delete;
+
+ iplist_impl(iplist_impl &&X)
+ : TraitsT(std::move(static_cast<TraitsT &>(X))),
+ IntrusiveListT(std::move(static_cast<IntrusiveListT &>(X))) {}
+ iplist_impl &operator=(iplist_impl &&X) {
+ *static_cast<TraitsT *>(this) = std::move(static_cast<TraitsT &>(X));
+ *static_cast<IntrusiveListT *>(this) =
+ std::move(static_cast<IntrusiveListT &>(X));
+ return *this;
+ }
+
+ ~iplist_impl() { clear(); }
+
+ // Miscellaneous inspection routines.
+ size_type max_size() const { return size_type(-1); }
+
+ using base_list_type::begin;
+ using base_list_type::end;
+ using base_list_type::rbegin;
+ using base_list_type::rend;
+ using base_list_type::empty;
+ using base_list_type::front;
+ using base_list_type::back;
+
+ void swap(iplist_impl &RHS) {
+ assert(0 && "Swap does not use list traits callback correctly yet!");
+ base_list_type::swap(RHS);
+ }
+
+ iterator insert(iterator where, pointer New) {
+ this->addNodeToList(New); // Notify traits that we added a node...
+ return base_list_type::insert(where, *New);
+ }
+
+ iterator insert(iterator where, const_reference New) {
+ return this->insert(where, new value_type(New));
+ }
+
+ iterator insertAfter(iterator where, pointer New) {
+ if (empty())
+ return insert(begin(), New);
+ else
+ return insert(++where, New);
+ }
+
+ /// Clone another list.
+ template <class Cloner> void cloneFrom(const iplist_impl &L2, Cloner clone) {
+ clear();
+ for (const_reference V : L2)
+ push_back(clone(V));
+ }
+
+ pointer remove(iterator &IT) {
+ pointer Node = &*IT++;
+ this->removeNodeFromList(Node); // Notify traits that we removed a node...
+ base_list_type::remove(*Node);
+ return Node;
+ }
+
+ pointer remove(const iterator &IT) {
+ iterator MutIt = IT;
+ return remove(MutIt);
+ }
+
+ pointer remove(pointer IT) { return remove(iterator(IT)); }
+ pointer remove(reference IT) { return remove(iterator(IT)); }
+
+ // erase - remove a node from the controlled sequence... and delete it.
+ iterator erase(iterator where) {
+ this->deleteNode(remove(where));
+ return where;
+ }
+
+ iterator erase(pointer IT) { return erase(iterator(IT)); }
+ iterator erase(reference IT) { return erase(iterator(IT)); }
+
+ /// Remove all nodes from the list like clear(), but do not call
+ /// removeNodeFromList() or deleteNode().
+ ///
+ /// This should only be used immediately before freeing nodes in bulk to
+ /// avoid traversing the list and bringing all the nodes into cache.
+ void clearAndLeakNodesUnsafely() { base_list_type::clear(); }
+
+private:
+ // transfer - The heart of the splice function. Move linked list nodes from
+ // [first, last) into position.
+ //
+ void transfer(iterator position, iplist_impl &L2, iterator first, iterator last) {
+ if (position == last)
+ return;
+
+ // Notify traits we moved the nodes...
+ this->transferNodesFromList(L2, first, last);
+
+ base_list_type::splice(position, L2, first, last);
+ }
+
+public:
+ //===----------------------------------------------------------------------===
+ // Functionality derived from other functions defined above...
+ //
+
+ using base_list_type::size;
+
+ iterator erase(iterator first, iterator last) {
+ while (first != last)
+ first = erase(first);
+ return last;
+ }
+
+ void clear() { erase(begin(), end()); }
+
+ // Front and back inserters...
+ void push_front(pointer val) { insert(begin(), val); }
+ void push_back(pointer val) { insert(end(), val); }
+ void pop_front() {
+ assert(!empty() && "pop_front() on empty list!");
+ erase(begin());
+ }
+ void pop_back() {
+ assert(!empty() && "pop_back() on empty list!");
+ iterator t = end(); erase(--t);
+ }
+
+ // Special forms of insert...
+ template<class InIt> void insert(iterator where, InIt first, InIt last) {
+ for (; first != last; ++first) insert(where, *first);
+ }
+
+ // Splice members - defined in terms of transfer...
+ void splice(iterator where, iplist_impl &L2) {
+ if (!L2.empty())
+ transfer(where, L2, L2.begin(), L2.end());
+ }
+ void splice(iterator where, iplist_impl &L2, iterator first) {
+ iterator last = first; ++last;
+ if (where == first || where == last) return; // No change
+ transfer(where, L2, first, last);
+ }
+ void splice(iterator where, iplist_impl &L2, iterator first, iterator last) {
+ if (first != last) transfer(where, L2, first, last);
+ }
+ void splice(iterator where, iplist_impl &L2, reference N) {
+ splice(where, L2, iterator(N));
+ }
+ void splice(iterator where, iplist_impl &L2, pointer N) {
+ splice(where, L2, iterator(N));
+ }
+
+ template <class Compare>
+ void merge(iplist_impl &Right, Compare comp) {
+ if (this == &Right)
+ return;
+ this->transferNodesFromList(Right, Right.begin(), Right.end());
+ base_list_type::merge(Right, comp);
+ }
+ void merge(iplist_impl &Right) { return merge(Right, op_less); }
+
+ using base_list_type::sort;
+
+ /// Get the previous node, or \c nullptr for the list head.
+ pointer getPrevNode(reference N) const {
+ auto I = N.getIterator();
+ if (I == begin())
+ return nullptr;
+ return &*std::prev(I);
+ }
+ /// Get the previous node, or \c nullptr for the list head.
+ const_pointer getPrevNode(const_reference N) const {
+    return getPrevNode(const_cast<reference>(N));
+ }
+
+ /// Get the next node, or \c nullptr for the list tail.
+ pointer getNextNode(reference N) const {
+ auto Next = std::next(N.getIterator());
+ if (Next == end())
+ return nullptr;
+ return &*Next;
+ }
+ /// Get the next node, or \c nullptr for the list tail.
+ const_pointer getNextNode(const_reference N) const {
+    return getNextNode(const_cast<reference>(N));
+ }
+};
+
+/// An intrusive list with ownership and callbacks specified/controlled by
+/// ilist_traits, only with API safe for polymorphic types.
+///
+/// The \p Options parameters are the same as those for \a simple_ilist. See
+/// there for a description of what's available.
+template <class T, class... Options>
+class iplist
+ : public iplist_impl<simple_ilist<T, Options...>, ilist_traits<T>> {
+ using iplist_impl_type = typename iplist::iplist_impl;
+
+public:
+ iplist() = default;
+
+ iplist(const iplist &X) = delete;
+ iplist &operator=(const iplist &X) = delete;
+
+ iplist(iplist &&X) : iplist_impl_type(std::move(X)) {}
+ iplist &operator=(iplist &&X) {
+ *static_cast<iplist_impl_type *>(this) = std::move(X);
+ return *this;
+ }
+};
+
+template <class T, class... Options> using ilist = iplist<T, Options...>;
+
+} // end namespace llvm
+
+namespace std {
+
+ // Ensure that swap uses the fast list swap...
+ template<class Ty>
+ void swap(llvm::iplist<Ty> &Left, llvm::iplist<Ty> &Right) {
+ Left.swap(Right);
+ }
+
+} // end namespace std
+
+#endif // LLVM_ADT_ILIST_H
+
+#ifdef __GNUC__
+#pragma GCC diagnostic pop
+#endif
diff --git a/contrib/libs/llvm14/include/llvm/ADT/ilist_base.h b/contrib/libs/llvm14/include/llvm/ADT/ilist_base.h
new file mode 100644
index 0000000000..4d8df7df09
--- /dev/null
+++ b/contrib/libs/llvm14/include/llvm/ADT/ilist_base.h
@@ -0,0 +1,103 @@
+#pragma once
+
+#ifdef __GNUC__
+#pragma GCC diagnostic push
+#pragma GCC diagnostic ignored "-Wunused-parameter"
+#endif
+
+//===- llvm/ADT/ilist_base.h - Intrusive List Base --------------*- C++ -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_ADT_ILIST_BASE_H
+#define LLVM_ADT_ILIST_BASE_H
+
+#include "llvm/ADT/ilist_node_base.h"
+#include <cassert>
+
+namespace llvm {
+
+/// Implementations of list algorithms using ilist_node_base.
+template <bool EnableSentinelTracking> class ilist_base {
+public:
+ using node_base_type = ilist_node_base<EnableSentinelTracking>;
+
+ static void insertBeforeImpl(node_base_type &Next, node_base_type &N) {
+ node_base_type &Prev = *Next.getPrev();
+ N.setNext(&Next);
+ N.setPrev(&Prev);
+ Prev.setNext(&N);
+ Next.setPrev(&N);
+ }
+
+ static void removeImpl(node_base_type &N) {
+ node_base_type *Prev = N.getPrev();
+ node_base_type *Next = N.getNext();
+ Next->setPrev(Prev);
+ Prev->setNext(Next);
+
+ // Not strictly necessary, but helps catch a class of bugs.
+ N.setPrev(nullptr);
+ N.setNext(nullptr);
+ }
+
+ static void removeRangeImpl(node_base_type &First, node_base_type &Last) {
+ node_base_type *Prev = First.getPrev();
+ node_base_type *Final = Last.getPrev();
+ Last.setPrev(Prev);
+ Prev->setNext(&Last);
+
+ // Not strictly necessary, but helps catch a class of bugs.
+ First.setPrev(nullptr);
+ Final->setNext(nullptr);
+ }
+
+ static void transferBeforeImpl(node_base_type &Next, node_base_type &First,
+ node_base_type &Last) {
+ if (&Next == &Last || &First == &Last)
+ return;
+
+ // Position cannot be contained in the range to be transferred.
+ assert(&Next != &First &&
+ // Check for the most common mistake.
+ "Insertion point can't be one of the transferred nodes");
+
+ node_base_type &Final = *Last.getPrev();
+
+ // Detach from old list/position.
+ First.getPrev()->setNext(&Last);
+ Last.setPrev(First.getPrev());
+
+ // Splice [First, Final] into its new list/position.
+ node_base_type &Prev = *Next.getPrev();
+ Final.setNext(&Next);
+ First.setPrev(&Prev);
+ Prev.setNext(&First);
+ Next.setPrev(&Final);
+ }
+
+ template <class T> static void insertBefore(T &Next, T &N) {
+ insertBeforeImpl(Next, N);
+ }
+
+ template <class T> static void remove(T &N) { removeImpl(N); }
+ template <class T> static void removeRange(T &First, T &Last) {
+ removeRangeImpl(First, Last);
+ }
+
+ template <class T> static void transferBefore(T &Next, T &First, T &Last) {
+ transferBeforeImpl(Next, First, Last);
+ }
+};
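+
+/// A small sketch of the link surgery above, with sentinel tracking off: a
+/// sentinel is wired into an empty circular list by hand, then a node is
+/// inserted and removed:
+/// \code
+///   ilist_node_base<false> S, N;
+///   S.setPrev(&S);
+///   S.setNext(&S);                         // empty list: S <-> S
+///   ilist_base<false>::insertBefore(S, N); // now S <-> N <-> S
+///   ilist_base<false>::remove(N);          // empty again; N's links nulled
+/// \endcode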
+
+} // end namespace llvm
+
+#endif // LLVM_ADT_ILIST_BASE_H
+
+#ifdef __GNUC__
+#pragma GCC diagnostic pop
+#endif
diff --git a/contrib/libs/llvm14/include/llvm/ADT/ilist_iterator.h b/contrib/libs/llvm14/include/llvm/ADT/ilist_iterator.h
new file mode 100644
index 0000000000..e8644f9521
--- /dev/null
+++ b/contrib/libs/llvm14/include/llvm/ADT/ilist_iterator.h
@@ -0,0 +1,208 @@
+#pragma once
+
+#ifdef __GNUC__
+#pragma GCC diagnostic push
+#pragma GCC diagnostic ignored "-Wunused-parameter"
+#endif
+
+//===- llvm/ADT/ilist_iterator.h - Intrusive List Iterator ------*- C++ -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_ADT_ILIST_ITERATOR_H
+#define LLVM_ADT_ILIST_ITERATOR_H
+
+#include "llvm/ADT/ilist_node.h"
+#include <cassert>
+#include <cstddef>
+#include <iterator>
+#include <type_traits>
+
+namespace llvm {
+
+namespace ilist_detail {
+
+/// Find const-correct node types.
+template <class OptionsT, bool IsConst> struct IteratorTraits;
+template <class OptionsT> struct IteratorTraits<OptionsT, false> {
+ using value_type = typename OptionsT::value_type;
+ using pointer = typename OptionsT::pointer;
+ using reference = typename OptionsT::reference;
+ using node_pointer = ilist_node_impl<OptionsT> *;
+ using node_reference = ilist_node_impl<OptionsT> &;
+};
+template <class OptionsT> struct IteratorTraits<OptionsT, true> {
+ using value_type = const typename OptionsT::value_type;
+ using pointer = typename OptionsT::const_pointer;
+ using reference = typename OptionsT::const_reference;
+ using node_pointer = const ilist_node_impl<OptionsT> *;
+ using node_reference = const ilist_node_impl<OptionsT> &;
+};
+
+template <bool IsReverse> struct IteratorHelper;
+template <> struct IteratorHelper<false> : ilist_detail::NodeAccess {
+ using Access = ilist_detail::NodeAccess;
+
+ template <class T> static void increment(T *&I) { I = Access::getNext(*I); }
+ template <class T> static void decrement(T *&I) { I = Access::getPrev(*I); }
+};
+template <> struct IteratorHelper<true> : ilist_detail::NodeAccess {
+ using Access = ilist_detail::NodeAccess;
+
+ template <class T> static void increment(T *&I) { I = Access::getPrev(*I); }
+ template <class T> static void decrement(T *&I) { I = Access::getNext(*I); }
+};
+
+} // end namespace ilist_detail
+
+/// Iterator for intrusive lists based on ilist_node.
+template <class OptionsT, bool IsReverse, bool IsConst>
+class ilist_iterator : ilist_detail::SpecificNodeAccess<OptionsT> {
+ friend ilist_iterator<OptionsT, IsReverse, !IsConst>;
+ friend ilist_iterator<OptionsT, !IsReverse, IsConst>;
+ friend ilist_iterator<OptionsT, !IsReverse, !IsConst>;
+
+ using Traits = ilist_detail::IteratorTraits<OptionsT, IsConst>;
+ using Access = ilist_detail::SpecificNodeAccess<OptionsT>;
+
+public:
+ using value_type = typename Traits::value_type;
+ using pointer = typename Traits::pointer;
+ using reference = typename Traits::reference;
+ using difference_type = ptrdiff_t;
+ using iterator_category = std::bidirectional_iterator_tag;
+ using const_pointer = typename OptionsT::const_pointer;
+ using const_reference = typename OptionsT::const_reference;
+
+private:
+ using node_pointer = typename Traits::node_pointer;
+ using node_reference = typename Traits::node_reference;
+
+ node_pointer NodePtr = nullptr;
+
+public:
+ /// Create from an ilist_node.
+ explicit ilist_iterator(node_reference N) : NodePtr(&N) {}
+
+ explicit ilist_iterator(pointer NP) : NodePtr(Access::getNodePtr(NP)) {}
+ explicit ilist_iterator(reference NR) : NodePtr(Access::getNodePtr(&NR)) {}
+ ilist_iterator() = default;
+
+ // This is templated so that we can allow constructing a const iterator from
+ // a nonconst iterator...
+ template <bool RHSIsConst>
+ ilist_iterator(const ilist_iterator<OptionsT, IsReverse, RHSIsConst> &RHS,
+ std::enable_if_t<IsConst || !RHSIsConst, void *> = nullptr)
+ : NodePtr(RHS.NodePtr) {}
+
+ // This is templated so that we can allow assigning to a const iterator from
+ // a nonconst iterator...
+ template <bool RHSIsConst>
+ std::enable_if_t<IsConst || !RHSIsConst, ilist_iterator &>
+ operator=(const ilist_iterator<OptionsT, IsReverse, RHSIsConst> &RHS) {
+ NodePtr = RHS.NodePtr;
+ return *this;
+ }
+
+ /// Explicit conversion between forward/reverse iterators.
+ ///
+ /// Translate between forward and reverse iterators without changing range
+ /// boundaries. The resulting iterator will dereference (and have a handle)
+ /// to the previous node, which is somewhat unexpected; but converting the
+ /// two endpoints in a range will give the same range in reverse.
+ ///
+ /// This matches std::reverse_iterator conversions.
+ explicit ilist_iterator(
+ const ilist_iterator<OptionsT, !IsReverse, IsConst> &RHS)
+ : ilist_iterator(++RHS.getReverse()) {}
+
+ /// Get a reverse iterator to the same node.
+ ///
+ /// Gives a reverse iterator that will dereference (and have a handle) to the
+ /// same node. Converting the endpoint iterators in a range will give a
+ /// different range; for range operations, use the explicit conversions.
+ ilist_iterator<OptionsT, !IsReverse, IsConst> getReverse() const {
+ if (NodePtr)
+ return ilist_iterator<OptionsT, !IsReverse, IsConst>(*NodePtr);
+ return ilist_iterator<OptionsT, !IsReverse, IsConst>();
+ }
+
+ /// Const-cast.
+ ilist_iterator<OptionsT, IsReverse, false> getNonConst() const {
+ if (NodePtr)
+ return ilist_iterator<OptionsT, IsReverse, false>(
+ const_cast<typename ilist_iterator<OptionsT, IsReverse,
+ false>::node_reference>(*NodePtr));
+ return ilist_iterator<OptionsT, IsReverse, false>();
+ }
+
+ // Accessors...
+ reference operator*() const {
+ assert(!NodePtr->isKnownSentinel());
+ return *Access::getValuePtr(NodePtr);
+ }
+ pointer operator->() const { return &operator*(); }
+
+ // Comparison operators
+ friend bool operator==(const ilist_iterator &LHS, const ilist_iterator &RHS) {
+ return LHS.NodePtr == RHS.NodePtr;
+ }
+ friend bool operator!=(const ilist_iterator &LHS, const ilist_iterator &RHS) {
+ return LHS.NodePtr != RHS.NodePtr;
+ }
+
+ // Increment and decrement operators...
+ ilist_iterator &operator--() {
+ NodePtr = IsReverse ? NodePtr->getNext() : NodePtr->getPrev();
+ return *this;
+ }
+ ilist_iterator &operator++() {
+ NodePtr = IsReverse ? NodePtr->getPrev() : NodePtr->getNext();
+ return *this;
+ }
+ ilist_iterator operator--(int) {
+ ilist_iterator tmp = *this;
+ --*this;
+ return tmp;
+ }
+ ilist_iterator operator++(int) {
+ ilist_iterator tmp = *this;
+ ++*this;
+ return tmp;
+ }
+
+ /// Get the underlying ilist_node.
+ node_pointer getNodePtr() const { return static_cast<node_pointer>(NodePtr); }
+
+ /// Check for end. Only valid if ilist_sentinel_tracking<true>.
+ bool isEnd() const { return NodePtr ? NodePtr->isSentinel() : false; }
+};
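+
+/// For example, assuming a \c simple_ilist<T> named \c L holding nodes
+/// {a, b, c}, the explicit conversion preserves range boundaries while \a
+/// getReverse() preserves the node:
+/// \code
+///   auto RB = simple_ilist<T>::reverse_iterator(L.end());   // points at c
+///   auto RE = simple_ilist<T>::reverse_iterator(L.begin()); // L.rend()
+///   // [RB, RE) visits c, b, a: the same elements, reversed.
+/// \endcode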
+
+template <typename From> struct simplify_type;
+
+/// Allow ilist_iterators to convert into pointers to a node automatically when
+/// used by the dyn_cast, cast, isa mechanisms...
+///
+/// FIXME: remove this, since there is no implicit conversion to NodeTy.
+template <class OptionsT, bool IsConst>
+struct simplify_type<ilist_iterator<OptionsT, false, IsConst>> {
+ using iterator = ilist_iterator<OptionsT, false, IsConst>;
+ using SimpleType = typename iterator::pointer;
+
+ static SimpleType getSimplifiedValue(const iterator &Node) { return &*Node; }
+};
+template <class OptionsT, bool IsConst>
+struct simplify_type<const ilist_iterator<OptionsT, false, IsConst>>
+ : simplify_type<ilist_iterator<OptionsT, false, IsConst>> {};
+
+} // end namespace llvm
+
+#endif // LLVM_ADT_ILIST_ITERATOR_H
+
+#ifdef __GNUC__
+#pragma GCC diagnostic pop
+#endif
diff --git a/contrib/libs/llvm14/include/llvm/ADT/ilist_node.h b/contrib/libs/llvm14/include/llvm/ADT/ilist_node.h
new file mode 100644
index 0000000000..174da6a4c1
--- /dev/null
+++ b/contrib/libs/llvm14/include/llvm/ADT/ilist_node.h
@@ -0,0 +1,317 @@
+#pragma once
+
+#ifdef __GNUC__
+#pragma GCC diagnostic push
+#pragma GCC diagnostic ignored "-Wunused-parameter"
+#endif
+
+//===- llvm/ADT/ilist_node.h - Intrusive Linked List Helper -----*- C++ -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+///
+/// \file
+/// This file defines the ilist_node class template, which is a convenient
+/// base class for creating classes that can be used with ilists.
+///
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_ADT_ILIST_NODE_H
+#define LLVM_ADT_ILIST_NODE_H
+
+#include "llvm/ADT/ilist_node_base.h"
+#include "llvm/ADT/ilist_node_options.h"
+
+namespace llvm {
+
+namespace ilist_detail {
+
+struct NodeAccess;
+
+} // end namespace ilist_detail
+
+template <class OptionsT, bool IsReverse, bool IsConst> class ilist_iterator;
+template <class OptionsT> class ilist_sentinel;
+
+/// Implementation for an ilist node.
+///
+/// Templated on an appropriate \a ilist_detail::node_options, usually computed
+/// by \a ilist_detail::compute_node_options.
+///
+/// This is a wrapper around \a ilist_node_base whose main purpose is to
+/// provide type safety: you can't insert nodes of \a ilist_node_impl into the
+/// wrong \a simple_ilist or \a iplist.
+template <class OptionsT> class ilist_node_impl : OptionsT::node_base_type {
+ using value_type = typename OptionsT::value_type;
+ using node_base_type = typename OptionsT::node_base_type;
+ using list_base_type = typename OptionsT::list_base_type;
+
+ friend typename OptionsT::list_base_type;
+ friend struct ilist_detail::NodeAccess;
+ friend class ilist_sentinel<OptionsT>;
+ friend class ilist_iterator<OptionsT, false, false>;
+ friend class ilist_iterator<OptionsT, false, true>;
+ friend class ilist_iterator<OptionsT, true, false>;
+ friend class ilist_iterator<OptionsT, true, true>;
+
+protected:
+ using self_iterator = ilist_iterator<OptionsT, false, false>;
+ using const_self_iterator = ilist_iterator<OptionsT, false, true>;
+ using reverse_self_iterator = ilist_iterator<OptionsT, true, false>;
+ using const_reverse_self_iterator = ilist_iterator<OptionsT, true, true>;
+
+ ilist_node_impl() = default;
+
+private:
+ ilist_node_impl *getPrev() {
+ return static_cast<ilist_node_impl *>(node_base_type::getPrev());
+ }
+
+ ilist_node_impl *getNext() {
+ return static_cast<ilist_node_impl *>(node_base_type::getNext());
+ }
+
+ const ilist_node_impl *getPrev() const {
+ return static_cast<ilist_node_impl *>(node_base_type::getPrev());
+ }
+
+ const ilist_node_impl *getNext() const {
+ return static_cast<ilist_node_impl *>(node_base_type::getNext());
+ }
+
+ void setPrev(ilist_node_impl *N) { node_base_type::setPrev(N); }
+ void setNext(ilist_node_impl *N) { node_base_type::setNext(N); }
+
+public:
+ self_iterator getIterator() { return self_iterator(*this); }
+ const_self_iterator getIterator() const { return const_self_iterator(*this); }
+
+ reverse_self_iterator getReverseIterator() {
+ return reverse_self_iterator(*this);
+ }
+
+ const_reverse_self_iterator getReverseIterator() const {
+ return const_reverse_self_iterator(*this);
+ }
+
+ // Under-approximation, but always available for assertions.
+ using node_base_type::isKnownSentinel;
+
+ /// Check whether this is the sentinel node.
+ ///
+ /// This requires sentinel tracking to be explicitly enabled. Use the
+ /// ilist_sentinel_tracking<true> option to get this API.
+ bool isSentinel() const {
+ static_assert(OptionsT::is_sentinel_tracking_explicit,
+ "Use ilist_sentinel_tracking<true> to enable isSentinel()");
+ return node_base_type::isSentinel();
+ }
+};
+
+/// An intrusive list node.
+///
+/// A base class to enable membership in intrusive lists, including \a
+/// simple_ilist, \a iplist, and \a ilist. The first template parameter is the
+/// \a value_type for the list.
+///
+/// An ilist node can be configured with compile-time options to change
+/// behaviour and/or add API.
+///
+/// By default, an \a ilist_node knows whether it is the list sentinel (an
+/// instance of \a ilist_sentinel) if and only if
+/// LLVM_ENABLE_ABI_BREAKING_CHECKS. The function \a isKnownSentinel() always
+/// returns \c false when tracking is off. Sentinel tracking steals a bit from the
+/// "prev" link, which adds a mask operation when decrementing an iterator, but
+/// enables bug-finding assertions in \a ilist_iterator.
+///
+/// To turn sentinel tracking on all the time, pass in the
+/// ilist_sentinel_tracking<true> template parameter. This also enables the \a
+/// isSentinel() function. The same option must be passed to the intrusive
+/// list. (ilist_sentinel_tracking<false> turns sentinel tracking off all the
+/// time.)
+///
+/// A type can inherit from ilist_node multiple times by passing in different
+/// \a ilist_tag options. This allows a single instance to be inserted into
+/// multiple lists simultaneously, where each list is given the same tag.
+///
+/// \example
+/// struct A {};
+/// struct B {};
+/// struct N : ilist_node<N, ilist_tag<A>>, ilist_node<N, ilist_tag<B>> {};
+///
+/// void foo() {
+/// simple_ilist<N, ilist_tag<A>> ListA;
+/// simple_ilist<N, ilist_tag<B>> ListB;
+/// N N1;
+/// ListA.push_back(N1);
+/// ListB.push_back(N1);
+/// }
+/// \endexample
+///
+/// See \a is_valid_option for steps on adding a new option.
+template <class T, class... Options>
+class ilist_node
+ : public ilist_node_impl<
+ typename ilist_detail::compute_node_options<T, Options...>::type> {
+ static_assert(ilist_detail::check_options<Options...>::value,
+ "Unrecognized node option!");
+};
+
+namespace ilist_detail {
+
+/// An access class for ilist_node private API.
+///
+/// This gives access to the private parts of ilist nodes. Nodes for an ilist
+/// should friend this class if they inherit privately from ilist_node.
+///
+/// Using this class outside of the ilist implementation is unsupported.
+struct NodeAccess {
+protected:
+ template <class OptionsT>
+ static ilist_node_impl<OptionsT> *getNodePtr(typename OptionsT::pointer N) {
+ return N;
+ }
+
+ template <class OptionsT>
+ static const ilist_node_impl<OptionsT> *
+ getNodePtr(typename OptionsT::const_pointer N) {
+ return N;
+ }
+
+ template <class OptionsT>
+ static typename OptionsT::pointer getValuePtr(ilist_node_impl<OptionsT> *N) {
+ return static_cast<typename OptionsT::pointer>(N);
+ }
+
+ template <class OptionsT>
+ static typename OptionsT::const_pointer
+ getValuePtr(const ilist_node_impl<OptionsT> *N) {
+ return static_cast<typename OptionsT::const_pointer>(N);
+ }
+
+ template <class OptionsT>
+ static ilist_node_impl<OptionsT> *getPrev(ilist_node_impl<OptionsT> &N) {
+ return N.getPrev();
+ }
+
+ template <class OptionsT>
+ static ilist_node_impl<OptionsT> *getNext(ilist_node_impl<OptionsT> &N) {
+ return N.getNext();
+ }
+
+ template <class OptionsT>
+ static const ilist_node_impl<OptionsT> *
+ getPrev(const ilist_node_impl<OptionsT> &N) {
+ return N.getPrev();
+ }
+
+ template <class OptionsT>
+ static const ilist_node_impl<OptionsT> *
+ getNext(const ilist_node_impl<OptionsT> &N) {
+ return N.getNext();
+ }
+};
+
+template <class OptionsT> struct SpecificNodeAccess : NodeAccess {
+protected:
+ using pointer = typename OptionsT::pointer;
+ using const_pointer = typename OptionsT::const_pointer;
+ using node_type = ilist_node_impl<OptionsT>;
+
+ static node_type *getNodePtr(pointer N) {
+ return NodeAccess::getNodePtr<OptionsT>(N);
+ }
+
+ static const node_type *getNodePtr(const_pointer N) {
+ return NodeAccess::getNodePtr<OptionsT>(N);
+ }
+
+ static pointer getValuePtr(node_type *N) {
+ return NodeAccess::getValuePtr<OptionsT>(N);
+ }
+
+ static const_pointer getValuePtr(const node_type *N) {
+ return NodeAccess::getValuePtr<OptionsT>(N);
+ }
+};
+
+} // end namespace ilist_detail
+
+template <class OptionsT>
+class ilist_sentinel : public ilist_node_impl<OptionsT> {
+public:
+ ilist_sentinel() {
+ this->initializeSentinel();
+ reset();
+ }
+
+ void reset() {
+ this->setPrev(this);
+ this->setNext(this);
+ }
+
+ bool empty() const { return this == this->getPrev(); }
+};
+
+/// An ilist node that can access its parent list.
+///
+/// Requires \c NodeTy to have \a getParent() to find the parent node, and the
+/// \c ParentTy to have \a getSublistAccess() to get a reference to the list.
+template <typename NodeTy, typename ParentTy, class... Options>
+class ilist_node_with_parent : public ilist_node<NodeTy, Options...> {
+protected:
+ ilist_node_with_parent() = default;
+
+private:
+ /// Forward to NodeTy::getParent().
+ ///
+ /// Note: do not use the name "getParent()". We want a compile error
+ /// (instead of recursion) when the subclass fails to implement \a
+ /// getParent().
+ const ParentTy *getNodeParent() const {
+ return static_cast<const NodeTy *>(this)->getParent();
+ }
+
+public:
+ /// @name Adjacent Node Accessors
+ /// @{
+ /// Get the previous node, or \c nullptr for the list head.
+ NodeTy *getPrevNode() {
+ // This should be factored into a reusable function, but then we couldn't use auto
+ // (and would need the type of the list).
+ const auto &List =
+ getNodeParent()->*(ParentTy::getSublistAccess((NodeTy *)nullptr));
+ return List.getPrevNode(*static_cast<NodeTy *>(this));
+ }
+
+ /// Get the previous node, or \c nullptr for the list head.
+ const NodeTy *getPrevNode() const {
+ return const_cast<ilist_node_with_parent *>(this)->getPrevNode();
+ }
+
+ /// Get the next node, or \c nullptr for the list tail.
+ NodeTy *getNextNode() {
+ // This should be factored into a reusable function, but then we couldn't use auto
+ // (and would need the type of the list).
+ const auto &List =
+ getNodeParent()->*(ParentTy::getSublistAccess((NodeTy *)nullptr));
+ return List.getNextNode(*static_cast<NodeTy *>(this));
+ }
+
+ /// Get the next node, or \c nullptr for the list tail.
+ const NodeTy *getNextNode() const {
+ return const_cast<ilist_node_with_parent *>(this)->getNextNode();
+ }
+ /// @}
+};
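+
+/// A minimal sketch of the parent API this requires, using hypothetical
+/// \c Inst and \c Block types (the real signatures live on the user's types):
+/// \code
+///   class Block;
+///   class Inst : public ilist_node_with_parent<Inst, Block> {
+///     Block *Parent = nullptr;
+///
+///   public:
+///     Block *getParent() const { return Parent; }
+///   };
+///   class Block {
+///     ilist<Inst> Insts;
+///
+///   public:
+///     static ilist<Inst> Block::*getSublistAccess(Inst *) {
+///       return &Block::Insts;
+///     }
+///   };
+/// \endcode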
+
+} // end namespace llvm
+
+#endif // LLVM_ADT_ILIST_NODE_H
+
+#ifdef __GNUC__
+#pragma GCC diagnostic pop
+#endif
diff --git a/contrib/libs/llvm14/include/llvm/ADT/ilist_node_base.h b/contrib/libs/llvm14/include/llvm/ADT/ilist_node_base.h
new file mode 100644
index 0000000000..98e83aaf5b
--- /dev/null
+++ b/contrib/libs/llvm14/include/llvm/ADT/ilist_node_base.h
@@ -0,0 +1,63 @@
+#pragma once
+
+#ifdef __GNUC__
+#pragma GCC diagnostic push
+#pragma GCC diagnostic ignored "-Wunused-parameter"
+#endif
+
+//===- llvm/ADT/ilist_node_base.h - Intrusive List Node Base -----*- C++ -*-==//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_ADT_ILIST_NODE_BASE_H
+#define LLVM_ADT_ILIST_NODE_BASE_H
+
+#include "llvm/ADT/PointerIntPair.h"
+
+namespace llvm {
+
+/// Base class for ilist nodes.
+///
+/// Optionally tracks whether this node is the sentinel.
+template <bool EnableSentinelTracking> class ilist_node_base;
+
+template <> class ilist_node_base<false> {
+ ilist_node_base *Prev = nullptr;
+ ilist_node_base *Next = nullptr;
+
+public:
+ void setPrev(ilist_node_base *Prev) { this->Prev = Prev; }
+ void setNext(ilist_node_base *Next) { this->Next = Next; }
+ ilist_node_base *getPrev() const { return Prev; }
+ ilist_node_base *getNext() const { return Next; }
+
+ bool isKnownSentinel() const { return false; }
+ void initializeSentinel() {}
+};
+
+template <> class ilist_node_base<true> {
+ PointerIntPair<ilist_node_base *, 1> PrevAndSentinel;
+ ilist_node_base *Next = nullptr;
+
+public:
+ void setPrev(ilist_node_base *Prev) { PrevAndSentinel.setPointer(Prev); }
+ void setNext(ilist_node_base *Next) { this->Next = Next; }
+ ilist_node_base *getPrev() const { return PrevAndSentinel.getPointer(); }
+ ilist_node_base *getNext() const { return Next; }
+
+ bool isSentinel() const { return PrevAndSentinel.getInt(); }
+ bool isKnownSentinel() const { return isSentinel(); }
+ void initializeSentinel() { PrevAndSentinel.setInt(true); }
+};
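+
+/// Both specializations occupy two pointers: the sentinel flag is stolen from
+/// the low alignment bit of \c Prev via \a PointerIntPair, so enabling
+/// tracking costs no space:
+/// \code
+///   static_assert(sizeof(ilist_node_base<true>) ==
+///                     sizeof(ilist_node_base<false>),
+///                 "sentinel tracking adds no size overhead");
+/// \endcode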
+
+} // end namespace llvm
+
+#endif // LLVM_ADT_ILIST_NODE_BASE_H
+
+#ifdef __GNUC__
+#pragma GCC diagnostic pop
+#endif
diff --git a/contrib/libs/llvm14/include/llvm/ADT/ilist_node_options.h b/contrib/libs/llvm14/include/llvm/ADT/ilist_node_options.h
new file mode 100644
index 0000000000..acd63b1d2f
--- /dev/null
+++ b/contrib/libs/llvm14/include/llvm/ADT/ilist_node_options.h
@@ -0,0 +1,142 @@
+#pragma once
+
+#ifdef __GNUC__
+#pragma GCC diagnostic push
+#pragma GCC diagnostic ignored "-Wunused-parameter"
+#endif
+
+//===- llvm/ADT/ilist_node_options.h - ilist_node Options -------*- C++ -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_ADT_ILIST_NODE_OPTIONS_H
+#define LLVM_ADT_ILIST_NODE_OPTIONS_H
+
+#include "llvm/Config/abi-breaking.h"
+
+#include <type_traits>
+
+namespace llvm {
+
+template <bool EnableSentinelTracking> class ilist_node_base;
+template <bool EnableSentinelTracking> class ilist_base;
+
+/// Option to choose whether to track sentinels.
+///
+/// This option affects the ABI for the nodes. When not specified explicitly,
+/// the ABI depends on LLVM_ENABLE_ABI_BREAKING_CHECKS. Specify explicitly to
+/// enable \a ilist_node::isSentinel().
+template <bool EnableSentinelTracking> struct ilist_sentinel_tracking {};
+
+/// Option to specify a tag for the node type.
+///
+/// This option allows a single value type to be inserted in multiple lists
+/// simultaneously. See \a ilist_node for usage examples.
+template <class Tag> struct ilist_tag {};
+
+namespace ilist_detail {
+
+/// Helper trait for recording whether an option is specified explicitly.
+template <bool IsExplicit> struct explicitness {
+ static const bool is_explicit = IsExplicit;
+};
+typedef explicitness<true> is_explicit;
+typedef explicitness<false> is_implicit;
+
+/// Check whether an option is valid.
+///
+/// The steps for adding and enabling a new ilist option include:
+/// \li define the option, ilist_foo<Bar>, above;
+/// \li add new parameters for Bar to \a ilist_detail::node_options;
+/// \li add an extraction meta-function, ilist_detail::extract_foo;
+/// \li call extract_foo from \a ilist_detail::compute_node_options and pass it
+/// into \a ilist_detail::node_options; and
+/// \li specialize \c is_valid_option<ilist_foo<Bar>> to inherit from \c
+/// std::true_type to get static assertions passing in \a simple_ilist and \a
+/// ilist_node.
+template <class Option> struct is_valid_option : std::false_type {};
+
+/// Extract sentinel tracking option.
+///
+/// Look through \p Options for the \a ilist_sentinel_tracking option, with the
+/// default depending on LLVM_ENABLE_ABI_BREAKING_CHECKS.
+template <class... Options> struct extract_sentinel_tracking;
+template <bool EnableSentinelTracking, class... Options>
+struct extract_sentinel_tracking<
+ ilist_sentinel_tracking<EnableSentinelTracking>, Options...>
+ : std::integral_constant<bool, EnableSentinelTracking>, is_explicit {};
+template <class Option1, class... Options>
+struct extract_sentinel_tracking<Option1, Options...>
+ : extract_sentinel_tracking<Options...> {};
+#if LLVM_ENABLE_ABI_BREAKING_CHECKS
+template <> struct extract_sentinel_tracking<> : std::true_type, is_implicit {};
+#else
+template <>
+struct extract_sentinel_tracking<> : std::false_type, is_implicit {};
+#endif
+template <bool EnableSentinelTracking>
+struct is_valid_option<ilist_sentinel_tracking<EnableSentinelTracking>>
+ : std::true_type {};
+
+/// Extract custom tag option.
+///
+/// Look through \p Options for the \a ilist_tag option, pulling out the
+/// custom tag type, using void as a default.
+template <class... Options> struct extract_tag;
+template <class Tag, class... Options>
+struct extract_tag<ilist_tag<Tag>, Options...> {
+ typedef Tag type;
+};
+template <class Option1, class... Options>
+struct extract_tag<Option1, Options...> : extract_tag<Options...> {};
+template <> struct extract_tag<> { typedef void type; };
+template <class Tag> struct is_valid_option<ilist_tag<Tag>> : std::true_type {};
+
+/// Check whether options are valid.
+///
+/// The conjunction of \a is_valid_option on each individual option.
+template <class... Options> struct check_options;
+template <> struct check_options<> : std::true_type {};
+template <class Option1, class... Options>
+struct check_options<Option1, Options...>
+ : std::integral_constant<bool, is_valid_option<Option1>::value &&
+ check_options<Options...>::value> {};
+
+/// Traits for options for \a ilist_node.
+///
+/// This is usually computed via \a compute_node_options.
+template <class T, bool EnableSentinelTracking, bool IsSentinelTrackingExplicit,
+ class TagT>
+struct node_options {
+ typedef T value_type;
+ typedef T *pointer;
+ typedef T &reference;
+ typedef const T *const_pointer;
+ typedef const T &const_reference;
+
+ static const bool enable_sentinel_tracking = EnableSentinelTracking;
+ static const bool is_sentinel_tracking_explicit = IsSentinelTrackingExplicit;
+ typedef TagT tag;
+ typedef ilist_node_base<enable_sentinel_tracking> node_base_type;
+ typedef ilist_base<enable_sentinel_tracking> list_base_type;
+};
+
+template <class T, class... Options> struct compute_node_options {
+ typedef node_options<T, extract_sentinel_tracking<Options...>::value,
+ extract_sentinel_tracking<Options...>::is_explicit,
+ typename extract_tag<Options...>::type>
+ type;
+};
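+
+/// A compile-time sketch of how the extraction above resolves, assuming a
+/// hypothetical tag type \c TagA:
+/// \code
+///   struct TagA {};
+///   static_assert(
+///       std::is_same<extract_tag<ilist_sentinel_tracking<true>,
+///                                ilist_tag<TagA>>::type,
+///                    TagA>::value,
+///       "the tag is found regardless of option order");
+///   static_assert(!extract_sentinel_tracking<
+///                     ilist_sentinel_tracking<false>>::value,
+///                 "an explicit option overrides the ABI-dependent default");
+/// \endcode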
+
+} // end namespace ilist_detail
+} // end namespace llvm
+
+#endif // LLVM_ADT_ILIST_NODE_OPTIONS_H
+
+#ifdef __GNUC__
+#pragma GCC diagnostic pop
+#endif
diff --git a/contrib/libs/llvm14/include/llvm/ADT/iterator.h b/contrib/libs/llvm14/include/llvm/ADT/iterator.h
new file mode 100644
index 0000000000..34992b6020
--- /dev/null
+++ b/contrib/libs/llvm14/include/llvm/ADT/iterator.h
@@ -0,0 +1,389 @@
+#pragma once
+
+#ifdef __GNUC__
+#pragma GCC diagnostic push
+#pragma GCC diagnostic ignored "-Wunused-parameter"
+#endif
+
+//===- iterator.h - Utilities for using and defining iterators --*- C++ -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_ADT_ITERATOR_H
+#define LLVM_ADT_ITERATOR_H
+
+#include "llvm/ADT/iterator_range.h"
+#include <cstddef>
+#include <iterator>
+#include <type_traits>
+#include <utility>
+
+namespace llvm {
+
+/// CRTP base class which implements the entire standard iterator facade
+/// in terms of a minimal subset of the interface.
+///
+/// Use this when it is reasonable to implement most of the iterator
+/// functionality in terms of a core subset. If you need special behavior or
+/// there are performance implications for this, you may want to override the
+/// relevant members instead.
+///
+/// Note, one abstraction that this does *not* provide is implementing
+/// subtraction in terms of addition by negating the difference. Negation isn't
+/// always information preserving, and I can see very reasonable iterator
+/// designs where this doesn't work well. It doesn't really force much added
+/// boilerplate anyway.
+///
+/// Another abstraction that this doesn't provide is implementing increment in
+/// terms of addition of one. These aren't equivalent for all iterator
+/// categories, and respecting that adds a lot of complexity for little gain.
+///
+/// Iterators are expected to have const rules analogous to pointers, with a
+/// single, const-qualified operator*() that returns ReferenceT. This matches
+/// the second and third pointers in the following example:
+/// \code
+/// int Value;
+/// { int *I = &Value; } // ReferenceT 'int&'
+/// { int *const I = &Value; } // ReferenceT 'int&'; const
+/// { const int *I = &Value; } // ReferenceT 'const int&'
+/// { const int *const I = &Value; } // ReferenceT 'const int&'; const
+/// \endcode
+/// If an iterator facade returns a handle to its own state, then T (and
+/// PointerT and ReferenceT) should usually be const-qualified. Otherwise, if
+/// clients are expected to modify the handle itself, the field can be declared
+/// mutable or use const_cast.
+///
+/// Classes wishing to use `iterator_facade_base` should implement the following
+/// methods:
+///
+/// Forward Iterators:
+/// (All of the following methods)
+/// - DerivedT &operator=(const DerivedT &R);
+/// - bool operator==(const DerivedT &R) const;
+/// - T &operator*() const;
+/// - DerivedT &operator++();
+///
+/// Bidirectional Iterators:
+/// (All methods of forward iterators, plus the following)
+/// - DerivedT &operator--();
+///
+/// Random-access Iterators:
+/// (All methods of bidirectional iterators excluding the following)
+/// - DerivedT &operator++();
+/// - DerivedT &operator--();
+/// (plus the following)
+/// - bool operator<(const DerivedT &RHS) const;
+/// - DifferenceTypeT operator-(const DerivedT &R) const;
+/// - DerivedT &operator+=(DifferenceTypeT N);
+/// - DerivedT &operator-=(DifferenceTypeT N);
+///
+template <typename DerivedT, typename IteratorCategoryT, typename T,
+ typename DifferenceTypeT = std::ptrdiff_t, typename PointerT = T *,
+ typename ReferenceT = T &>
+class iterator_facade_base {
+public:
+ using iterator_category = IteratorCategoryT;
+ using value_type = T;
+ using difference_type = DifferenceTypeT;
+ using pointer = PointerT;
+ using reference = ReferenceT;
+
+protected:
+ enum {
+ IsRandomAccess = std::is_base_of<std::random_access_iterator_tag,
+ IteratorCategoryT>::value,
+ IsBidirectional = std::is_base_of<std::bidirectional_iterator_tag,
+ IteratorCategoryT>::value,
+ };
+
+ /// A proxy object for computing a reference via indirecting a copy of an
+ /// iterator. This is used in APIs which need to produce a reference via
+ /// indirection but for which the iterator object might be a temporary. The
+ /// proxy preserves the iterator internally and exposes the indirected
+ /// reference via a conversion operator.
+ class ReferenceProxy {
+ friend iterator_facade_base;
+
+ DerivedT I;
+
+ ReferenceProxy(DerivedT I) : I(std::move(I)) {}
+
+ public:
+ operator ReferenceT() const { return *I; }
+ };
+
+ /// A proxy object for computing a pointer via indirecting a copy of a
+ /// reference. This is used in APIs which need to produce a pointer but for
+ /// which the reference might be a temporary. The proxy preserves the
+ /// reference internally and exposes the pointer via an arrow operator.
+ class PointerProxy {
+ friend iterator_facade_base;
+
+ ReferenceT R;
+
+ template <typename RefT>
+ PointerProxy(RefT &&R) : R(std::forward<RefT>(R)) {}
+
+ public:
+ PointerT operator->() const { return &R; }
+ };
+
+public:
+ DerivedT operator+(DifferenceTypeT n) const {
+ static_assert(std::is_base_of<iterator_facade_base, DerivedT>::value,
+ "Must pass the derived type to this template!");
+ static_assert(
+ IsRandomAccess,
+ "The '+' operator is only defined for random access iterators.");
+ DerivedT tmp = *static_cast<const DerivedT *>(this);
+ tmp += n;
+ return tmp;
+ }
+ friend DerivedT operator+(DifferenceTypeT n, const DerivedT &i) {
+ static_assert(
+ IsRandomAccess,
+ "The '+' operator is only defined for random access iterators.");
+ return i + n;
+ }
+ DerivedT operator-(DifferenceTypeT n) const {
+ static_assert(
+ IsRandomAccess,
+ "The '-' operator is only defined for random access iterators.");
+ DerivedT tmp = *static_cast<const DerivedT *>(this);
+ tmp -= n;
+ return tmp;
+ }
+
+ DerivedT &operator++() {
+ static_assert(std::is_base_of<iterator_facade_base, DerivedT>::value,
+ "Must pass the derived type to this template!");
+ return static_cast<DerivedT *>(this)->operator+=(1);
+ }
+ DerivedT operator++(int) {
+ DerivedT tmp = *static_cast<DerivedT *>(this);
+ ++*static_cast<DerivedT *>(this);
+ return tmp;
+ }
+ DerivedT &operator--() {
+ static_assert(
+ IsBidirectional,
+ "The decrement operator is only defined for bidirectional iterators.");
+ return static_cast<DerivedT *>(this)->operator-=(1);
+ }
+ DerivedT operator--(int) {
+ static_assert(
+ IsBidirectional,
+ "The decrement operator is only defined for bidirectional iterators.");
+ DerivedT tmp = *static_cast<DerivedT *>(this);
+ --*static_cast<DerivedT *>(this);
+ return tmp;
+ }
+
+#ifndef __cpp_impl_three_way_comparison
+ bool operator!=(const DerivedT &RHS) const {
+ return !(static_cast<const DerivedT &>(*this) == RHS);
+ }
+#endif
+
+ bool operator>(const DerivedT &RHS) const {
+ static_assert(
+ IsRandomAccess,
+ "Relational operators are only defined for random access iterators.");
+ return !(static_cast<const DerivedT &>(*this) < RHS) &&
+ !(static_cast<const DerivedT &>(*this) == RHS);
+ }
+ bool operator<=(const DerivedT &RHS) const {
+ static_assert(
+ IsRandomAccess,
+ "Relational operators are only defined for random access iterators.");
+ return !(static_cast<const DerivedT &>(*this) > RHS);
+ }
+ bool operator>=(const DerivedT &RHS) const {
+ static_assert(
+ IsRandomAccess,
+ "Relational operators are only defined for random access iterators.");
+ return !(static_cast<const DerivedT &>(*this) < RHS);
+ }
+
+ PointerProxy operator->() const {
+ return static_cast<const DerivedT *>(this)->operator*();
+ }
+ ReferenceProxy operator[](DifferenceTypeT n) const {
+ static_assert(IsRandomAccess,
+ "Subscripting is only defined for random access iterators.");
+ return static_cast<const DerivedT *>(this)->operator+(n);
+ }
+};
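+
+/// For example, a self-contained random-access iterator over an integer
+/// counter needs only the core operations above; the facade derives the rest.
+/// A sketch, not part of this header's API:
+/// \code
+///   class counting_iterator
+///       : public iterator_facade_base<counting_iterator,
+///                                     std::random_access_iterator_tag, int,
+///                                     std::ptrdiff_t, const int *,
+///                                     const int &> {
+///     int Value = 0;
+///
+///   public:
+///     counting_iterator() = default;
+///     explicit counting_iterator(int V) : Value(V) {}
+///     const int &operator*() const { return Value; }
+///     bool operator==(const counting_iterator &R) const {
+///       return Value == R.Value;
+///     }
+///     bool operator<(const counting_iterator &R) const {
+///       return Value < R.Value;
+///     }
+///     std::ptrdiff_t operator-(const counting_iterator &R) const {
+///       return Value - R.Value;
+///     }
+///     counting_iterator &operator+=(std::ptrdiff_t N) {
+///       Value += N;
+///       return *this;
+///     }
+///     counting_iterator &operator-=(std::ptrdiff_t N) {
+///       Value -= N;
+///       return *this;
+///     }
+///   };
+/// \endcode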
+
+/// CRTP base class for adapting an iterator to a different type.
+///
+/// This class can be used through CRTP to adapt one iterator into another.
+/// Typically this is done through providing in the derived class a custom \c
+/// operator* implementation. Other methods can be overridden as well.
+template <
+ typename DerivedT, typename WrappedIteratorT,
+ typename IteratorCategoryT =
+ typename std::iterator_traits<WrappedIteratorT>::iterator_category,
+ typename T = typename std::iterator_traits<WrappedIteratorT>::value_type,
+ typename DifferenceTypeT =
+ typename std::iterator_traits<WrappedIteratorT>::difference_type,
+ typename PointerT = std::conditional_t<
+ std::is_same<T, typename std::iterator_traits<
+ WrappedIteratorT>::value_type>::value,
+ typename std::iterator_traits<WrappedIteratorT>::pointer, T *>,
+ typename ReferenceT = std::conditional_t<
+ std::is_same<T, typename std::iterator_traits<
+ WrappedIteratorT>::value_type>::value,
+ typename std::iterator_traits<WrappedIteratorT>::reference, T &>>
+class iterator_adaptor_base
+ : public iterator_facade_base<DerivedT, IteratorCategoryT, T,
+ DifferenceTypeT, PointerT, ReferenceT> {
+ using BaseT = typename iterator_adaptor_base::iterator_facade_base;
+
+protected:
+ WrappedIteratorT I;
+
+ iterator_adaptor_base() = default;
+
+ explicit iterator_adaptor_base(WrappedIteratorT u) : I(std::move(u)) {
+ static_assert(std::is_base_of<iterator_adaptor_base, DerivedT>::value,
+ "Must pass the derived type to this template!");
+ }
+
+ const WrappedIteratorT &wrapped() const { return I; }
+
+public:
+ using difference_type = DifferenceTypeT;
+
+ DerivedT &operator+=(difference_type n) {
+ static_assert(
+ BaseT::IsRandomAccess,
+ "The '+=' operator is only defined for random access iterators.");
+ I += n;
+ return *static_cast<DerivedT *>(this);
+ }
+ DerivedT &operator-=(difference_type n) {
+ static_assert(
+ BaseT::IsRandomAccess,
+ "The '-=' operator is only defined for random access iterators.");
+ I -= n;
+ return *static_cast<DerivedT *>(this);
+ }
+ using BaseT::operator-;
+ difference_type operator-(const DerivedT &RHS) const {
+ static_assert(
+ BaseT::IsRandomAccess,
+ "The '-' operator is only defined for random access iterators.");
+ return I - RHS.I;
+ }
+
+ // We have to explicitly provide ++ and -- rather than letting the facade
+ // forward to += because WrappedIteratorT might not support +=.
+ using BaseT::operator++;
+ DerivedT &operator++() {
+ ++I;
+ return *static_cast<DerivedT *>(this);
+ }
+ using BaseT::operator--;
+ DerivedT &operator--() {
+ static_assert(
+ BaseT::IsBidirectional,
+ "The decrement operator is only defined for bidirectional iterators.");
+ --I;
+ return *static_cast<DerivedT *>(this);
+ }
+
+ friend bool operator==(const iterator_adaptor_base &LHS,
+ const iterator_adaptor_base &RHS) {
+ return LHS.I == RHS.I;
+ }
+ friend bool operator<(const iterator_adaptor_base &LHS,
+ const iterator_adaptor_base &RHS) {
+ static_assert(
+ BaseT::IsRandomAccess,
+ "Relational operators are only defined for random access iterators.");
+ return LHS.I < RHS.I;
+ }
+
+ ReferenceT operator*() const { return *I; }
+};
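+
+/// For example, an adaptor that yields the first element of each pair; a
+/// sketch assuming a contiguous array of \c std::pair<int, int>:
+/// \code
+///   struct first_iterator
+///       : iterator_adaptor_base<first_iterator, std::pair<int, int> *,
+///                               std::random_access_iterator_tag, int> {
+///     explicit first_iterator(std::pair<int, int> *I)
+///         : first_iterator::iterator_adaptor_base(I) {}
+///     int &operator*() const { return this->I->first; }
+///   };
+/// \endcode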
+
+/// An iterator type that allows iterating over the pointees via some
+/// other iterator.
+///
+/// The typical usage of this is to expose a type that iterates over Ts, but
+/// which is implemented with some iterator over T*s:
+///
+/// \code
+/// using iterator = pointee_iterator<SmallVectorImpl<T *>::iterator>;
+/// \endcode
+template <typename WrappedIteratorT,
+ typename T = std::remove_reference_t<decltype(
+ **std::declval<WrappedIteratorT>())>>
+struct pointee_iterator
+ : iterator_adaptor_base<
+ pointee_iterator<WrappedIteratorT, T>, WrappedIteratorT,
+ typename std::iterator_traits<WrappedIteratorT>::iterator_category,
+ T> {
+ pointee_iterator() = default;
+ template <typename U>
+ pointee_iterator(U &&u)
+ : pointee_iterator::iterator_adaptor_base(std::forward<U &&>(u)) {}
+
+ T &operator*() const { return **this->I; }
+};
+
+template <typename RangeT, typename WrappedIteratorT =
+ decltype(std::begin(std::declval<RangeT>()))>
+iterator_range<pointee_iterator<WrappedIteratorT>>
+make_pointee_range(RangeT &&Range) {
+ using PointeeIteratorT = pointee_iterator<WrappedIteratorT>;
+ return make_range(PointeeIteratorT(std::begin(std::forward<RangeT>(Range))),
+ PointeeIteratorT(std::end(std::forward<RangeT>(Range))));
+}
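+
+/// For example, assuming a container of pointers populated elsewhere:
+/// \code
+///   std::vector<int *> Ptrs;
+///   for (int &I : make_pointee_range(Ptrs))
+///     ++I; // Visits each pointee, not the pointers themselves.
+/// \endcode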
+
+template <typename WrappedIteratorT,
+ typename T = decltype(&*std::declval<WrappedIteratorT>())>
+class pointer_iterator
+ : public iterator_adaptor_base<
+ pointer_iterator<WrappedIteratorT, T>, WrappedIteratorT,
+ typename std::iterator_traits<WrappedIteratorT>::iterator_category,
+ T> {
+ mutable T Ptr;
+
+public:
+ pointer_iterator() = default;
+
+ explicit pointer_iterator(WrappedIteratorT u)
+ : pointer_iterator::iterator_adaptor_base(std::move(u)) {}
+
+ T &operator*() const { return Ptr = &*this->I; }
+};
+
+template <typename RangeT, typename WrappedIteratorT =
+ decltype(std::begin(std::declval<RangeT>()))>
+iterator_range<pointer_iterator<WrappedIteratorT>>
+make_pointer_range(RangeT &&Range) {
+ using PointerIteratorT = pointer_iterator<WrappedIteratorT>;
+ return make_range(PointerIteratorT(std::begin(std::forward<RangeT>(Range))),
+ PointerIteratorT(std::end(std::forward<RangeT>(Range))));
+}
+
+template <typename WrappedIteratorT,
+ typename T1 = std::remove_reference_t<decltype(
+ **std::declval<WrappedIteratorT>())>,
+ typename T2 = std::add_pointer_t<T1>>
+using raw_pointer_iterator =
+ pointer_iterator<pointee_iterator<WrappedIteratorT, T1>, T2>;
+
+} // end namespace llvm
+
+#endif // LLVM_ADT_ITERATOR_H
+
+#ifdef __GNUC__
+#pragma GCC diagnostic pop
+#endif
diff --git a/contrib/libs/llvm14/include/llvm/ADT/iterator_range.h b/contrib/libs/llvm14/include/llvm/ADT/iterator_range.h
new file mode 100644
index 0000000000..c5e2ef6366
--- /dev/null
+++ b/contrib/libs/llvm14/include/llvm/ADT/iterator_range.h
@@ -0,0 +1,74 @@
+#pragma once
+
+#ifdef __GNUC__
+#pragma GCC diagnostic push
+#pragma GCC diagnostic ignored "-Wunused-parameter"
+#endif
+
+//===- iterator_range.h - A range adaptor for iterators ---------*- C++ -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+/// \file
+/// This provides a very simple, boring adaptor for a begin and end iterator
+/// into a range type. This should be used to build range views that work well
+/// with range based for loops and range based constructors.
+///
+/// Note that code here follows more standards-based coding conventions as it
+/// is mirroring proposed interfaces for standardization.
+///
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_ADT_ITERATOR_RANGE_H
+#define LLVM_ADT_ITERATOR_RANGE_H
+
+#include <utility>
+
+namespace llvm {
+
+/// A range adaptor for a pair of iterators.
+///
+/// This just wraps two iterators into a range-compatible interface. Nothing
+/// fancy at all.
+template <typename IteratorT>
+class iterator_range {
+ IteratorT begin_iterator, end_iterator;
+
+public:
+ //TODO: Add SFINAE to test that the Container's iterators match the range's
+ // iterators.
+ template <typename Container>
+ iterator_range(Container &&c)
+ //TODO: Consider ADL/non-member begin/end calls.
+ : begin_iterator(c.begin()), end_iterator(c.end()) {}
+ iterator_range(IteratorT begin_iterator, IteratorT end_iterator)
+ : begin_iterator(std::move(begin_iterator)),
+ end_iterator(std::move(end_iterator)) {}
+
+ IteratorT begin() const { return begin_iterator; }
+ IteratorT end() const { return end_iterator; }
+ bool empty() const { return begin_iterator == end_iterator; }
+};
+
+/// Convenience function for iterating over sub-ranges.
+///
+/// This provides a bit of syntactic sugar to make using sub-ranges
+/// in for loops a bit easier. Analogous to std::make_pair().
+template <class T> iterator_range<T> make_range(T x, T y) {
+ return iterator_range<T>(std::move(x), std::move(y));
+}
+
+template <typename T> iterator_range<T> make_range(std::pair<T, T> p) {
+ return iterator_range<T>(std::move(p.first), std::move(p.second));
+}
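+
+/// For example, a sub-range view over a vector:
+/// \code
+///   std::vector<int> V = {1, 2, 3, 4};
+///   for (int I : make_range(V.begin() + 1, V.end() - 1))
+///     (void)I; // Visits only the middle elements, 2 and 3.
+/// \endcode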
+
+} // end namespace llvm
+
+#endif // LLVM_ADT_ITERATOR_RANGE_H
+
+#ifdef __GNUC__
+#pragma GCC diagnostic pop
+#endif
diff --git a/contrib/libs/llvm14/include/llvm/ADT/simple_ilist.h b/contrib/libs/llvm14/include/llvm/ADT/simple_ilist.h
new file mode 100644
index 0000000000..619f3f466b
--- /dev/null
+++ b/contrib/libs/llvm14/include/llvm/ADT/simple_ilist.h
@@ -0,0 +1,325 @@
+#pragma once
+
+#ifdef __GNUC__
+#pragma GCC diagnostic push
+#pragma GCC diagnostic ignored "-Wunused-parameter"
+#endif
+
+//===- llvm/ADT/simple_ilist.h - Simple Intrusive List ----------*- C++ -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_ADT_SIMPLE_ILIST_H
+#define LLVM_ADT_SIMPLE_ILIST_H
+
+#include "llvm/ADT/ilist_base.h"
+#include "llvm/ADT/ilist_iterator.h"
+#include "llvm/ADT/ilist_node.h"
+#include "llvm/ADT/ilist_node_options.h"
+#include "llvm/Support/Compiler.h"
+#include <algorithm>
+#include <cassert>
+#include <cstddef>
+#include <functional>
+#include <iterator>
+#include <utility>
+
+namespace llvm {
+
+/// A simple intrusive list implementation.
+///
+/// This is a simple intrusive list for a \c T that inherits from \c
+/// ilist_node<T>. The list never takes ownership of anything inserted in it.
+///
+/// Unlike \a iplist<T> and \a ilist<T>, \a simple_ilist<T> never deletes
+/// values, and has no callback traits.
+///
+/// The API for adding nodes include \a push_front(), \a push_back(), and \a
+/// insert(). These all take values by reference (not by pointer), except for
+/// the range version of \a insert().
+///
+/// There are three sets of API for discarding nodes from the list: \a
+/// remove(), which takes a reference to the node to remove, \a erase(), which
+/// takes an iterator or iterator range and returns the next one, and \a
+/// clear(), which empties out the container. All three are constant time
+/// operations. None of these deletes any nodes; in particular, if there is a
+/// single node in the list, then these have identical semantics:
+/// \li \c L.remove(L.front());
+/// \li \c L.erase(L.begin());
+/// \li \c L.clear();
+///
+/// As a convenience for callers, there are parallel APIs that take a \c
+/// Disposer (such as \c std::default_delete<T>): \a removeAndDispose(), \a
+/// eraseAndDispose(), and \a clearAndDispose(). These have different names
+/// because the extra semantic is otherwise non-obvious. They are equivalent
+/// to calling \a std::for_each() on the range to be discarded.
+///
+/// The currently available \p Options customize the nodes in the list. The
+/// same options must be specified in the \a ilist_node instantiation for
+/// compatibility (although the order is irrelevant).
+/// \li Use \a ilist_tag to designate which ilist_node for a given \p T this
+/// list should use. This is useful if a type \p T is part of multiple,
+/// independent lists simultaneously.
+/// \li Use \a ilist_sentinel_tracking to always (or never) track whether a
+/// node is a sentinel. Specifying \c true enables the \a
+/// ilist_node::isSentinel() API. Unlike \a ilist_node::isKnownSentinel(),
+/// which is only appropriate for assertions, \a ilist_node::isSentinel() is
+/// appropriate for real logic.
+///
+/// Here are examples of \p Options usage:
+/// \li \c simple_ilist<T> gives the defaults. \li \c
+/// simple_ilist<T,ilist_sentinel_tracking<true>> enables the \a
+/// ilist_node::isSentinel() API.
+/// \li \c simple_ilist<T,ilist_tag<A>,ilist_sentinel_tracking<false>>
+/// specifies a tag of A and that tracking should be off (even when
+/// LLVM_ENABLE_ABI_BREAKING_CHECKS are enabled).
+/// \li \c simple_ilist<T,ilist_sentinel_tracking<false>,ilist_tag<A>> is
+/// equivalent to the last.
+///
+/// See \a is_valid_option for steps on adding a new option.
+template <typename T, class... Options>
+class simple_ilist
+ : ilist_detail::compute_node_options<T, Options...>::type::list_base_type,
+ ilist_detail::SpecificNodeAccess<
+ typename ilist_detail::compute_node_options<T, Options...>::type> {
+ static_assert(ilist_detail::check_options<Options...>::value,
+ "Unrecognized node option!");
+ using OptionsT =
+ typename ilist_detail::compute_node_options<T, Options...>::type;
+ using list_base_type = typename OptionsT::list_base_type;
+ ilist_sentinel<OptionsT> Sentinel;
+
+public:
+ using value_type = typename OptionsT::value_type;
+ using pointer = typename OptionsT::pointer;
+ using reference = typename OptionsT::reference;
+ using const_pointer = typename OptionsT::const_pointer;
+ using const_reference = typename OptionsT::const_reference;
+ using iterator = ilist_iterator<OptionsT, false, false>;
+ using const_iterator = ilist_iterator<OptionsT, false, true>;
+ using reverse_iterator = ilist_iterator<OptionsT, true, false>;
+ using const_reverse_iterator = ilist_iterator<OptionsT, true, true>;
+ using size_type = size_t;
+ using difference_type = ptrdiff_t;
+
+ simple_ilist() = default;
+ ~simple_ilist() = default;
+
+ // No copy constructors.
+ simple_ilist(const simple_ilist &) = delete;
+ simple_ilist &operator=(const simple_ilist &) = delete;
+
+ // Move constructors.
+ simple_ilist(simple_ilist &&X) { splice(end(), X); }
+ simple_ilist &operator=(simple_ilist &&X) {
+ clear();
+ splice(end(), X);
+ return *this;
+ }
+
+ iterator begin() { return ++iterator(Sentinel); }
+ const_iterator begin() const { return ++const_iterator(Sentinel); }
+ iterator end() { return iterator(Sentinel); }
+ const_iterator end() const { return const_iterator(Sentinel); }
+ reverse_iterator rbegin() { return ++reverse_iterator(Sentinel); }
+ const_reverse_iterator rbegin() const {
+ return ++const_reverse_iterator(Sentinel);
+ }
+ reverse_iterator rend() { return reverse_iterator(Sentinel); }
+ const_reverse_iterator rend() const {
+ return const_reverse_iterator(Sentinel);
+ }
+
+ /// Check if the list is empty in constant time.
+ LLVM_NODISCARD bool empty() const { return Sentinel.empty(); }
+
+ /// Calculate the size of the list in linear time.
+ LLVM_NODISCARD size_type size() const {
+ return std::distance(begin(), end());
+ }
+
+ reference front() { return *begin(); }
+ const_reference front() const { return *begin(); }
+ reference back() { return *rbegin(); }
+ const_reference back() const { return *rbegin(); }
+
+ /// Insert a node at the front; never copies.
+ void push_front(reference Node) { insert(begin(), Node); }
+
+ /// Insert a node at the back; never copies.
+ void push_back(reference Node) { insert(end(), Node); }
+
+ /// Remove the node at the front; never deletes.
+ void pop_front() { erase(begin()); }
+
+ /// Remove the node at the back; never deletes.
+ void pop_back() { erase(--end()); }
+
+ /// Swap with another list in place using std::swap.
+ void swap(simple_ilist &X) { std::swap(*this, X); }
+
+ /// Insert a node by reference; never copies.
+ iterator insert(iterator I, reference Node) {
+ list_base_type::insertBefore(*I.getNodePtr(), *this->getNodePtr(&Node));
+ return iterator(&Node);
+ }
+
+ /// Insert a range of nodes; never copies.
+ template <class Iterator>
+ void insert(iterator I, Iterator First, Iterator Last) {
+ for (; First != Last; ++First)
+ insert(I, *First);
+ }
+
+ /// Clone another list.
+ template <class Cloner, class Disposer>
+ void cloneFrom(const simple_ilist &L2, Cloner clone, Disposer dispose) {
+ clearAndDispose(dispose);
+ for (const_reference V : L2)
+ push_back(*clone(V));
+ }
+
+ /// Remove a node by reference; never deletes.
+ ///
+ /// \see \a erase() for removing by iterator.
+ /// \see \a removeAndDispose() if the node should be deleted.
+ void remove(reference N) { list_base_type::remove(*this->getNodePtr(&N)); }
+
+ /// Remove a node by reference and dispose of it.
+ template <class Disposer>
+ void removeAndDispose(reference N, Disposer dispose) {
+ remove(N);
+ dispose(&N);
+ }
+
+ /// Remove a node by iterator; never deletes.
+ ///
+ /// \see \a remove() for removing by reference.
+ /// \see \a eraseAndDispose() if the node should be deleted.
+ iterator erase(iterator I) {
+ assert(I != end() && "Cannot remove end of list!");
+ remove(*I++);
+ return I;
+ }
+
+ /// Remove a range of nodes; never deletes.
+ ///
+ /// \see \a eraseAndDispose() if the nodes should be deleted.
+ iterator erase(iterator First, iterator Last) {
+ list_base_type::removeRange(*First.getNodePtr(), *Last.getNodePtr());
+ return Last;
+ }
+
+ /// Remove a node by iterator and dispose of it.
+ template <class Disposer>
+ iterator eraseAndDispose(iterator I, Disposer dispose) {
+ auto Next = std::next(I);
+ erase(I);
+ dispose(&*I);
+ return Next;
+ }
+
+ /// Remove a range of nodes and dispose of them.
+ template <class Disposer>
+ iterator eraseAndDispose(iterator First, iterator Last, Disposer dispose) {
+ while (First != Last)
+ First = eraseAndDispose(First, dispose);
+ return Last;
+ }
+
+ /// Clear the list; never deletes.
+ ///
+ /// \see \a clearAndDispose() if the nodes should be deleted.
+ void clear() { Sentinel.reset(); }
+
+ /// Clear the list and dispose of the nodes.
+ template <class Disposer> void clearAndDispose(Disposer dispose) {
+ eraseAndDispose(begin(), end(), dispose);
+ }
+
+ /// Splice in another list.
+ void splice(iterator I, simple_ilist &L2) {
+ splice(I, L2, L2.begin(), L2.end());
+ }
+
+ /// Splice in a node from another list.
+ void splice(iterator I, simple_ilist &L2, iterator Node) {
+ splice(I, L2, Node, std::next(Node));
+ }
+
+ /// Splice in a range of nodes from another list.
+ void splice(iterator I, simple_ilist &, iterator First, iterator Last) {
+ list_base_type::transferBefore(*I.getNodePtr(), *First.getNodePtr(),
+ *Last.getNodePtr());
+ }
+
+ /// Merge in another list.
+ ///
+ /// \pre \c this and \p RHS are sorted.
+ ///@{
+ void merge(simple_ilist &RHS) { merge(RHS, std::less<T>()); }
+ template <class Compare> void merge(simple_ilist &RHS, Compare comp);
+ ///@}
+
+ /// Sort the list.
+ ///@{
+ void sort() { sort(std::less<T>()); }
+ template <class Compare> void sort(Compare comp);
+ ///@}
+};
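+
+/// A minimal usage sketch, assuming a hypothetical node type \c Elt:
+/// \code
+///   struct Elt : ilist_node<Elt> { int V = 0; };
+///   Elt A, B;
+///   simple_ilist<Elt> L;
+///   L.push_back(A); // By reference; the list never owns A.
+///   L.push_back(B);
+///   L.remove(A);    // Unlinks A; nothing is deleted.
+/// \endcode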
+
+template <class T, class... Options>
+template <class Compare>
+void simple_ilist<T, Options...>::merge(simple_ilist &RHS, Compare comp) {
+ if (this == &RHS || RHS.empty())
+ return;
+ iterator LI = begin(), LE = end();
+ iterator RI = RHS.begin(), RE = RHS.end();
+ while (LI != LE) {
+ if (comp(*RI, *LI)) {
+ // Transfer a run of at least size 1 from RHS to LHS.
+ iterator RunStart = RI++;
+ RI = std::find_if(RI, RE, [&](reference RV) { return !comp(RV, *LI); });
+ splice(LI, RHS, RunStart, RI);
+ if (RI == RE)
+ return;
+ }
+ ++LI;
+ }
+ // Transfer the remaining RHS nodes once LHS is finished.
+ splice(LE, RHS, RI, RE);
+}
+
+template <class T, class... Options>
+template <class Compare>
+void simple_ilist<T, Options...>::sort(Compare comp) {
+ // Vacuously sorted.
+ if (empty() || std::next(begin()) == end())
+ return;
+
+ // Split the list in the middle.
+ iterator Center = begin(), End = begin();
+ while (End != end() && ++End != end()) {
+ ++Center;
+ ++End;
+ }
+ simple_ilist RHS;
+ RHS.splice(RHS.end(), *this, Center, end());
+
+ // Sort the sublists and merge back together.
+ sort(comp);
+ RHS.sort(comp);
+ merge(RHS, comp);
+}
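+
+/// A small sketch of the two routines above, assuming a node type with an
+/// integer key and two lists \c L and \c RHS populated elsewhere:
+/// \code
+///   struct Elt : ilist_node<Elt> { int V = 0; };
+///   auto Less = [](const Elt &A, const Elt &B) { return A.V < B.V; };
+///   L.sort(Less);       // e.g. {3, 1, 2} becomes {1, 2, 3}
+///   L.merge(RHS, Less); // splices RHS's sorted nodes into L, keeping order
+/// \endcode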
+
+} // end namespace llvm
+
+#endif // LLVM_ADT_SIMPLE_ILIST_H
+
+#ifdef __GNUC__
+#pragma GCC diagnostic pop
+#endif