author      thegeorg <thegeorg@yandex-team.com>  2024-03-13 13:58:24 +0300
committer   thegeorg <thegeorg@yandex-team.com>  2024-03-13 14:11:53 +0300
commit      11a895b7e15d1c5a1f52706396b82e3f9db953cb (patch)
tree        fabc6d883b0f946151f61ae7865cee9f529a1fdd /contrib/libs/llvm16
parent      9685917341315774aad5733b1793b1e533a88bbb (diff)
download    ydb-11a895b7e15d1c5a1f52706396b82e3f9db953cb.tar.gz
Export clang-format16 via ydblib project
6e6be3a95868fde888d801b7590af4044049563f
Diffstat (limited to 'contrib/libs/llvm16')
-rw-r--r--   contrib/libs/llvm16/include/llvm/ADT/ImmutableList.h              257
-rw-r--r--   contrib/libs/llvm16/include/llvm/ADT/ImmutableMap.h               341
-rw-r--r--   contrib/libs/llvm16/include/llvm/ADT/ImmutableSet.h              1182
-rw-r--r--   contrib/libs/llvm16/include/llvm/IR/FixedPointBuilder.h           478
-rw-r--r--   contrib/libs/llvm16/include/llvm/Support/LoongArchTargetParser.h   26
5 files changed, 2284 insertions, 0 deletions
diff --git a/contrib/libs/llvm16/include/llvm/ADT/ImmutableList.h b/contrib/libs/llvm16/include/llvm/ADT/ImmutableList.h
new file mode 100644
index 0000000000..182670e455
--- /dev/null
+++ b/contrib/libs/llvm16/include/llvm/ADT/ImmutableList.h
@@ -0,0 +1,257 @@
+#pragma once
+
+#ifdef __GNUC__
+#pragma GCC diagnostic push
+#pragma GCC diagnostic ignored "-Wunused-parameter"
+#endif
+
+//==--- ImmutableList.h - Immutable (functional) list interface --*- C++ -*-==//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+///
+/// \file
+/// This file defines the ImmutableList class.
+///
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_ADT_IMMUTABLELIST_H
+#define LLVM_ADT_IMMUTABLELIST_H
+
+#include "llvm/ADT/FoldingSet.h"
+#include "llvm/Support/Allocator.h"
+#include <cassert>
+#include <cstdint>
+#include <new>
+
+namespace llvm {
+
+template <typename T> class ImmutableListFactory;
+
+template <typename T>
+class ImmutableListImpl : public FoldingSetNode {
+ friend class ImmutableListFactory<T>;
+
+ T Head;
+ const ImmutableListImpl* Tail;
+
+ template <typename ElemT>
+ ImmutableListImpl(ElemT &&head, const ImmutableListImpl *tail = nullptr)
+ : Head(std::forward<ElemT>(head)), Tail(tail) {}
+
+public:
+ ImmutableListImpl(const ImmutableListImpl &) = delete;
+ ImmutableListImpl &operator=(const ImmutableListImpl &) = delete;
+
+ const T& getHead() const { return Head; }
+ const ImmutableListImpl* getTail() const { return Tail; }
+
+ static inline void Profile(FoldingSetNodeID& ID, const T& H,
+ const ImmutableListImpl* L){
+ ID.AddPointer(L);
+ ID.Add(H);
+ }
+
+ void Profile(FoldingSetNodeID& ID) {
+ Profile(ID, Head, Tail);
+ }
+};
+
+/// ImmutableList - This class represents an immutable (functional) list.
+/// It is implemented as a smart pointer (wraps ImmutableListImpl), so it
+/// is intended to always be copied by value as if it were a pointer.
+/// This interface matches ImmutableSet and ImmutableMap. ImmutableList
+/// objects should almost never be created directly, and instead should
+/// be created by ImmutableListFactory objects that manage the lifetime
+/// of a group of lists. When the factory object is reclaimed, all lists
+/// created by that factory are released as well.
+template <typename T>
+class ImmutableList {
+public:
+ using value_type = T;
+ using Factory = ImmutableListFactory<T>;
+
+ static_assert(std::is_trivially_destructible<T>::value,
+ "T must be trivially destructible!");
+
+private:
+ const ImmutableListImpl<T>* X;
+
+public:
+ // This constructor should normally only be called by ImmutableListFactory<T>.
+ // There may be cases, however, when one needs to extract the internal pointer
+ // and reconstruct a list object from that pointer.
+ ImmutableList(const ImmutableListImpl<T>* x = nullptr) : X(x) {}
+
+ const ImmutableListImpl<T>* getInternalPointer() const {
+ return X;
+ }
+
+ class iterator {
+ const ImmutableListImpl<T>* L = nullptr;
+
+ public:
+ iterator() = default;
+ iterator(ImmutableList l) : L(l.getInternalPointer()) {}
+
+ iterator& operator++() { L = L->getTail(); return *this; }
+ bool operator==(const iterator& I) const { return L == I.L; }
+ bool operator!=(const iterator& I) const { return L != I.L; }
+ const value_type& operator*() const { return L->getHead(); }
+ const std::remove_reference_t<value_type> *operator->() const {
+ return &L->getHead();
+ }
+
+ ImmutableList getList() const { return L; }
+ };
+
+ /// begin - Returns an iterator referring to the head of the list, or
+ /// an iterator denoting the end of the list if the list is empty.
+ iterator begin() const { return iterator(X); }
+
+ /// end - Returns an iterator denoting the end of the list. This iterator
+ /// does not refer to a valid list element.
+ iterator end() const { return iterator(); }
+
+ /// isEmpty - Returns true if the list is empty.
+ bool isEmpty() const { return !X; }
+
+ bool contains(const T& V) const {
+ for (iterator I = begin(), E = end(); I != E; ++I) {
+ if (*I == V)
+ return true;
+ }
+ return false;
+ }
+
+ /// isEqual - Returns true if two lists are equal. Because all lists created
+ /// from the same ImmutableListFactory are uniqued, this has O(1) complexity
+  /// because the contents of the lists do not need to be compared. Note
+ /// that you should only compare two lists created from the same
+ /// ImmutableListFactory.
+ bool isEqual(const ImmutableList& L) const { return X == L.X; }
+
+ bool operator==(const ImmutableList& L) const { return isEqual(L); }
+
+ /// getHead - Returns the head of the list.
+ const T& getHead() const {
+ assert(!isEmpty() && "Cannot get the head of an empty list.");
+ return X->getHead();
+ }
+
+ /// getTail - Returns the tail of the list, which is another (possibly empty)
+ /// ImmutableList.
+ ImmutableList getTail() const {
+ return X ? X->getTail() : nullptr;
+ }
+
+ void Profile(FoldingSetNodeID& ID) const {
+ ID.AddPointer(X);
+ }
+};
+
+template <typename T>
+class ImmutableListFactory {
+ using ListTy = ImmutableListImpl<T>;
+ using CacheTy = FoldingSet<ListTy>;
+
+ CacheTy Cache;
+ uintptr_t Allocator;
+
+ bool ownsAllocator() const {
+ return (Allocator & 0x1) == 0;
+ }
+
+ BumpPtrAllocator& getAllocator() const {
+ return *reinterpret_cast<BumpPtrAllocator*>(Allocator & ~0x1);
+ }
+
+public:
+ ImmutableListFactory()
+ : Allocator(reinterpret_cast<uintptr_t>(new BumpPtrAllocator())) {}
+
+ ImmutableListFactory(BumpPtrAllocator& Alloc)
+ : Allocator(reinterpret_cast<uintptr_t>(&Alloc) | 0x1) {}
+
+ ~ImmutableListFactory() {
+ if (ownsAllocator()) delete &getAllocator();
+ }
+
+ template <typename ElemT>
+ [[nodiscard]] ImmutableList<T> concat(ElemT &&Head, ImmutableList<T> Tail) {
+ // Profile the new list to see if it already exists in our cache.
+ FoldingSetNodeID ID;
+ void* InsertPos;
+
+ const ListTy* TailImpl = Tail.getInternalPointer();
+ ListTy::Profile(ID, Head, TailImpl);
+ ListTy* L = Cache.FindNodeOrInsertPos(ID, InsertPos);
+
+ if (!L) {
+ // The list does not exist in our cache. Create it.
+ BumpPtrAllocator& A = getAllocator();
+ L = (ListTy*) A.Allocate<ListTy>();
+ new (L) ListTy(std::forward<ElemT>(Head), TailImpl);
+
+ // Insert the new list into the cache.
+ Cache.InsertNode(L, InsertPos);
+ }
+
+ return L;
+ }
+
+ template <typename ElemT>
+ [[nodiscard]] ImmutableList<T> add(ElemT &&Data, ImmutableList<T> L) {
+ return concat(std::forward<ElemT>(Data), L);
+ }
+
+ template <typename... CtorArgs>
+ [[nodiscard]] ImmutableList<T> emplace(ImmutableList<T> Tail,
+ CtorArgs &&...Args) {
+ return concat(T(std::forward<CtorArgs>(Args)...), Tail);
+ }
+
+ ImmutableList<T> getEmptyList() const {
+ return ImmutableList<T>(nullptr);
+ }
+
+ template <typename ElemT>
+ ImmutableList<T> create(ElemT &&Data) {
+ return concat(std::forward<ElemT>(Data), getEmptyList());
+ }
+};
+
+//===----------------------------------------------------------------------===//
+// Partially-specialized Traits.
+//===----------------------------------------------------------------------===//
+
+template <typename T> struct DenseMapInfo<ImmutableList<T>, void> {
+ static inline ImmutableList<T> getEmptyKey() {
+ return reinterpret_cast<ImmutableListImpl<T>*>(-1);
+ }
+
+ static inline ImmutableList<T> getTombstoneKey() {
+ return reinterpret_cast<ImmutableListImpl<T>*>(-2);
+ }
+
+ static unsigned getHashValue(ImmutableList<T> X) {
+ uintptr_t PtrVal = reinterpret_cast<uintptr_t>(X.getInternalPointer());
+ return (unsigned((uintptr_t)PtrVal) >> 4) ^
+ (unsigned((uintptr_t)PtrVal) >> 9);
+ }
+
+ static bool isEqual(ImmutableList<T> X1, ImmutableList<T> X2) {
+ return X1 == X2;
+ }
+};
+
+} // end namespace llvm
+
+#endif // LLVM_ADT_IMMUTABLELIST_H
+
+#ifdef __GNUC__
+#pragma GCC diagnostic pop
+#endif
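
For context, a minimal sketch of how the ImmutableList API added above is typically used (illustrative only; the function and values below are not part of this commit). Lists are built through a factory, which uniques nodes so that list equality reduces to a pointer comparison:

    #include "llvm/ADT/ImmutableList.h"

    void exampleImmutableList() {
      llvm::ImmutableListFactory<int> F;
      llvm::ImmutableList<int> Empty = F.getEmptyList();
      llvm::ImmutableList<int> L1 = F.add(1, F.add(2, Empty)); // list [1, 2]
      llvm::ImmutableList<int> L2 = F.add(1, F.add(2, Empty)); // same uniqued nodes
      bool Same = L1.isEqual(L2);   // true: O(1) pointer comparison
      bool HasTwo = L1.contains(2); // true: linear scan
      (void)Same; (void)HasTwo;
    }

All lists created by F are released when F is destroyed, which is why the factory (not the lists) owns the BumpPtrAllocator.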
diff --git a/contrib/libs/llvm16/include/llvm/ADT/ImmutableMap.h b/contrib/libs/llvm16/include/llvm/ADT/ImmutableMap.h
new file mode 100644
index 0000000000..50396cdeb0
--- /dev/null
+++ b/contrib/libs/llvm16/include/llvm/ADT/ImmutableMap.h
@@ -0,0 +1,341 @@
+#pragma once
+
+#ifdef __GNUC__
+#pragma GCC diagnostic push
+#pragma GCC diagnostic ignored "-Wunused-parameter"
+#endif
+
+//===--- ImmutableMap.h - Immutable (functional) map interface --*- C++ -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+///
+/// \file
+/// This file defines the ImmutableMap class.
+///
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_ADT_IMMUTABLEMAP_H
+#define LLVM_ADT_IMMUTABLEMAP_H
+
+#include "llvm/ADT/FoldingSet.h"
+#include "llvm/ADT/ImmutableSet.h"
+#include "llvm/Support/Allocator.h"
+#include <utility>
+
+namespace llvm {
+
+/// ImutKeyValueInfo - Traits class used by ImmutableMap. While both the first
+/// and second elements in a pair are used to generate profile information,
+/// only the first element (the key) is used by isEqual and isLess.
+template <typename T, typename S>
+struct ImutKeyValueInfo {
+ using value_type = const std::pair<T,S>;
+ using value_type_ref = const value_type&;
+ using key_type = const T;
+ using key_type_ref = const T&;
+ using data_type = const S;
+ using data_type_ref = const S&;
+
+ static inline key_type_ref KeyOfValue(value_type_ref V) {
+ return V.first;
+ }
+
+ static inline data_type_ref DataOfValue(value_type_ref V) {
+ return V.second;
+ }
+
+ static inline bool isEqual(key_type_ref L, key_type_ref R) {
+ return ImutContainerInfo<T>::isEqual(L,R);
+ }
+ static inline bool isLess(key_type_ref L, key_type_ref R) {
+ return ImutContainerInfo<T>::isLess(L,R);
+ }
+
+ static inline bool isDataEqual(data_type_ref L, data_type_ref R) {
+ return ImutContainerInfo<S>::isEqual(L,R);
+ }
+
+ static inline void Profile(FoldingSetNodeID& ID, value_type_ref V) {
+ ImutContainerInfo<T>::Profile(ID, V.first);
+ ImutContainerInfo<S>::Profile(ID, V.second);
+ }
+};
+
+template <typename KeyT, typename ValT,
+ typename ValInfo = ImutKeyValueInfo<KeyT,ValT>>
+class ImmutableMap {
+public:
+ using value_type = typename ValInfo::value_type;
+ using value_type_ref = typename ValInfo::value_type_ref;
+ using key_type = typename ValInfo::key_type;
+ using key_type_ref = typename ValInfo::key_type_ref;
+ using data_type = typename ValInfo::data_type;
+ using data_type_ref = typename ValInfo::data_type_ref;
+ using TreeTy = ImutAVLTree<ValInfo>;
+
+protected:
+ IntrusiveRefCntPtr<TreeTy> Root;
+
+public:
+ /// Constructs a map from a pointer to a tree root. In general one
+ /// should use a Factory object to create maps instead of directly
+  /// invoking the constructor, but there are cases where making this
+ /// constructor public is useful.
+ explicit ImmutableMap(const TreeTy *R) : Root(const_cast<TreeTy *>(R)) {}
+
+ class Factory {
+ typename TreeTy::Factory F;
+ const bool Canonicalize;
+
+ public:
+ Factory(bool canonicalize = true) : Canonicalize(canonicalize) {}
+
+ Factory(BumpPtrAllocator &Alloc, bool canonicalize = true)
+ : F(Alloc), Canonicalize(canonicalize) {}
+
+ Factory(const Factory &) = delete;
+ Factory &operator=(const Factory &) = delete;
+
+ ImmutableMap getEmptyMap() { return ImmutableMap(F.getEmptyTree()); }
+
+ [[nodiscard]] ImmutableMap add(ImmutableMap Old, key_type_ref K,
+ data_type_ref D) {
+ TreeTy *T = F.add(Old.Root.get(), std::pair<key_type, data_type>(K, D));
+ return ImmutableMap(Canonicalize ? F.getCanonicalTree(T): T);
+ }
+
+ [[nodiscard]] ImmutableMap remove(ImmutableMap Old, key_type_ref K) {
+ TreeTy *T = F.remove(Old.Root.get(), K);
+ return ImmutableMap(Canonicalize ? F.getCanonicalTree(T): T);
+ }
+
+ typename TreeTy::Factory *getTreeFactory() const {
+ return const_cast<typename TreeTy::Factory *>(&F);
+ }
+ };
+
+ bool contains(key_type_ref K) const {
+ return Root ? Root->contains(K) : false;
+ }
+
+ bool operator==(const ImmutableMap &RHS) const {
+ return Root && RHS.Root ? Root->isEqual(*RHS.Root.get()) : Root == RHS.Root;
+ }
+
+ bool operator!=(const ImmutableMap &RHS) const {
+ return Root && RHS.Root ? Root->isNotEqual(*RHS.Root.get())
+ : Root != RHS.Root;
+ }
+
+ TreeTy *getRoot() const {
+ if (Root) { Root->retain(); }
+ return Root.get();
+ }
+
+ TreeTy *getRootWithoutRetain() const { return Root.get(); }
+
+ void manualRetain() {
+ if (Root) Root->retain();
+ }
+
+ void manualRelease() {
+ if (Root) Root->release();
+ }
+
+ bool isEmpty() const { return !Root; }
+
+public:
+ //===--------------------------------------------------===//
+ // For testing.
+ //===--------------------------------------------------===//
+
+ void verify() const { if (Root) Root->verify(); }
+
+ //===--------------------------------------------------===//
+ // Iterators.
+ //===--------------------------------------------------===//
+
+ class iterator : public ImutAVLValueIterator<ImmutableMap> {
+ friend class ImmutableMap;
+
+ iterator() = default;
+ explicit iterator(TreeTy *Tree) : iterator::ImutAVLValueIterator(Tree) {}
+
+ public:
+ key_type_ref getKey() const { return (*this)->first; }
+ data_type_ref getData() const { return (*this)->second; }
+ };
+
+ iterator begin() const { return iterator(Root.get()); }
+ iterator end() const { return iterator(); }
+
+ data_type* lookup(key_type_ref K) const {
+ if (Root) {
+ TreeTy* T = Root->find(K);
+ if (T) return &T->getValue().second;
+ }
+
+ return nullptr;
+ }
+
+ /// getMaxElement - Returns the <key,value> pair in the ImmutableMap for
+ /// which key is the highest in the ordering of keys in the map. This
+ /// method returns NULL if the map is empty.
+ value_type* getMaxElement() const {
+ return Root ? &(Root->getMaxElement()->getValue()) : nullptr;
+ }
+
+ //===--------------------------------------------------===//
+ // Utility methods.
+ //===--------------------------------------------------===//
+
+ unsigned getHeight() const { return Root ? Root->getHeight() : 0; }
+
+ static inline void Profile(FoldingSetNodeID& ID, const ImmutableMap& M) {
+ ID.AddPointer(M.Root.get());
+ }
+
+ inline void Profile(FoldingSetNodeID& ID) const {
+ return Profile(ID,*this);
+ }
+};
+
+// NOTE: This will possibly become the new implementation of ImmutableMap some day.
+template <typename KeyT, typename ValT,
+          typename ValInfo = ImutKeyValueInfo<KeyT,ValT>>
+class ImmutableMapRef {
+public:
+ using value_type = typename ValInfo::value_type;
+ using value_type_ref = typename ValInfo::value_type_ref;
+ using key_type = typename ValInfo::key_type;
+ using key_type_ref = typename ValInfo::key_type_ref;
+ using data_type = typename ValInfo::data_type;
+ using data_type_ref = typename ValInfo::data_type_ref;
+ using TreeTy = ImutAVLTree<ValInfo>;
+ using FactoryTy = typename TreeTy::Factory;
+
+protected:
+ IntrusiveRefCntPtr<TreeTy> Root;
+ FactoryTy *Factory;
+
+public:
+ /// Constructs a map from a pointer to a tree root. In general one
+ /// should use a Factory object to create maps instead of directly
+  /// invoking the constructor, but there are cases where making this
+ /// constructor public is useful.
+ ImmutableMapRef(const TreeTy *R, FactoryTy *F)
+ : Root(const_cast<TreeTy *>(R)), Factory(F) {}
+
+ ImmutableMapRef(const ImmutableMap<KeyT, ValT> &X,
+ typename ImmutableMap<KeyT, ValT>::Factory &F)
+ : Root(X.getRootWithoutRetain()), Factory(F.getTreeFactory()) {}
+
+ static inline ImmutableMapRef getEmptyMap(FactoryTy *F) {
+ return ImmutableMapRef(nullptr, F);
+ }
+
+ void manualRetain() {
+ if (Root) Root->retain();
+ }
+
+ void manualRelease() {
+ if (Root) Root->release();
+ }
+
+ ImmutableMapRef add(key_type_ref K, data_type_ref D) const {
+ TreeTy *NewT =
+ Factory->add(Root.get(), std::pair<key_type, data_type>(K, D));
+ return ImmutableMapRef(NewT, Factory);
+ }
+
+ ImmutableMapRef remove(key_type_ref K) const {
+ TreeTy *NewT = Factory->remove(Root.get(), K);
+ return ImmutableMapRef(NewT, Factory);
+ }
+
+ bool contains(key_type_ref K) const {
+ return Root ? Root->contains(K) : false;
+ }
+
+ ImmutableMap<KeyT, ValT> asImmutableMap() const {
+ return ImmutableMap<KeyT, ValT>(Factory->getCanonicalTree(Root.get()));
+ }
+
+ bool operator==(const ImmutableMapRef &RHS) const {
+ return Root && RHS.Root ? Root->isEqual(*RHS.Root.get()) : Root == RHS.Root;
+ }
+
+ bool operator!=(const ImmutableMapRef &RHS) const {
+ return Root && RHS.Root ? Root->isNotEqual(*RHS.Root.get())
+ : Root != RHS.Root;
+ }
+
+ bool isEmpty() const { return !Root; }
+
+ //===--------------------------------------------------===//
+ // For testing.
+ //===--------------------------------------------------===//
+
+ void verify() const {
+ if (Root)
+ Root->verify();
+ }
+
+ //===--------------------------------------------------===//
+ // Iterators.
+ //===--------------------------------------------------===//
+
+ class iterator : public ImutAVLValueIterator<ImmutableMapRef> {
+ friend class ImmutableMapRef;
+
+ iterator() = default;
+ explicit iterator(TreeTy *Tree) : iterator::ImutAVLValueIterator(Tree) {}
+
+ public:
+ key_type_ref getKey() const { return (*this)->first; }
+ data_type_ref getData() const { return (*this)->second; }
+ };
+
+ iterator begin() const { return iterator(Root.get()); }
+ iterator end() const { return iterator(); }
+
+ data_type *lookup(key_type_ref K) const {
+ if (Root) {
+ TreeTy* T = Root->find(K);
+ if (T) return &T->getValue().second;
+ }
+
+ return nullptr;
+ }
+
+ /// getMaxElement - Returns the <key,value> pair in the ImmutableMap for
+ /// which key is the highest in the ordering of keys in the map. This
+ /// method returns NULL if the map is empty.
+ value_type* getMaxElement() const {
+ return Root ? &(Root->getMaxElement()->getValue()) : nullptr;
+ }
+
+ //===--------------------------------------------------===//
+ // Utility methods.
+ //===--------------------------------------------------===//
+
+ unsigned getHeight() const { return Root ? Root->getHeight() : 0; }
+
+ static inline void Profile(FoldingSetNodeID &ID, const ImmutableMapRef &M) {
+ ID.AddPointer(M.Root.get());
+ }
+
+ inline void Profile(FoldingSetNodeID &ID) const { return Profile(ID, *this); }
+};
+
+} // end namespace llvm
+
+#endif // LLVM_ADT_IMMUTABLEMAP_H
+
+#ifdef __GNUC__
+#pragma GCC diagnostic pop
+#endif
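
For context, a minimal sketch of how the ImmutableMap API added above is typically used (illustrative only; the identifiers below are not part of this commit). Every add/remove returns a new map that shares structure with the old one, and the Factory owns all tree nodes:

    #include "llvm/ADT/ImmutableMap.h"

    void exampleImmutableMap() {
      llvm::ImmutableMap<int, int>::Factory F;
      llvm::ImmutableMap<int, int> M0 = F.getEmptyMap();
      llvm::ImmutableMap<int, int> M1 = F.add(M0, /*Key=*/1, /*Data=*/10);
      llvm::ImmutableMap<int, int> M2 = F.add(M1, 2, 20);
      if (const int *V = M2.lookup(1)) { // lookup returns a pointer, or nullptr
        (void)V;                         // *V == 10; M1 is unchanged and still lacks key 2
      }
      llvm::ImmutableMap<int, int> M3 = F.remove(M2, 1); // M2 itself is unchanged
      (void)M3;
    }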
diff --git a/contrib/libs/llvm16/include/llvm/ADT/ImmutableSet.h b/contrib/libs/llvm16/include/llvm/ADT/ImmutableSet.h
new file mode 100644
index 0000000000..be64a514a9
--- /dev/null
+++ b/contrib/libs/llvm16/include/llvm/ADT/ImmutableSet.h
@@ -0,0 +1,1182 @@
+#pragma once
+
+#ifdef __GNUC__
+#pragma GCC diagnostic push
+#pragma GCC diagnostic ignored "-Wunused-parameter"
+#endif
+
+//===--- ImmutableSet.h - Immutable (functional) set interface --*- C++ -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+///
+/// \file
+/// This file defines the ImutAVLTree and ImmutableSet classes.
+///
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_ADT_IMMUTABLESET_H
+#define LLVM_ADT_IMMUTABLESET_H
+
+#include "llvm/ADT/DenseMap.h"
+#include "llvm/ADT/FoldingSet.h"
+#include "llvm/ADT/IntrusiveRefCntPtr.h"
+#include "llvm/ADT/SmallVector.h"
+#include "llvm/ADT/iterator.h"
+#include "llvm/Support/Allocator.h"
+#include "llvm/Support/ErrorHandling.h"
+#include <cassert>
+#include <cstdint>
+#include <functional>
+#include <iterator>
+#include <new>
+#include <vector>
+
+namespace llvm {
+
+//===----------------------------------------------------------------------===//
+// Immutable AVL-Tree Definition.
+//===----------------------------------------------------------------------===//
+
+template <typename ImutInfo> class ImutAVLFactory;
+template <typename ImutInfo> class ImutIntervalAVLFactory;
+template <typename ImutInfo> class ImutAVLTreeInOrderIterator;
+template <typename ImutInfo> class ImutAVLTreeGenericIterator;
+
+template <typename ImutInfo >
+class ImutAVLTree {
+public:
+ using key_type_ref = typename ImutInfo::key_type_ref;
+ using value_type = typename ImutInfo::value_type;
+ using value_type_ref = typename ImutInfo::value_type_ref;
+ using Factory = ImutAVLFactory<ImutInfo>;
+ using iterator = ImutAVLTreeInOrderIterator<ImutInfo>;
+
+ friend class ImutAVLFactory<ImutInfo>;
+ friend class ImutIntervalAVLFactory<ImutInfo>;
+ friend class ImutAVLTreeGenericIterator<ImutInfo>;
+
+ //===----------------------------------------------------===//
+ // Public Interface.
+ //===----------------------------------------------------===//
+
+ /// Return a pointer to the left subtree. This value
+ /// is NULL if there is no left subtree.
+ ImutAVLTree *getLeft() const { return left; }
+
+ /// Return a pointer to the right subtree. This value is
+ /// NULL if there is no right subtree.
+ ImutAVLTree *getRight() const { return right; }
+
+ /// getHeight - Returns the height of the tree. A tree with no subtrees
+ /// has a height of 1.
+ unsigned getHeight() const { return height; }
+
+ /// getValue - Returns the data value associated with the tree node.
+ const value_type& getValue() const { return value; }
+
+ /// find - Finds the subtree associated with the specified key value.
+ /// This method returns NULL if no matching subtree is found.
+ ImutAVLTree* find(key_type_ref K) {
+ ImutAVLTree *T = this;
+ while (T) {
+ key_type_ref CurrentKey = ImutInfo::KeyOfValue(T->getValue());
+ if (ImutInfo::isEqual(K,CurrentKey))
+ return T;
+ else if (ImutInfo::isLess(K,CurrentKey))
+ T = T->getLeft();
+ else
+ T = T->getRight();
+ }
+ return nullptr;
+ }
+
+ /// getMaxElement - Find the subtree associated with the highest ranged
+ /// key value.
+ ImutAVLTree* getMaxElement() {
+ ImutAVLTree *T = this;
+ ImutAVLTree *Right = T->getRight();
+ while (Right) { T = Right; Right = T->getRight(); }
+ return T;
+ }
+
+ /// size - Returns the number of nodes in the tree, which includes
+ /// both leaves and non-leaf nodes.
+ unsigned size() const {
+ unsigned n = 1;
+ if (const ImutAVLTree* L = getLeft())
+ n += L->size();
+ if (const ImutAVLTree* R = getRight())
+ n += R->size();
+ return n;
+ }
+
+ /// begin - Returns an iterator that iterates over the nodes of the tree
+  /// in an inorder traversal. The returned iterator thus refers to the
+  /// tree node with the minimum data element.
+ iterator begin() const { return iterator(this); }
+
+ /// end - Returns an iterator for the tree that denotes the end of an
+ /// inorder traversal.
+ iterator end() const { return iterator(); }
+
+ bool isElementEqual(value_type_ref V) const {
+ // Compare the keys.
+ if (!ImutInfo::isEqual(ImutInfo::KeyOfValue(getValue()),
+ ImutInfo::KeyOfValue(V)))
+ return false;
+
+ // Also compare the data values.
+ if (!ImutInfo::isDataEqual(ImutInfo::DataOfValue(getValue()),
+ ImutInfo::DataOfValue(V)))
+ return false;
+
+ return true;
+ }
+
+ bool isElementEqual(const ImutAVLTree* RHS) const {
+ return isElementEqual(RHS->getValue());
+ }
+
+ /// isEqual - Compares two trees for structural equality and returns true
+  /// if they are equal. The worst-case performance of this operation is
+  /// linear in the sizes of the trees.
+ bool isEqual(const ImutAVLTree& RHS) const {
+ if (&RHS == this)
+ return true;
+
+ iterator LItr = begin(), LEnd = end();
+ iterator RItr = RHS.begin(), REnd = RHS.end();
+
+ while (LItr != LEnd && RItr != REnd) {
+ if (&*LItr == &*RItr) {
+ LItr.skipSubTree();
+ RItr.skipSubTree();
+ continue;
+ }
+
+ if (!LItr->isElementEqual(&*RItr))
+ return false;
+
+ ++LItr;
+ ++RItr;
+ }
+
+ return LItr == LEnd && RItr == REnd;
+ }
+
+ /// isNotEqual - Compares two trees for structural inequality. Performance
+  /// is the same as isEqual.
+ bool isNotEqual(const ImutAVLTree& RHS) const { return !isEqual(RHS); }
+
+ /// contains - Returns true if this tree contains a subtree (node) that
+  /// has a data element that matches the specified key. Complexity
+ /// is logarithmic in the size of the tree.
+ bool contains(key_type_ref K) { return (bool) find(K); }
+
+ /// validateTree - A utility method that checks that the balancing and
+ /// ordering invariants of the tree are satisfied. It is a recursive
+ /// method that returns the height of the tree, which is then consumed
+ /// by the enclosing validateTree call. External callers should ignore the
+ /// return value. An invalid tree will cause an assertion to fire in
+ /// a debug build.
+ unsigned validateTree() const {
+ unsigned HL = getLeft() ? getLeft()->validateTree() : 0;
+ unsigned HR = getRight() ? getRight()->validateTree() : 0;
+ (void) HL;
+ (void) HR;
+
+ assert(getHeight() == ( HL > HR ? HL : HR ) + 1
+ && "Height calculation wrong");
+
+ assert((HL > HR ? HL-HR : HR-HL) <= 2
+ && "Balancing invariant violated");
+
+ assert((!getLeft() ||
+ ImutInfo::isLess(ImutInfo::KeyOfValue(getLeft()->getValue()),
+ ImutInfo::KeyOfValue(getValue()))) &&
+           "Value in left child is not less than current value");
+
+ assert((!getRight() ||
+ ImutInfo::isLess(ImutInfo::KeyOfValue(getValue()),
+ ImutInfo::KeyOfValue(getRight()->getValue()))) &&
+           "Current value is not less than value of right child");
+
+ return getHeight();
+ }
+
+ //===----------------------------------------------------===//
+ // Internal values.
+ //===----------------------------------------------------===//
+
+private:
+ Factory *factory;
+ ImutAVLTree *left;
+ ImutAVLTree *right;
+ ImutAVLTree *prev = nullptr;
+ ImutAVLTree *next = nullptr;
+
+ unsigned height : 28;
+ bool IsMutable : 1;
+ bool IsDigestCached : 1;
+ bool IsCanonicalized : 1;
+
+ value_type value;
+ uint32_t digest = 0;
+ uint32_t refCount = 0;
+
+ //===----------------------------------------------------===//
+ // Internal methods (node manipulation; used by Factory).
+ //===----------------------------------------------------===//
+
+private:
+ /// ImutAVLTree - Internal constructor that is only called by
+ /// ImutAVLFactory.
+ ImutAVLTree(Factory *f, ImutAVLTree* l, ImutAVLTree* r, value_type_ref v,
+ unsigned height)
+ : factory(f), left(l), right(r), height(height), IsMutable(true),
+ IsDigestCached(false), IsCanonicalized(false), value(v)
+ {
+ if (left) left->retain();
+ if (right) right->retain();
+ }
+
+ /// isMutable - Returns true if the left and right subtree references
+ /// (as well as height) can be changed. If this method returns false,
+ /// the tree is truly immutable. Trees returned from an ImutAVLFactory
+ /// object should always have this method return true. Further, if this
+ /// method returns false for an instance of ImutAVLTree, all subtrees
+ /// will also have this method return false. The converse is not true.
+ bool isMutable() const { return IsMutable; }
+
+ /// hasCachedDigest - Returns true if the digest for this tree is cached.
+ /// This can only be true if the tree is immutable.
+ bool hasCachedDigest() const { return IsDigestCached; }
+
+ //===----------------------------------------------------===//
+ // Mutating operations. A tree root can be manipulated as
+ // long as its reference has not "escaped" from internal
+ // methods of a factory object (see below). When a tree
+ // pointer is externally viewable by client code, the
+ // internal "mutable bit" is cleared to mark the tree
+ // immutable. Note that a tree that still has its mutable
+ // bit set may have children (subtrees) that are themselves
+ // immutable.
+ //===----------------------------------------------------===//
+
+ /// markImmutable - Clears the mutable flag for a tree. After this happens,
+ /// it is an error to call setLeft(), setRight(), and setHeight().
+ void markImmutable() {
+ assert(isMutable() && "Mutable flag already removed.");
+ IsMutable = false;
+ }
+
+  /// markedCachedDigest - Marks the digest for this tree as cached.
+ void markedCachedDigest() {
+ assert(!hasCachedDigest() && "NoCachedDigest flag already removed.");
+ IsDigestCached = true;
+ }
+
+ /// setHeight - Changes the height of the tree. Used internally by
+ /// ImutAVLFactory.
+ void setHeight(unsigned h) {
+ assert(isMutable() && "Only a mutable tree can have its height changed.");
+ height = h;
+ }
+
+ static uint32_t computeDigest(ImutAVLTree *L, ImutAVLTree *R,
+ value_type_ref V) {
+ uint32_t digest = 0;
+
+ if (L)
+ digest += L->computeDigest();
+
+ // Compute digest of stored data.
+ FoldingSetNodeID ID;
+ ImutInfo::Profile(ID,V);
+ digest += ID.ComputeHash();
+
+ if (R)
+ digest += R->computeDigest();
+
+ return digest;
+ }
+
+ uint32_t computeDigest() {
+ // Check the lowest bit to determine if digest has actually been
+ // pre-computed.
+ if (hasCachedDigest())
+ return digest;
+
+ uint32_t X = computeDigest(getLeft(), getRight(), getValue());
+ digest = X;
+ markedCachedDigest();
+ return X;
+ }
+
+ //===----------------------------------------------------===//
+ // Reference count operations.
+ //===----------------------------------------------------===//
+
+public:
+ void retain() { ++refCount; }
+
+ void release() {
+ assert(refCount > 0);
+ if (--refCount == 0)
+ destroy();
+ }
+
+ void destroy() {
+ if (left)
+ left->release();
+ if (right)
+ right->release();
+ if (IsCanonicalized) {
+ if (next)
+ next->prev = prev;
+
+ if (prev)
+ prev->next = next;
+ else
+ factory->Cache[factory->maskCacheIndex(computeDigest())] = next;
+ }
+
+ // We need to clear the mutability bit in case we are
+ // destroying the node as part of a sweep in ImutAVLFactory::recoverNodes().
+ IsMutable = false;
+ factory->freeNodes.push_back(this);
+ }
+};
+
+template <typename ImutInfo>
+struct IntrusiveRefCntPtrInfo<ImutAVLTree<ImutInfo>> {
+ static void retain(ImutAVLTree<ImutInfo> *Tree) { Tree->retain(); }
+ static void release(ImutAVLTree<ImutInfo> *Tree) { Tree->release(); }
+};
+
+//===----------------------------------------------------------------------===//
+// Immutable AVL-Tree Factory class.
+//===----------------------------------------------------------------------===//
+
+template <typename ImutInfo >
+class ImutAVLFactory {
+ friend class ImutAVLTree<ImutInfo>;
+
+ using TreeTy = ImutAVLTree<ImutInfo>;
+ using value_type_ref = typename TreeTy::value_type_ref;
+ using key_type_ref = typename TreeTy::key_type_ref;
+ using CacheTy = DenseMap<unsigned, TreeTy*>;
+
+ CacheTy Cache;
+ uintptr_t Allocator;
+ std::vector<TreeTy*> createdNodes;
+ std::vector<TreeTy*> freeNodes;
+
+ bool ownsAllocator() const {
+ return (Allocator & 0x1) == 0;
+ }
+
+ BumpPtrAllocator& getAllocator() const {
+ return *reinterpret_cast<BumpPtrAllocator*>(Allocator & ~0x1);
+ }
+
+ //===--------------------------------------------------===//
+ // Public interface.
+ //===--------------------------------------------------===//
+
+public:
+ ImutAVLFactory()
+ : Allocator(reinterpret_cast<uintptr_t>(new BumpPtrAllocator())) {}
+
+ ImutAVLFactory(BumpPtrAllocator& Alloc)
+ : Allocator(reinterpret_cast<uintptr_t>(&Alloc) | 0x1) {}
+
+ ~ImutAVLFactory() {
+ if (ownsAllocator()) delete &getAllocator();
+ }
+
+ TreeTy* add(TreeTy* T, value_type_ref V) {
+ T = add_internal(V,T);
+ markImmutable(T);
+ recoverNodes();
+ return T;
+ }
+
+ TreeTy* remove(TreeTy* T, key_type_ref V) {
+ T = remove_internal(V,T);
+ markImmutable(T);
+ recoverNodes();
+ return T;
+ }
+
+ TreeTy* getEmptyTree() const { return nullptr; }
+
+protected:
+ //===--------------------------------------------------===//
+ // A bunch of quick helper functions used for reasoning
+ // about the properties of trees and their children.
+ // These have succinct names so that the balancing code
+ // is as terse (and readable) as possible.
+ //===--------------------------------------------------===//
+
+ bool isEmpty(TreeTy* T) const { return !T; }
+ unsigned getHeight(TreeTy* T) const { return T ? T->getHeight() : 0; }
+ TreeTy* getLeft(TreeTy* T) const { return T->getLeft(); }
+ TreeTy* getRight(TreeTy* T) const { return T->getRight(); }
+ value_type_ref getValue(TreeTy* T) const { return T->value; }
+
+ // Make sure the index is not the Tombstone or Entry key of the DenseMap.
+ static unsigned maskCacheIndex(unsigned I) { return (I & ~0x02); }
+
+ unsigned incrementHeight(TreeTy* L, TreeTy* R) const {
+ unsigned hl = getHeight(L);
+ unsigned hr = getHeight(R);
+ return (hl > hr ? hl : hr) + 1;
+ }
+
+ static bool compareTreeWithSection(TreeTy* T,
+ typename TreeTy::iterator& TI,
+ typename TreeTy::iterator& TE) {
+ typename TreeTy::iterator I = T->begin(), E = T->end();
+ for ( ; I!=E ; ++I, ++TI) {
+ if (TI == TE || !I->isElementEqual(&*TI))
+ return false;
+ }
+ return true;
+ }
+
+ //===--------------------------------------------------===//
+ // "createNode" is used to generate new tree roots that link
+ // to other trees. The function may also simply move links
+ // in an existing root if that root is still marked mutable.
+ // This is necessary because otherwise our balancing code
+ // would leak memory as it would create nodes that are
+ // then discarded later before the finished tree is
+ // returned to the caller.
+ //===--------------------------------------------------===//
+
+ TreeTy* createNode(TreeTy* L, value_type_ref V, TreeTy* R) {
+ BumpPtrAllocator& A = getAllocator();
+ TreeTy* T;
+ if (!freeNodes.empty()) {
+ T = freeNodes.back();
+ freeNodes.pop_back();
+ assert(T != L);
+ assert(T != R);
+ } else {
+ T = (TreeTy*) A.Allocate<TreeTy>();
+ }
+ new (T) TreeTy(this, L, R, V, incrementHeight(L,R));
+ createdNodes.push_back(T);
+ return T;
+ }
+
+ TreeTy* createNode(TreeTy* newLeft, TreeTy* oldTree, TreeTy* newRight) {
+ return createNode(newLeft, getValue(oldTree), newRight);
+ }
+
+ void recoverNodes() {
+ for (unsigned i = 0, n = createdNodes.size(); i < n; ++i) {
+ TreeTy *N = createdNodes[i];
+ if (N->isMutable() && N->refCount == 0)
+ N->destroy();
+ }
+ createdNodes.clear();
+ }
+
+ /// balanceTree - Used by add_internal and remove_internal to
+ /// balance a newly created tree.
+ TreeTy* balanceTree(TreeTy* L, value_type_ref V, TreeTy* R) {
+ unsigned hl = getHeight(L);
+ unsigned hr = getHeight(R);
+
+ if (hl > hr + 2) {
+ assert(!isEmpty(L) && "Left tree cannot be empty to have a height >= 2");
+
+ TreeTy *LL = getLeft(L);
+ TreeTy *LR = getRight(L);
+
+ if (getHeight(LL) >= getHeight(LR))
+ return createNode(LL, L, createNode(LR,V,R));
+
+ assert(!isEmpty(LR) && "LR cannot be empty because it has a height >= 1");
+
+ TreeTy *LRL = getLeft(LR);
+ TreeTy *LRR = getRight(LR);
+
+ return createNode(createNode(LL,L,LRL), LR, createNode(LRR,V,R));
+ }
+
+ if (hr > hl + 2) {
+ assert(!isEmpty(R) && "Right tree cannot be empty to have a height >= 2");
+
+ TreeTy *RL = getLeft(R);
+ TreeTy *RR = getRight(R);
+
+ if (getHeight(RR) >= getHeight(RL))
+ return createNode(createNode(L,V,RL), R, RR);
+
+ assert(!isEmpty(RL) && "RL cannot be empty because it has a height >= 1");
+
+ TreeTy *RLL = getLeft(RL);
+ TreeTy *RLR = getRight(RL);
+
+ return createNode(createNode(L,V,RLL), RL, createNode(RLR,R,RR));
+ }
+
+ return createNode(L,V,R);
+ }
+
+ /// add_internal - Creates a new tree that includes the specified
+ /// data and the data from the original tree. If the original tree
+ /// already contained the data item, the original tree is returned.
+ TreeTy* add_internal(value_type_ref V, TreeTy* T) {
+ if (isEmpty(T))
+ return createNode(T, V, T);
+ assert(!T->isMutable());
+
+ key_type_ref K = ImutInfo::KeyOfValue(V);
+ key_type_ref KCurrent = ImutInfo::KeyOfValue(getValue(T));
+
+ if (ImutInfo::isEqual(K,KCurrent))
+ return createNode(getLeft(T), V, getRight(T));
+ else if (ImutInfo::isLess(K,KCurrent))
+ return balanceTree(add_internal(V, getLeft(T)), getValue(T), getRight(T));
+ else
+ return balanceTree(getLeft(T), getValue(T), add_internal(V, getRight(T)));
+ }
+
+ /// remove_internal - Creates a new tree that includes all the data
+ /// from the original tree except the specified data. If the
+ /// specified data did not exist in the original tree, the original
+ /// tree is returned.
+ TreeTy* remove_internal(key_type_ref K, TreeTy* T) {
+ if (isEmpty(T))
+ return T;
+
+ assert(!T->isMutable());
+
+ key_type_ref KCurrent = ImutInfo::KeyOfValue(getValue(T));
+
+ if (ImutInfo::isEqual(K,KCurrent)) {
+ return combineTrees(getLeft(T), getRight(T));
+ } else if (ImutInfo::isLess(K,KCurrent)) {
+ return balanceTree(remove_internal(K, getLeft(T)),
+ getValue(T), getRight(T));
+ } else {
+ return balanceTree(getLeft(T), getValue(T),
+ remove_internal(K, getRight(T)));
+ }
+ }
+
+ TreeTy* combineTrees(TreeTy* L, TreeTy* R) {
+ if (isEmpty(L))
+ return R;
+ if (isEmpty(R))
+ return L;
+ TreeTy* OldNode;
+ TreeTy* newRight = removeMinBinding(R,OldNode);
+ return balanceTree(L, getValue(OldNode), newRight);
+ }
+
+ TreeTy* removeMinBinding(TreeTy* T, TreeTy*& Noderemoved) {
+ assert(!isEmpty(T));
+ if (isEmpty(getLeft(T))) {
+ Noderemoved = T;
+ return getRight(T);
+ }
+ return balanceTree(removeMinBinding(getLeft(T), Noderemoved),
+ getValue(T), getRight(T));
+ }
+
+ /// markImmutable - Clears the mutable bits of a root and all of its
+ /// descendants.
+ void markImmutable(TreeTy* T) {
+ if (!T || !T->isMutable())
+ return;
+ T->markImmutable();
+ markImmutable(getLeft(T));
+ markImmutable(getRight(T));
+ }
+
+public:
+ TreeTy *getCanonicalTree(TreeTy *TNew) {
+ if (!TNew)
+ return nullptr;
+
+ if (TNew->IsCanonicalized)
+ return TNew;
+
+ // Search the hashtable for another tree with the same digest, and
+  // if we find a collision, compare those trees by their contents.
+ unsigned digest = TNew->computeDigest();
+ TreeTy *&entry = Cache[maskCacheIndex(digest)];
+ do {
+ if (!entry)
+ break;
+ for (TreeTy *T = entry ; T != nullptr; T = T->next) {
+ // Compare the Contents('T') with Contents('TNew')
+ typename TreeTy::iterator TI = T->begin(), TE = T->end();
+ if (!compareTreeWithSection(TNew, TI, TE))
+ continue;
+ if (TI != TE)
+ continue; // T has more contents than TNew.
+ // Trees did match! Return 'T'.
+ if (TNew->refCount == 0)
+ TNew->destroy();
+ return T;
+ }
+ entry->prev = TNew;
+ TNew->next = entry;
+ }
+ while (false);
+
+ entry = TNew;
+ TNew->IsCanonicalized = true;
+ return TNew;
+ }
+};
+
+//===----------------------------------------------------------------------===//
+// Immutable AVL-Tree Iterators.
+//===----------------------------------------------------------------------===//
+
+template <typename ImutInfo> class ImutAVLTreeGenericIterator {
+ SmallVector<uintptr_t,20> stack;
+
+public:
+ using iterator_category = std::bidirectional_iterator_tag;
+ using value_type = ImutAVLTree<ImutInfo>;
+ using difference_type = std::ptrdiff_t;
+ using pointer = value_type *;
+ using reference = value_type &;
+
+ enum VisitFlag { VisitedNone=0x0, VisitedLeft=0x1, VisitedRight=0x3,
+ Flags=0x3 };
+
+ using TreeTy = ImutAVLTree<ImutInfo>;
+
+ ImutAVLTreeGenericIterator() = default;
+ ImutAVLTreeGenericIterator(const TreeTy *Root) {
+ if (Root) stack.push_back(reinterpret_cast<uintptr_t>(Root));
+ }
+
+ TreeTy &operator*() const {
+ assert(!stack.empty());
+ return *reinterpret_cast<TreeTy *>(stack.back() & ~Flags);
+ }
+ TreeTy *operator->() const { return &*this; }
+
+ uintptr_t getVisitState() const {
+ assert(!stack.empty());
+ return stack.back() & Flags;
+ }
+
+ bool atEnd() const { return stack.empty(); }
+
+ bool atBeginning() const {
+ return stack.size() == 1 && getVisitState() == VisitedNone;
+ }
+
+ void skipToParent() {
+ assert(!stack.empty());
+ stack.pop_back();
+ if (stack.empty())
+ return;
+ switch (getVisitState()) {
+ case VisitedNone:
+ stack.back() |= VisitedLeft;
+ break;
+ case VisitedLeft:
+ stack.back() |= VisitedRight;
+ break;
+ default:
+ llvm_unreachable("Unreachable.");
+ }
+ }
+
+ bool operator==(const ImutAVLTreeGenericIterator &x) const {
+ return stack == x.stack;
+ }
+
+ bool operator!=(const ImutAVLTreeGenericIterator &x) const {
+ return !(*this == x);
+ }
+
+ ImutAVLTreeGenericIterator &operator++() {
+ assert(!stack.empty());
+ TreeTy* Current = reinterpret_cast<TreeTy*>(stack.back() & ~Flags);
+ assert(Current);
+ switch (getVisitState()) {
+ case VisitedNone:
+ if (TreeTy* L = Current->getLeft())
+ stack.push_back(reinterpret_cast<uintptr_t>(L));
+ else
+ stack.back() |= VisitedLeft;
+ break;
+ case VisitedLeft:
+ if (TreeTy* R = Current->getRight())
+ stack.push_back(reinterpret_cast<uintptr_t>(R));
+ else
+ stack.back() |= VisitedRight;
+ break;
+ case VisitedRight:
+ skipToParent();
+ break;
+ default:
+ llvm_unreachable("Unreachable.");
+ }
+ return *this;
+ }
+
+ ImutAVLTreeGenericIterator &operator--() {
+ assert(!stack.empty());
+ TreeTy* Current = reinterpret_cast<TreeTy*>(stack.back() & ~Flags);
+ assert(Current);
+ switch (getVisitState()) {
+ case VisitedNone:
+ stack.pop_back();
+ break;
+ case VisitedLeft:
+ stack.back() &= ~Flags; // Set state to "VisitedNone."
+ if (TreeTy* L = Current->getLeft())
+ stack.push_back(reinterpret_cast<uintptr_t>(L) | VisitedRight);
+ break;
+ case VisitedRight:
+ stack.back() &= ~Flags;
+ stack.back() |= VisitedLeft;
+ if (TreeTy* R = Current->getRight())
+ stack.push_back(reinterpret_cast<uintptr_t>(R) | VisitedRight);
+ break;
+ default:
+ llvm_unreachable("Unreachable.");
+ }
+ return *this;
+ }
+};
+
+template <typename ImutInfo> class ImutAVLTreeInOrderIterator {
+ using InternalIteratorTy = ImutAVLTreeGenericIterator<ImutInfo>;
+
+ InternalIteratorTy InternalItr;
+
+public:
+ using iterator_category = std::bidirectional_iterator_tag;
+ using value_type = ImutAVLTree<ImutInfo>;
+ using difference_type = std::ptrdiff_t;
+ using pointer = value_type *;
+ using reference = value_type &;
+
+ using TreeTy = ImutAVLTree<ImutInfo>;
+
+ ImutAVLTreeInOrderIterator(const TreeTy* Root) : InternalItr(Root) {
+ if (Root)
+ ++*this; // Advance to first element.
+ }
+
+ ImutAVLTreeInOrderIterator() : InternalItr() {}
+
+ bool operator==(const ImutAVLTreeInOrderIterator &x) const {
+ return InternalItr == x.InternalItr;
+ }
+
+ bool operator!=(const ImutAVLTreeInOrderIterator &x) const {
+ return !(*this == x);
+ }
+
+ TreeTy &operator*() const { return *InternalItr; }
+ TreeTy *operator->() const { return &*InternalItr; }
+
+ ImutAVLTreeInOrderIterator &operator++() {
+ do ++InternalItr;
+ while (!InternalItr.atEnd() &&
+ InternalItr.getVisitState() != InternalIteratorTy::VisitedLeft);
+
+ return *this;
+ }
+
+ ImutAVLTreeInOrderIterator &operator--() {
+ do --InternalItr;
+ while (!InternalItr.atBeginning() &&
+ InternalItr.getVisitState() != InternalIteratorTy::VisitedLeft);
+
+ return *this;
+ }
+
+ void skipSubTree() {
+ InternalItr.skipToParent();
+
+ while (!InternalItr.atEnd() &&
+ InternalItr.getVisitState() != InternalIteratorTy::VisitedLeft)
+ ++InternalItr;
+ }
+};
+
+/// Generic iterator that wraps a T::TreeTy::iterator and exposes
+/// iterator::getValue() on dereference.
+template <typename T>
+struct ImutAVLValueIterator
+ : iterator_adaptor_base<
+ ImutAVLValueIterator<T>, typename T::TreeTy::iterator,
+ typename std::iterator_traits<
+ typename T::TreeTy::iterator>::iterator_category,
+ const typename T::value_type> {
+ ImutAVLValueIterator() = default;
+ explicit ImutAVLValueIterator(typename T::TreeTy *Tree)
+ : ImutAVLValueIterator::iterator_adaptor_base(Tree) {}
+
+ typename ImutAVLValueIterator::reference operator*() const {
+ return this->I->getValue();
+ }
+};
+
+//===----------------------------------------------------------------------===//
+// Trait classes for Profile information.
+//===----------------------------------------------------------------------===//
+
+/// Generic profile template. The default behavior is to invoke the
+/// profile method of an object. Specializations for primitive integers
+/// and generic handling of pointers are done below.
+template <typename T>
+struct ImutProfileInfo {
+ using value_type = const T;
+ using value_type_ref = const T&;
+
+ static void Profile(FoldingSetNodeID &ID, value_type_ref X) {
+ FoldingSetTrait<T>::Profile(X,ID);
+ }
+};
+
+/// Profile traits for integers.
+template <typename T>
+struct ImutProfileInteger {
+ using value_type = const T;
+ using value_type_ref = const T&;
+
+ static void Profile(FoldingSetNodeID &ID, value_type_ref X) {
+ ID.AddInteger(X);
+ }
+};
+
+#define PROFILE_INTEGER_INFO(X)\
+template<> struct ImutProfileInfo<X> : ImutProfileInteger<X> {};
+
+PROFILE_INTEGER_INFO(char)
+PROFILE_INTEGER_INFO(unsigned char)
+PROFILE_INTEGER_INFO(short)
+PROFILE_INTEGER_INFO(unsigned short)
+PROFILE_INTEGER_INFO(unsigned)
+PROFILE_INTEGER_INFO(signed)
+PROFILE_INTEGER_INFO(long)
+PROFILE_INTEGER_INFO(unsigned long)
+PROFILE_INTEGER_INFO(long long)
+PROFILE_INTEGER_INFO(unsigned long long)
+
+#undef PROFILE_INTEGER_INFO
+
+/// Profile traits for booleans.
+template <>
+struct ImutProfileInfo<bool> {
+ using value_type = const bool;
+ using value_type_ref = const bool&;
+
+ static void Profile(FoldingSetNodeID &ID, value_type_ref X) {
+ ID.AddBoolean(X);
+ }
+};
+
+/// Generic profile trait for pointer types. We treat pointers as
+/// references to unique objects.
+template <typename T>
+struct ImutProfileInfo<T*> {
+ using value_type = const T*;
+ using value_type_ref = value_type;
+
+ static void Profile(FoldingSetNodeID &ID, value_type_ref X) {
+ ID.AddPointer(X);
+ }
+};
+
+//===----------------------------------------------------------------------===//
+// Trait classes that contain element comparison operators and type
+// definitions used by ImutAVLTree, ImmutableSet, and ImmutableMap. These
+// inherit from the profile traits (ImutProfileInfo) to include operations
+// for element profiling.
+//===----------------------------------------------------------------------===//
+
+/// ImutContainerInfo - Generic definition of comparison operations for
+/// elements of immutable containers that defaults to using
+/// std::equal_to<> and std::less<> to perform comparison of elements.
+template <typename T>
+struct ImutContainerInfo : public ImutProfileInfo<T> {
+ using value_type = typename ImutProfileInfo<T>::value_type;
+ using value_type_ref = typename ImutProfileInfo<T>::value_type_ref;
+ using key_type = value_type;
+ using key_type_ref = value_type_ref;
+ using data_type = bool;
+ using data_type_ref = bool;
+
+ static key_type_ref KeyOfValue(value_type_ref D) { return D; }
+ static data_type_ref DataOfValue(value_type_ref) { return true; }
+
+ static bool isEqual(key_type_ref LHS, key_type_ref RHS) {
+ return std::equal_to<key_type>()(LHS,RHS);
+ }
+
+ static bool isLess(key_type_ref LHS, key_type_ref RHS) {
+ return std::less<key_type>()(LHS,RHS);
+ }
+
+ static bool isDataEqual(data_type_ref, data_type_ref) { return true; }
+};
+
+/// ImutContainerInfo - Specialization for pointer values to treat pointers
+/// as references to unique objects. Pointers are thus compared by
+/// their addresses.
+template <typename T>
+struct ImutContainerInfo<T*> : public ImutProfileInfo<T*> {
+ using value_type = typename ImutProfileInfo<T*>::value_type;
+ using value_type_ref = typename ImutProfileInfo<T*>::value_type_ref;
+ using key_type = value_type;
+ using key_type_ref = value_type_ref;
+ using data_type = bool;
+ using data_type_ref = bool;
+
+ static key_type_ref KeyOfValue(value_type_ref D) { return D; }
+ static data_type_ref DataOfValue(value_type_ref) { return true; }
+
+ static bool isEqual(key_type_ref LHS, key_type_ref RHS) { return LHS == RHS; }
+
+ static bool isLess(key_type_ref LHS, key_type_ref RHS) { return LHS < RHS; }
+
+ static bool isDataEqual(data_type_ref, data_type_ref) { return true; }
+};
+
+//===----------------------------------------------------------------------===//
+// Immutable Set
+//===----------------------------------------------------------------------===//
+
+template <typename ValT, typename ValInfo = ImutContainerInfo<ValT>>
+class ImmutableSet {
+public:
+ using value_type = typename ValInfo::value_type;
+ using value_type_ref = typename ValInfo::value_type_ref;
+ using TreeTy = ImutAVLTree<ValInfo>;
+
+private:
+ IntrusiveRefCntPtr<TreeTy> Root;
+
+public:
+ /// Constructs a set from a pointer to a tree root. In general one
+ /// should use a Factory object to create sets instead of directly
+  /// invoking the constructor, but there are cases where making this
+ /// constructor public is useful.
+ explicit ImmutableSet(TreeTy *R) : Root(R) {}
+
+ class Factory {
+ typename TreeTy::Factory F;
+ const bool Canonicalize;
+
+ public:
+ Factory(bool canonicalize = true)
+ : Canonicalize(canonicalize) {}
+
+ Factory(BumpPtrAllocator& Alloc, bool canonicalize = true)
+ : F(Alloc), Canonicalize(canonicalize) {}
+
+ Factory(const Factory& RHS) = delete;
+ void operator=(const Factory& RHS) = delete;
+
+ /// getEmptySet - Returns an immutable set that contains no elements.
+ ImmutableSet getEmptySet() {
+ return ImmutableSet(F.getEmptyTree());
+ }
+
+ /// add - Creates a new immutable set that contains all of the values
+ /// of the original set with the addition of the specified value. If
+ /// the original set already included the value, then the original set is
+ /// returned and no memory is allocated. The time and space complexity
+ /// of this operation is logarithmic in the size of the original set.
+ /// The memory allocated to represent the set is released when the
+ /// factory object that created the set is destroyed.
+ [[nodiscard]] ImmutableSet add(ImmutableSet Old, value_type_ref V) {
+ TreeTy *NewT = F.add(Old.Root.get(), V);
+ return ImmutableSet(Canonicalize ? F.getCanonicalTree(NewT) : NewT);
+ }
+
+ /// remove - Creates a new immutable set that contains all of the values
+ /// of the original set with the exception of the specified value. If
+ /// the original set did not contain the value, the original set is
+ /// returned and no memory is allocated. The time and space complexity
+ /// of this operation is logarithmic in the size of the original set.
+ /// The memory allocated to represent the set is released when the
+ /// factory object that created the set is destroyed.
+ [[nodiscard]] ImmutableSet remove(ImmutableSet Old, value_type_ref V) {
+ TreeTy *NewT = F.remove(Old.Root.get(), V);
+ return ImmutableSet(Canonicalize ? F.getCanonicalTree(NewT) : NewT);
+ }
+
+ BumpPtrAllocator& getAllocator() { return F.getAllocator(); }
+
+ typename TreeTy::Factory *getTreeFactory() const {
+ return const_cast<typename TreeTy::Factory *>(&F);
+ }
+ };
+
+ friend class Factory;
+
+ /// Returns true if the set contains the specified value.
+ bool contains(value_type_ref V) const {
+ return Root ? Root->contains(V) : false;
+ }
+
+ bool operator==(const ImmutableSet &RHS) const {
+ return Root && RHS.Root ? Root->isEqual(*RHS.Root.get()) : Root == RHS.Root;
+ }
+
+ bool operator!=(const ImmutableSet &RHS) const {
+ return Root && RHS.Root ? Root->isNotEqual(*RHS.Root.get())
+ : Root != RHS.Root;
+ }
+
+ TreeTy *getRoot() {
+ if (Root) { Root->retain(); }
+ return Root.get();
+ }
+
+ TreeTy *getRootWithoutRetain() const { return Root.get(); }
+
+ /// isEmpty - Return true if the set contains no elements.
+ bool isEmpty() const { return !Root; }
+
+ /// isSingleton - Return true if the set contains exactly one element.
+ /// This method runs in constant time.
+ bool isSingleton() const { return getHeight() == 1; }
+
+ //===--------------------------------------------------===//
+ // Iterators.
+ //===--------------------------------------------------===//
+
+ using iterator = ImutAVLValueIterator<ImmutableSet>;
+
+ iterator begin() const { return iterator(Root.get()); }
+ iterator end() const { return iterator(); }
+
+ //===--------------------------------------------------===//
+ // Utility methods.
+ //===--------------------------------------------------===//
+
+ unsigned getHeight() const { return Root ? Root->getHeight() : 0; }
+
+ static void Profile(FoldingSetNodeID &ID, const ImmutableSet &S) {
+ ID.AddPointer(S.Root.get());
+ }
+
+ void Profile(FoldingSetNodeID &ID) const { return Profile(ID, *this); }
+
+ //===--------------------------------------------------===//
+ // For testing.
+ //===--------------------------------------------------===//
+
+ void validateTree() const { if (Root) Root->validateTree(); }
+};
+
+// NOTE: This may some day replace the current ImmutableSet.
+template <typename ValT, typename ValInfo = ImutContainerInfo<ValT>>
+class ImmutableSetRef {
+public:
+ using value_type = typename ValInfo::value_type;
+ using value_type_ref = typename ValInfo::value_type_ref;
+ using TreeTy = ImutAVLTree<ValInfo>;
+ using FactoryTy = typename TreeTy::Factory;
+
+private:
+ IntrusiveRefCntPtr<TreeTy> Root;
+ FactoryTy *Factory;
+
+public:
+ /// Constructs a set from a pointer to a tree root. In general one
+ /// should use a Factory object to create sets instead of directly
+  /// invoking the constructor, but there are cases where making this
+ /// constructor public is useful.
+ ImmutableSetRef(TreeTy *R, FactoryTy *F) : Root(R), Factory(F) {}
+
+ static ImmutableSetRef getEmptySet(FactoryTy *F) {
+ return ImmutableSetRef(0, F);
+ }
+
+ ImmutableSetRef add(value_type_ref V) {
+ return ImmutableSetRef(Factory->add(Root.get(), V), Factory);
+ }
+
+ ImmutableSetRef remove(value_type_ref V) {
+ return ImmutableSetRef(Factory->remove(Root.get(), V), Factory);
+ }
+
+ /// Returns true if the set contains the specified value.
+ bool contains(value_type_ref V) const {
+ return Root ? Root->contains(V) : false;
+ }
+
+ ImmutableSet<ValT> asImmutableSet(bool canonicalize = true) const {
+ return ImmutableSet<ValT>(
+ canonicalize ? Factory->getCanonicalTree(Root.get()) : Root.get());
+ }
+
+ TreeTy *getRootWithoutRetain() const { return Root.get(); }
+
+ bool operator==(const ImmutableSetRef &RHS) const {
+ return Root && RHS.Root ? Root->isEqual(*RHS.Root.get()) : Root == RHS.Root;
+ }
+
+ bool operator!=(const ImmutableSetRef &RHS) const {
+ return Root && RHS.Root ? Root->isNotEqual(*RHS.Root.get())
+ : Root != RHS.Root;
+ }
+
+ /// isEmpty - Return true if the set contains no elements.
+ bool isEmpty() const { return !Root; }
+
+ /// isSingleton - Return true if the set contains exactly one element.
+ /// This method runs in constant time.
+ bool isSingleton() const { return getHeight() == 1; }
+
+ //===--------------------------------------------------===//
+ // Iterators.
+ //===--------------------------------------------------===//
+
+ using iterator = ImutAVLValueIterator<ImmutableSetRef>;
+
+ iterator begin() const { return iterator(Root.get()); }
+ iterator end() const { return iterator(); }
+
+ //===--------------------------------------------------===//
+ // Utility methods.
+ //===--------------------------------------------------===//
+
+ unsigned getHeight() const { return Root ? Root->getHeight() : 0; }
+
+ static void Profile(FoldingSetNodeID &ID, const ImmutableSetRef &S) {
+ ID.AddPointer(S.Root.get());
+ }
+
+ void Profile(FoldingSetNodeID &ID) const { return Profile(ID, *this); }
+
+ //===--------------------------------------------------===//
+ // For testing.
+ //===--------------------------------------------------===//
+
+ void validateTree() const { if (Root) Root->validateTree(); }
+};
+
+} // end namespace llvm
+
+#endif // LLVM_ADT_IMMUTABLESET_H
+
+#ifdef __GNUC__
+#pragma GCC diagnostic pop
+#endif
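
For context, a minimal sketch of how the ImmutableSet API added above is typically used (illustrative only; the identifiers below are not part of this commit). With canonicalization enabled in the Factory (the default), sets with the same contents end up sharing one canonical tree:

    #include "llvm/ADT/ImmutableSet.h"

    void exampleImmutableSet() {
      llvm::ImmutableSet<int>::Factory F;
      llvm::ImmutableSet<int> S0 = F.getEmptySet();
      llvm::ImmutableSet<int> S1 = F.add(F.add(S0, 1), 2);
      llvm::ImmutableSet<int> S2 = F.add(F.add(S0, 2), 1); // same contents, different order
      bool Equal = (S1 == S2);      // true: contents compared (pointer-equal when canonicalized)
      bool HasOne = S1.contains(1); // true
      (void)Equal; (void)HasOne;
    }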
diff --git a/contrib/libs/llvm16/include/llvm/IR/FixedPointBuilder.h b/contrib/libs/llvm16/include/llvm/IR/FixedPointBuilder.h
new file mode 100644
index 0000000000..07a68ad4a6
--- /dev/null
+++ b/contrib/libs/llvm16/include/llvm/IR/FixedPointBuilder.h
@@ -0,0 +1,478 @@
+#pragma once
+
+#ifdef __GNUC__
+#pragma GCC diagnostic push
+#pragma GCC diagnostic ignored "-Wunused-parameter"
+#endif
+
+//===- llvm/FixedPointBuilder.h - Builder for fixed-point ops ---*- C++ -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// This file defines the FixedPointBuilder class, which is used as a convenient
+// way to lower fixed-point arithmetic operations to LLVM IR.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_IR_FIXEDPOINTBUILDER_H
+#define LLVM_IR_FIXEDPOINTBUILDER_H
+
+#include "llvm/ADT/APFixedPoint.h"
+#include "llvm/IR/Constant.h"
+#include "llvm/IR/Constants.h"
+#include "llvm/IR/IRBuilder.h"
+#include "llvm/IR/InstrTypes.h"
+#include "llvm/IR/Instruction.h"
+#include "llvm/IR/IntrinsicInst.h"
+#include "llvm/IR/Intrinsics.h"
+#include "llvm/IR/Type.h"
+#include "llvm/IR/Value.h"
+
+#include <cmath>
+
+namespace llvm {
+
+template <class IRBuilderTy> class FixedPointBuilder {
+ IRBuilderTy &B;
+
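+ /// Convert \p Src, the integer representation of a fixed-point value with
+ /// semantics \p SrcSema, to the representation required by \p DstSema (or to
+ /// a plain integer when \p DstIsInteger is true): shift to adjust the scale,
+ /// resize, and clamp to the destination range when \p DstSema is saturated.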
+ Value *Convert(Value *Src, const FixedPointSemantics &SrcSema,
+ const FixedPointSemantics &DstSema, bool DstIsInteger) {
+ unsigned SrcWidth = SrcSema.getWidth();
+ unsigned DstWidth = DstSema.getWidth();
+ unsigned SrcScale = SrcSema.getScale();
+ unsigned DstScale = DstSema.getScale();
+ bool SrcIsSigned = SrcSema.isSigned();
+ bool DstIsSigned = DstSema.isSigned();
+
+ Type *DstIntTy = B.getIntNTy(DstWidth);
+
+ Value *Result = Src;
+ unsigned ResultWidth = SrcWidth;
+
+ // Downscale.
+ if (DstScale < SrcScale) {
+ // When converting to integers, we round towards zero. For negative
+ // numbers, right shifting rounds towards negative infinity. In this case,
+ // we can just round up before shifting.
+ if (DstIsInteger && SrcIsSigned) {
+ Value *Zero = Constant::getNullValue(Result->getType());
+ Value *IsNegative = B.CreateICmpSLT(Result, Zero);
+ Value *LowBits = ConstantInt::get(
+ B.getContext(), APInt::getLowBitsSet(ResultWidth, SrcScale));
+ Value *Rounded = B.CreateAdd(Result, LowBits);
+ Result = B.CreateSelect(IsNegative, Rounded, Result);
+ }
+
+ Result = SrcIsSigned
+ ? B.CreateAShr(Result, SrcScale - DstScale, "downscale")
+ : B.CreateLShr(Result, SrcScale - DstScale, "downscale");
+ }
+
+ if (!DstSema.isSaturated()) {
+ // Resize.
+ Result = B.CreateIntCast(Result, DstIntTy, SrcIsSigned, "resize");
+
+ // Upscale.
+ if (DstScale > SrcScale)
+ Result = B.CreateShl(Result, DstScale - SrcScale, "upscale");
+ } else {
+ // Adjust the number of fractional bits.
+ if (DstScale > SrcScale) {
+ // Compare to DstWidth to prevent resizing twice.
+ ResultWidth = std::max(SrcWidth + DstScale - SrcScale, DstWidth);
+ Type *UpscaledTy = B.getIntNTy(ResultWidth);
+ Result = B.CreateIntCast(Result, UpscaledTy, SrcIsSigned, "resize");
+ Result = B.CreateShl(Result, DstScale - SrcScale, "upscale");
+ }
+
+ // Handle saturation.
+ bool LessIntBits = DstSema.getIntegralBits() < SrcSema.getIntegralBits();
+ if (LessIntBits) {
+ Value *Max = ConstantInt::get(
+ B.getContext(),
+ APFixedPoint::getMax(DstSema).getValue().extOrTrunc(ResultWidth));
+ Value *TooHigh = SrcIsSigned ? B.CreateICmpSGT(Result, Max)
+ : B.CreateICmpUGT(Result, Max);
+ Result = B.CreateSelect(TooHigh, Max, Result, "satmax");
+ }
+ // The result cannot undershoot the destination minimum if the source is
+ // unsigned, since every fixed-point type can represent the unsigned
+ // minimum of 0.
+ if (SrcIsSigned && (LessIntBits || !DstIsSigned)) {
+ Value *Min = ConstantInt::get(
+ B.getContext(),
+ APFixedPoint::getMin(DstSema).getValue().extOrTrunc(ResultWidth));
+ Value *TooLow = B.CreateICmpSLT(Result, Min);
+ Result = B.CreateSelect(TooLow, Min, Result, "satmin");
+ }
+
+ // Resize the integer part to get the final destination size.
+ if (ResultWidth != DstWidth)
+ Result = B.CreateIntCast(Result, DstIntTy, SrcIsSigned, "resize");
+ }
+ return Result;
+ }
+
+ /// Get the common semantic for two semantics, with the added imposition that
+ /// saturated padded types retain the padding bit.
+ FixedPointSemantics
+ getCommonBinopSemantic(const FixedPointSemantics &LHSSema,
+ const FixedPointSemantics &RHSSema) {
+ auto C = LHSSema.getCommonSemantics(RHSSema);
+ bool BothPadded =
+ LHSSema.hasUnsignedPadding() && RHSSema.hasUnsignedPadding();
+ return FixedPointSemantics(
+ C.getWidth() + (unsigned)(BothPadded && C.isSaturated()), C.getScale(),
+ C.isSigned(), C.isSaturated(), BothPadded);
+ }
+
+ /// Given a floating point type and a fixed-point semantic, return a floating
+ /// point type which can accommodate the fixed-point semantic. This is either
+ /// \p Ty, or a floating point type with a larger exponent than Ty.
+ Type *getAccommodatingFloatType(Type *Ty, const FixedPointSemantics &Sema) {
+ const fltSemantics *FloatSema = &Ty->getFltSemantics();
+ while (!Sema.fitsInFloatSemantics(*FloatSema))
+ FloatSema = APFixedPoint::promoteFloatSemantics(FloatSema);
+ return Type::getFloatingPointTy(Ty->getContext(), *FloatSema);
+ }
+
+public:
+ FixedPointBuilder(IRBuilderTy &Builder) : B(Builder) {}
+
+ /// Convert an integer value representing a fixed-point number from one
+ /// fixed-point semantic to another fixed-point semantic.
+ /// \p Src - The source value
+ /// \p SrcSema - The fixed-point semantic of the source value
+ /// \p DstSema - The resulting fixed-point semantic
+ Value *CreateFixedToFixed(Value *Src, const FixedPointSemantics &SrcSema,
+ const FixedPointSemantics &DstSema) {
+ return Convert(Src, SrcSema, DstSema, false);
+ }
+
+ /// Convert an integer value representing a fixed-point number to an integer
+ /// with the given bit width and signedness.
+ /// \p Src - The source value
+ /// \p SrcSema - The fixed-point semantic of the source value
+ /// \p DstWidth - The bit width of the result value
+ /// \p DstIsSigned - The signedness of the result value
+ Value *CreateFixedToInteger(Value *Src, const FixedPointSemantics &SrcSema,
+ unsigned DstWidth, bool DstIsSigned) {
+ return Convert(
+ Src, SrcSema,
+ FixedPointSemantics::GetIntegerSemantics(DstWidth, DstIsSigned), true);
+ }
+
+ /// Convert an integer value with the given signedness to an integer value
+ /// representing the given fixed-point semantic.
+ /// \p Src - The source value
+ /// \p SrcIsSigned - The signedness of the source value
+ /// \p DstSema - The resulting fixed-point semantic
+ Value *CreateIntegerToFixed(Value *Src, unsigned SrcIsSigned,
+ const FixedPointSemantics &DstSema) {
+ return Convert(Src,
+ FixedPointSemantics::GetIntegerSemantics(
+ Src->getType()->getScalarSizeInBits(), SrcIsSigned),
+ DstSema, false);
+ }
+
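+ /// Convert an integer value representing a fixed-point number to a
+ /// floating-point value.
+ /// \p Src - The source value
+ /// \p SrcSema - The fixed-point semantic of the source value
+ /// \p DstTy - The destination floating-point type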
+ Value *CreateFixedToFloating(Value *Src, const FixedPointSemantics &SrcSema,
+ Type *DstTy) {
+ Value *Result;
+ Type *OpTy = getAccommodatingFloatType(DstTy, SrcSema);
+ // Convert the raw fixed-point value directly to floating point. If the
+ // value is too large to fit, it will be rounded, not truncated.
+ Result = SrcSema.isSigned() ? B.CreateSIToFP(Src, OpTy)
+ : B.CreateUIToFP(Src, OpTy);
+ // Rescale the integral-in-floating point by the scaling factor. This is
+ // lossless, except for overflow to infinity which is unlikely.
+ Result = B.CreateFMul(Result,
+ ConstantFP::get(OpTy, std::pow(2, -(int)SrcSema.getScale())));
+ if (OpTy != DstTy)
+ Result = B.CreateFPTrunc(Result, DstTy);
+ return Result;
+ }
+
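+ /// Convert a floating-point value to an integer value representing the
+ /// given fixed-point semantic.
+ /// \p Src - The source floating-point value
+ /// \p DstSema - The resulting fixed-point semantic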
+ Value *CreateFloatingToFixed(Value *Src, const FixedPointSemantics &DstSema) {
+ bool UseSigned = DstSema.isSigned() || DstSema.hasUnsignedPadding();
+ Value *Result = Src;
+ Type *OpTy = getAccommodatingFloatType(Src->getType(), DstSema);
+ if (OpTy != Src->getType())
+ Result = B.CreateFPExt(Result, OpTy);
+ // Rescale the floating point value so that its significant bits (for the
+ // purposes of the conversion) are in the integral range.
+ Result = B.CreateFMul(Result,
+ ConstantFP::get(OpTy, std::pow(2, DstSema.getScale())));
+
+ Type *ResultTy = B.getIntNTy(DstSema.getWidth());
+ if (DstSema.isSaturated()) {
+ Intrinsic::ID IID =
+ UseSigned ? Intrinsic::fptosi_sat : Intrinsic::fptoui_sat;
+ Result = B.CreateIntrinsic(IID, {ResultTy, OpTy}, {Result});
+ } else {
+ Result = UseSigned ? B.CreateFPToSI(Result, ResultTy)
+ : B.CreateFPToUI(Result, ResultTy);
+ }
+
+ // When saturating unsigned-with-padding using signed operations, we may
+ // get negative values. Emit an extra clamp to zero.
+ if (DstSema.isSaturated() && DstSema.hasUnsignedPadding()) {
+ Constant *Zero = Constant::getNullValue(Result->getType());
+ Result =
+ B.CreateSelect(B.CreateICmpSLT(Result, Zero), Zero, Result, "satmin");
+ }
+
+ return Result;
+ }
+
+ /// Add two fixed-point values and return the result in their common semantic.
+ /// \p LHS - The left hand side
+ /// \p LHSSema - The semantic of the left hand side
+ /// \p RHS - The right hand side
+ /// \p RHSSema - The semantic of the right hand side
+ Value *CreateAdd(Value *LHS, const FixedPointSemantics &LHSSema,
+ Value *RHS, const FixedPointSemantics &RHSSema) {
+ auto CommonSema = getCommonBinopSemantic(LHSSema, RHSSema);
+ bool UseSigned = CommonSema.isSigned() || CommonSema.hasUnsignedPadding();
+
+ Value *WideLHS = CreateFixedToFixed(LHS, LHSSema, CommonSema);
+ Value *WideRHS = CreateFixedToFixed(RHS, RHSSema, CommonSema);
+
+ Value *Result;
+ if (CommonSema.isSaturated()) {
+ Intrinsic::ID IID = UseSigned ? Intrinsic::sadd_sat : Intrinsic::uadd_sat;
+ Result = B.CreateBinaryIntrinsic(IID, WideLHS, WideRHS);
+ } else {
+ Result = B.CreateAdd(WideLHS, WideRHS);
+ }
+
+ return CreateFixedToFixed(Result, CommonSema,
+ LHSSema.getCommonSemantics(RHSSema));
+ }
+
+ /// Subtract two fixed-point values and return the result in their common
+ /// semantic.
+ /// \p LHS - The left hand side
+ /// \p LHSSema - The semantic of the left hand side
+ /// \p RHS - The right hand side
+ /// \p RHSSema - The semantic of the right hand side
+ Value *CreateSub(Value *LHS, const FixedPointSemantics &LHSSema,
+ Value *RHS, const FixedPointSemantics &RHSSema) {
+ auto CommonSema = getCommonBinopSemantic(LHSSema, RHSSema);
+ bool UseSigned = CommonSema.isSigned() || CommonSema.hasUnsignedPadding();
+
+ Value *WideLHS = CreateFixedToFixed(LHS, LHSSema, CommonSema);
+ Value *WideRHS = CreateFixedToFixed(RHS, RHSSema, CommonSema);
+
+ Value *Result;
+ if (CommonSema.isSaturated()) {
+ Intrinsic::ID IID = UseSigned ? Intrinsic::ssub_sat : Intrinsic::usub_sat;
+ Result = B.CreateBinaryIntrinsic(IID, WideLHS, WideRHS);
+ } else {
+ Result = B.CreateSub(WideLHS, WideRHS);
+ }
+
+ // Subtraction can end up below 0 for padded unsigned operations, so emit
+ // an extra clamp in that case.
+ if (CommonSema.isSaturated() && CommonSema.hasUnsignedPadding()) {
+ Constant *Zero = Constant::getNullValue(Result->getType());
+ Result =
+ B.CreateSelect(B.CreateICmpSLT(Result, Zero), Zero, Result, "satmin");
+ }
+
+ return CreateFixedToFixed(Result, CommonSema,
+ LHSSema.getCommonSemantics(RHSSema));
+ }
+
+ /// Multiply two fixed-point values and return the result in their common
+ /// semantic.
+ /// \p LHS - The left hand side
+ /// \p LHSSema - The semantic of the left hand side
+ /// \p RHS - The right hand side
+ /// \p RHSSema - The semantic of the right hand side
+ Value *CreateMul(Value *LHS, const FixedPointSemantics &LHSSema,
+ Value *RHS, const FixedPointSemantics &RHSSema) {
+ auto CommonSema = getCommonBinopSemantic(LHSSema, RHSSema);
+ bool UseSigned = CommonSema.isSigned() || CommonSema.hasUnsignedPadding();
+
+ Value *WideLHS = CreateFixedToFixed(LHS, LHSSema, CommonSema);
+ Value *WideRHS = CreateFixedToFixed(RHS, RHSSema, CommonSema);
+
+ Intrinsic::ID IID;
+ if (CommonSema.isSaturated()) {
+ IID = UseSigned ? Intrinsic::smul_fix_sat : Intrinsic::umul_fix_sat;
+ } else {
+ IID = UseSigned ? Intrinsic::smul_fix : Intrinsic::umul_fix;
+ }
+ Value *Result = B.CreateIntrinsic(
+ IID, {WideLHS->getType()},
+ {WideLHS, WideRHS, B.getInt32(CommonSema.getScale())});
+
+ return CreateFixedToFixed(Result, CommonSema,
+ LHSSema.getCommonSemantics(RHSSema));
+ }
+
+ /// Divide two fixed-point values and return the result in their common
+ /// semantic.
+ /// \p LHS - The left hand side
+ /// \p LHSSema - The semantic of the left hand side
+ /// \p RHS - The right hand side
+ /// \p RHSSema - The semantic of the right hand side
+ Value *CreateDiv(Value *LHS, const FixedPointSemantics &LHSSema,
+ Value *RHS, const FixedPointSemantics &RHSSema) {
+ auto CommonSema = getCommonBinopSemantic(LHSSema, RHSSema);
+ bool UseSigned = CommonSema.isSigned() || CommonSema.hasUnsignedPadding();
+
+ Value *WideLHS = CreateFixedToFixed(LHS, LHSSema, CommonSema);
+ Value *WideRHS = CreateFixedToFixed(RHS, RHSSema, CommonSema);
+
+ Intrinsic::ID IID;
+ if (CommonSema.isSaturated()) {
+ IID = UseSigned ? Intrinsic::sdiv_fix_sat : Intrinsic::udiv_fix_sat;
+ } else {
+ IID = UseSigned ? Intrinsic::sdiv_fix : Intrinsic::udiv_fix;
+ }
+ Value *Result = B.CreateIntrinsic(
+ IID, {WideLHS->getType()},
+ {WideLHS, WideRHS, B.getInt32(CommonSema.getScale())});
+
+ return CreateFixedToFixed(Result, CommonSema,
+ LHSSema.getCommonSemantics(RHSSema));
+ }
+
+ /// Left shift a fixed-point value by an unsigned integer value. The integer
+ /// value can be any bit width.
+ /// \p LHS - The left hand side
+ /// \p LHSSema - The semantic of the left hand side
+ /// \p RHS - The right hand side
+ Value *CreateShl(Value *LHS, const FixedPointSemantics &LHSSema, Value *RHS) {
+ bool UseSigned = LHSSema.isSigned() || LHSSema.hasUnsignedPadding();
+
+ RHS = B.CreateIntCast(RHS, LHS->getType(), /*IsSigned=*/false);
+
+ Value *Result;
+ if (LHSSema.isSaturated()) {
+ Intrinsic::ID IID = UseSigned ? Intrinsic::sshl_sat : Intrinsic::ushl_sat;
+ Result = B.CreateBinaryIntrinsic(IID, LHS, RHS);
+ } else {
+ Result = B.CreateShl(LHS, RHS);
+ }
+
+ return Result;
+ }
+
+ /// Right shift a fixed-point value by an unsigned integer value. The integer
+ /// value can be any bit width.
+ /// \p LHS - The left hand side
+ /// \p LHSSema - The semantic of the left hand side
+ /// \p RHS - The right hand side
+ Value *CreateShr(Value *LHS, const FixedPointSemantics &LHSSema, Value *RHS) {
+ RHS = B.CreateIntCast(RHS, LHS->getType(), false);
+
+ return LHSSema.isSigned() ? B.CreateAShr(LHS, RHS) : B.CreateLShr(LHS, RHS);
+ }
+
+ /// Compare two fixed-point values for equality.
+ /// \p LHS - The left hand side
+ /// \p LHSSema - The semantic of the left hand side
+ /// \p RHS - The right hand side
+ /// \p RHSSema - The semantic of the right hand side
+ Value *CreateEQ(Value *LHS, const FixedPointSemantics &LHSSema,
+ Value *RHS, const FixedPointSemantics &RHSSema) {
+ auto CommonSema = getCommonBinopSemantic(LHSSema, RHSSema);
+
+ Value *WideLHS = CreateFixedToFixed(LHS, LHSSema, CommonSema);
+ Value *WideRHS = CreateFixedToFixed(RHS, RHSSema, CommonSema);
+
+ return B.CreateICmpEQ(WideLHS, WideRHS);
+ }
+
+ /// Compare two fixed-point values for inequality.
+ /// \p LHS - The left hand side
+ /// \p LHSSema - The semantic of the left hand side
+ /// \p RHS - The right hand side
+ /// \p RHSSema - The semantic of the right hand side
+ Value *CreateNE(Value *LHS, const FixedPointSemantics &LHSSema,
+ Value *RHS, const FixedPointSemantics &RHSSema) {
+ auto CommonSema = getCommonBinopSemantic(LHSSema, RHSSema);
+
+ Value *WideLHS = CreateFixedToFixed(LHS, LHSSema, CommonSema);
+ Value *WideRHS = CreateFixedToFixed(RHS, RHSSema, CommonSema);
+
+ return B.CreateICmpNE(WideLHS, WideRHS);
+ }
+
+ /// Compare two fixed-point values as LHS < RHS.
+ /// \p LHS - The left hand side
+ /// \p LHSSema - The semantic of the left hand side
+ /// \p RHS - The right hand side
+ /// \p RHSSema - The semantic of the right hand side
+ Value *CreateLT(Value *LHS, const FixedPointSemantics &LHSSema,
+ Value *RHS, const FixedPointSemantics &RHSSema) {
+ auto CommonSema = getCommonBinopSemantic(LHSSema, RHSSema);
+
+ Value *WideLHS = CreateFixedToFixed(LHS, LHSSema, CommonSema);
+ Value *WideRHS = CreateFixedToFixed(RHS, RHSSema, CommonSema);
+
+ return CommonSema.isSigned() ? B.CreateICmpSLT(WideLHS, WideRHS)
+ : B.CreateICmpULT(WideLHS, WideRHS);
+ }
+
+ /// Compare two fixed-point values as LHS <= RHS.
+ /// \p LHS - The left hand side
+ /// \p LHSSema - The semantic of the left hand side
+ /// \p RHS - The right hand side
+ /// \p RHSSema - The semantic of the right hand side
+ Value *CreateLE(Value *LHS, const FixedPointSemantics &LHSSema,
+ Value *RHS, const FixedPointSemantics &RHSSema) {
+ auto CommonSema = getCommonBinopSemantic(LHSSema, RHSSema);
+
+ Value *WideLHS = CreateFixedToFixed(LHS, LHSSema, CommonSema);
+ Value *WideRHS = CreateFixedToFixed(RHS, RHSSema, CommonSema);
+
+ return CommonSema.isSigned() ? B.CreateICmpSLE(WideLHS, WideRHS)
+ : B.CreateICmpULE(WideLHS, WideRHS);
+ }
+
+ /// Compare two fixed-point values as LHS > RHS.
+ /// \p LHS - The left hand side
+ /// \p LHSSema - The semantic of the left hand side
+ /// \p RHS - The right hand side
+ /// \p RHSSema - The semantic of the right hand side
+ Value *CreateGT(Value *LHS, const FixedPointSemantics &LHSSema,
+ Value *RHS, const FixedPointSemantics &RHSSema) {
+ auto CommonSema = getCommonBinopSemantic(LHSSema, RHSSema);
+
+ Value *WideLHS = CreateFixedToFixed(LHS, LHSSema, CommonSema);
+ Value *WideRHS = CreateFixedToFixed(RHS, RHSSema, CommonSema);
+
+ return CommonSema.isSigned() ? B.CreateICmpSGT(WideLHS, WideRHS)
+ : B.CreateICmpUGT(WideLHS, WideRHS);
+ }
+
+ /// Compare two fixed-point values as LHS >= RHS.
+ /// \p LHS - The left hand side
+ /// \p LHSSema - The semantic of the left hand side
+ /// \p RHS - The right hand side
+ /// \p RHSSema - The semantic of the right hand side
+ Value *CreateGE(Value *LHS, const FixedPointSemantics &LHSSema,
+ Value *RHS, const FixedPointSemantics &RHSSema) {
+ auto CommonSema = getCommonBinopSemantic(LHSSema, RHSSema);
+
+ Value *WideLHS = CreateFixedToFixed(LHS, LHSSema, CommonSema);
+ Value *WideRHS = CreateFixedToFixed(RHS, RHSSema, CommonSema);
+
+ return CommonSema.isSigned() ? B.CreateICmpSGE(WideLHS, WideRHS)
+ : B.CreateICmpUGE(WideLHS, WideRHS);
+ }
+};
+
+} // end namespace llvm
+
+#endif // LLVM_IR_FIXEDPOINTBUILDER_H
+
+#ifdef __GNUC__
+#pragma GCC diagnostic pop
+#endif
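Similarly, a minimal sketch of driving the FixedPointBuilder defined above from an existing IRBuilder; the Q15.16 signed saturating semantics and all names here are illustrative assumptions, not part of the imported header:

#include "llvm/ADT/APFixedPoint.h"
#include "llvm/IR/FixedPointBuilder.h"
#include "llvm/IR/IRBuilder.h"
using namespace llvm;

// LHS and RHS are i32 values holding the raw fixed-point bit patterns.
Value *saturatingFixedAdd(IRBuilder<> &Builder, Value *LHS, Value *RHS) {
  FixedPointBuilder<IRBuilder<>> FPB(Builder);
  // Width 32, scale 16, signed, saturating, no unsigned padding bit (Q15.16).
  FixedPointSemantics Sema(/*Width=*/32, /*Scale=*/16, /*IsSigned=*/true,
                           /*IsSaturated=*/true, /*HasUnsignedPadding=*/false);
  // Both operands share one semantic, so the common semantic is Sema itself
  // and CreateAdd lowers to a single llvm.sadd.sat.i32 call.
  return FPB.CreateAdd(LHS, Sema, RHS, Sema);
}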
diff --git a/contrib/libs/llvm16/include/llvm/Support/LoongArchTargetParser.h b/contrib/libs/llvm16/include/llvm/Support/LoongArchTargetParser.h
new file mode 100644
index 0000000000..3e3cd8c18f
--- /dev/null
+++ b/contrib/libs/llvm16/include/llvm/Support/LoongArchTargetParser.h
@@ -0,0 +1,26 @@
+#pragma once
+
+#ifdef __GNUC__
+#pragma GCC diagnostic push
+#pragma GCC diagnostic ignored "-Wunused-parameter"
+#endif
+
+//===-- llvm/Support/LoongArchTargetParser.h --------------------*- C++ -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+///
+/// \file
+/// This header is deprecated in favour of
+/// `llvm/TargetParser/LoongArchTargetParser.h`.
+///
+//===----------------------------------------------------------------------===//
+
+#include "llvm/TargetParser/LoongArchTargetParser.h"
+
+#ifdef __GNUC__
+#pragma GCC diagnostic pop
+#endif